text (stringlengths 4 to 1.02M) | meta (dict) |
---|---|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implementer
from typing import Callable, List, Optional, Tuple
@implementer(IPlugin, IModuleData)
class ServerTime(ModuleData):
name = "ServerTime"
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("capabilitylist", 1, self.addCapability) ]
def load(self) -> None:
if "unloading-server-time" in self.ircd.dataCache:
del self.ircd.dataCache["unloading-server-time"]
return
if "cap-add" in self.ircd.functionCache:
self.ircd.functionCache["cap-add"]("server-time")
def unload(self) -> Optional["Deferred"]:
self.ircd.dataCache["unloading-server-time"] = True
def fullUnload(self) -> Optional["Deferred"]:
del self.ircd.dataCache["unloading-server-time"]
if "cap-del" in self.ircd.functionCache:
self.ircd.functionCache["cap-del"]("server-time")
def addCapability(self, user: "IRCUser", capList: List[str]) -> None:
capList.append("server-time")
serverTime = ServerTime() | {
"content_hash": "4ccc9cec746f60c37454bc4ac73ba965",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 33.806451612903224,
"alnum_prop": 0.7337786259541985,
"repo_name": "Heufneutje/txircd",
"id": "9b13d05f314ee5647f186035fb60f3ca688a61ac",
"size": "1048",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/next",
"path": "txircd/modules/ircv3/servertime.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "547"
},
{
"name": "Python",
"bytes": "792279"
}
],
"symlink_target": ""
} |
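A quick way to see what the capabilitylist hook above does is to call it directly. The sketch below is illustrative only: it assumes txircd and its dependencies are installed, and it passes None for the user argument because addCapability never inspects it.
# Exercise the capability hook without a running ircd.
from txircd.modules.ircv3.servertime import serverTime
capList = ["multi-prefix"]               # capabilities already being offered
serverTime.addCapability(None, capList)  # the hook only appends to the list
print(capList)                           # ['multi-prefix', 'server-time']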
import unittest
from hwt.simulator.simTestCase import SimTestCase
from hwtSimApi.constants import CLK_PERIOD
from hwtLib.examples.arithmetic.privateSignals import PrivateSignalsOfStructType
class PrivateSignalsOfStructTypeTC(SimTestCase):
@classmethod
def setUpClass(cls):
cls.u = PrivateSignalsOfStructType()
cls.compileSim(cls.u)
def test_pass_data(self):
u = self.u
u.a._ag.data.extend(range(30))
u.c._ag.data.extend(range(30))
self.runSim(30 * CLK_PERIOD)
eq = self.assertValSequenceEqual
eq(u.b._ag.data, list(range(30 - 1)))
eq(u.d._ag.data, list(range(6, -1, -1)) + list(range(30 - 6 - 2)))
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(PrivateSignalsOfStructTypeTC('test_pass_data'))
suite.addTest(unittest.makeSuite(PrivateSignalsOfStructTypeTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| {
"content_hash": "9a72f8ad6d0647f7850450bd5ef8648c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 29.96875,
"alnum_prop": 0.6809176225234619,
"repo_name": "Nic30/hwtLib",
"id": "a088c5978c3145ea377c475c7731900b2282d821",
"size": "1007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/examples/arithmetic/privateSignals_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Jonathan Fuerst <[email protected]>
"""
import os, requests, __builtin__
from smap import actuate, driver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
from requests.auth import HTTPDigestAuth
import json
import time
import bcrypt
import unicodedata
from twisted.internet import threads
class WIFI(driver.SmapDriver):
client_api = [
{"api": "rssi", "access": "r", "data_type":"double", "unit": "dBm"},
{"api": "signalToNoiseRatio", "access": "r", "data_type":"double", "unit": "dB"},
{"api": "clientOS", "access": "r", "data_type":"string"},
{"api": "deviceName", "access": "r", "data_type":"string"}
]
def getDevices(self):
devices = {}
r = requests.get(self.url+"/hm/api/v1/devices", auth=(self.user, self.password))
j = json.loads(r.text)
for d in j:
hostName = d["hostName"]
hostName = unicodedata.normalize('NFKD', hostName).encode('ascii','ignore')
location = d["location"]
location = unicodedata.normalize('NFKD', location).encode('ascii','ignore')
self.add_collection("/"+hostName)
cl2 = self.get_collection("/"+hostName)
cl2['Metadata'] = {
'Location' : {
'Room' : location
}
}
devices[hostName] = {"deviceName": d["hostName"], "location": location}
return devices
def getClients(self):
clients = []
r = requests.get(self.url+"/hm/api/v1/clients?q=10", auth=(self.user, self.password))
j = json.loads(r.text)
for c in j:
mac = c["macAddress"]
hashed = bcrypt.hashpw(mac.encode('latin-1'), bcrypt.gensalt())
hashed = hashed.replace("/", "")
print hashed
# hashed = mac
try:
rssi_str = c["rssi"][:-4]
rssi = float(rssi_str)
except:
rssi = 0.0
try:
snr_str = c["signalToNoiseRatio"][:-3]
snr = float(snr_str)
except:
snr = 0.0
snr_str = c["signalToNoiseRatio"][:-3]
deviceName = c["deviceName"]
deviceName = unicodedata.normalize('NFKD', deviceName).encode('ascii','ignore')
clients.append({"id": hashed, "deviceName":deviceName, "rssi":rssi, "signalToNoiseRatio":snr})
return clients
def setup(self, opts):
self.tz = opts.get('Metadata/Timezone', None)
self.url = opts.get('url', None)
self.password = opts.get('password', None)
self.user = opts.get('user', None)
self.rate = float(opts.get('Rate', 300))
# Get all accesspoints
self.devices = self.getDevices()
# for d in self.devices:
# self.add_collection("/"+d["deviceName"])
# Get all clients
# for option in self.api:
# self.add_timeseries('/'+ tstat_device + '/' +option["api"],
# option["unit"], data_type=option["data_type"], timezone=self.tz)
def start(self):
# call self.read every self.rate seconds
periodicSequentialCall(self.read).start(self.rate)
def read(self):
self.clients = self.getClients()
for c in self.clients:
d_n = c["deviceName"]
c_id = c["id"]
c_rssi = c["rssi"]
c_snr = c["signalToNoiseRatio"]
path = "/"+d_n +"/"+ str(c_id)
print c_rssi
if self.get_timeseries(path+"/rssi") is None:
self.add_timeseries(path+"/rssi",
"dbm", data_type="double", timezone=self.tz)
self.add_timeseries(path+"/snr",
"db", data_type="double", timezone=self.tz)
self.add(path+"/rssi", c_rssi)
self.add(path+"/snr", c_snr)
def remove_non_ascii(text):
return ''.join(i for i in text if ord(i)<128)
def whatisthis(s):
if isinstance(s, str):
print "ordinary string"
elif isinstance(s, unicode):
print "unicode string"
else:
print "not a string"
| {
"content_hash": "432eb34d89f00e128898c4a1e9fafd67",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 106,
"avg_line_length": 39.340425531914896,
"alnum_prop": 0.599242833964305,
"repo_name": "jf87/smap",
"id": "f85491c08c90350a9904411cb62e370bb7962dd2",
"size": "5547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/smap/drivers/wifi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "325117"
},
{
"name": "HTML",
"bytes": "9642"
},
{
"name": "Java",
"bytes": "47918"
},
{
"name": "Lua",
"bytes": "9058"
},
{
"name": "Makefile",
"bytes": "5715"
},
{
"name": "Python",
"bytes": "1704999"
},
{
"name": "R",
"bytes": "23461"
},
{
"name": "Shell",
"bytes": "1273"
},
{
"name": "TeX",
"bytes": "40212"
},
{
"name": "XSLT",
"bytes": "5081"
}
],
"symlink_target": ""
} |
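getClients above anonymises each MAC address by bcrypt-hashing it (and stripping '/' so the hash is safe to use in a sMAP path) and turns report strings such as "-63 dBm" into floats. Below is a standalone sketch of that shaping step, written for Python 3 even though the driver itself is Python 2, assuming the bcrypt package is installed; the sample client dict is made up.
import bcrypt
client = {"macAddress": "aa:bb:cc:dd:ee:ff", "rssi": "-63 dBm", "signalToNoiseRatio": "41 dB"}
# Hash the MAC so clients are tracked without storing the raw address.
hashed = bcrypt.hashpw(client["macAddress"].encode("latin-1"), bcrypt.gensalt())
hashed = hashed.decode("ascii").replace("/", "")
# The AP reports "<value> dBm" / "<value> dB"; drop the unit suffix and parse.
try:
    rssi = float(client["rssi"][:-4])
except (KeyError, ValueError):
    rssi = 0.0
try:
    snr = float(client["signalToNoiseRatio"][:-3])
except (KeyError, ValueError):
    snr = 0.0
record = {"id": hashed, "rssi": rssi, "signalToNoiseRatio": snr}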
'''
Test Cases for Document Class for Word Cloud Project
Daniel Klein
Computer-Based Honors Program
The University of Alabama
9.12.2013
'''
import unittest
import pickle
from src.core.python.Metadata import Metadata
from src.core.python.Document import Document
import os, os.path
class DocumentTest(unittest.TestCase):
def setUp(self):
self.test_text = ("Here is some test text. Blah blah blah blah \n"
+ "1234567890987654321 Yea Alabama Drown 'em Tide!\n")
self.test_metadata = Metadata()
self.test_filename = "test_document.txt"
self.test_document = Document(self.test_metadata,
self.test_text,
self.test_filename)
def tearDown(self):
del self.test_metadata
del self.test_document
def test_serialization(self):
print("Testing Document.write_to_file()...")
test_output_path = self.test_document.output_filename
if not self.test_document.write_to_file():
self.fail("Document class: You need to write a serialization test.")
with open(test_output_path, 'r') as pickled_doc_file:
unpickled_doc = pickle.load(pickled_doc_file)
self.assertEqual(unpickled_doc.doc_text, self.test_document.doc_text)
self.assertEqual(unpickled_doc.word_count, self.test_document.word_count)
self.assertEqual(unpickled_doc.output_filename, self.test_document.output_filename)
# clean-up
os.remove(test_output_path)
def test_serialization_output_nonwritable(self):
print("Testing Document.write_to_file() with read-only output file...")
test_output_path = self.test_document.output_filename
with open(test_output_path, 'w') as touch:
pass
os.chmod(test_output_path, 0444)
self.assertRaises(IOError, self.test_document.write_to_file)
# clean-up
os.chmod(test_output_path, 0777)
os.remove(test_output_path)
def test_convert_to_string(self):
print("Testing Document.__str__()...")
expected_string = ""
expected_string += str(self.test_metadata)
expected_string += self.test_text
self.assertEqual(str(self.test_document), expected_string)
def test_count_words(self):
print("Testing Document.count_words()...")
expected_word_count = 15
self.assertEqual(self.test_document.word_count, expected_word_count)
def test_print_doc(self):
print("Testing Document.print_doc()...")
self.test_document.print_doc()
print("Document.print_doc() testing finished.***")
def test_print_metadata(self):
print("Testing Document.print_metadata()...")
self.test_document.print_metadata()
print("Document.print_metadata() testing finished.***")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | {
"content_hash": "0d86026ffd04542a17b23199ec0ff7ad",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 91,
"avg_line_length": 35.056179775280896,
"alnum_prop": 0.6076923076923076,
"repo_name": "dmarklein/WordCloud",
"id": "3297d030f76812043624f6b4a501f9fdde8b673a",
"size": "3120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/python/DocumentTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5968"
},
{
"name": "CSS",
"bytes": "17383"
},
{
"name": "Groovy",
"bytes": "82610"
},
{
"name": "HTML",
"bytes": "1258"
},
{
"name": "Java",
"bytes": "118357"
},
{
"name": "JavaScript",
"bytes": "13989"
},
{
"name": "PowerShell",
"bytes": "507"
},
{
"name": "Python",
"bytes": "172532"
}
],
"symlink_target": ""
} |
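test_serialization above checks that a Document survives a pickle round trip. The same pattern reduced to the standard library, with a hypothetical stand-in class since the project's Document is not shown here; note the test opens the pickle file in text mode, which only works under Python 2, so the sketch uses binary mode.
import os, pickle, tempfile
class Doc(object):
    # Minimal stand-in used only to show the round trip.
    def __init__(self, text):
        self.doc_text = text
        self.word_count = len(text.split())
doc = Doc("Here is some test text.")
path = os.path.join(tempfile.mkdtemp(), "doc.pkl")
with open(path, "wb") as fout:
    pickle.dump(doc, fout)
with open(path, "rb") as fin:
    loaded = pickle.load(fin)
assert loaded.doc_text == doc.doc_text and loaded.word_count == doc.word_count
os.remove(path)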
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.employee_response import EmployeeResponse
class TestEmployeeResponse(unittest.TestCase):
""" EmployeeResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testEmployeeResponse(self):
"""
Test EmployeeResponse
"""
model = kinow_client.models.employee_response.EmployeeResponse()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "29504e65cc9d153b271b1839b6a7f039",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 19.075,
"alnum_prop": 0.6736566186107471,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "bdb56c38249d6259a3cab3f532cab711ae183a0b",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_employee_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
} |
import logging
import numpy as np
import dill
from collections import defaultdict
from skipthought import utils
class Batch:
def __init__(self, data, pad_value, go_value, eos_value):
"""Class which creates batch from data and could be passed into
SkipthoughtModel._fill_feed_dict_* methods.
For encoder batches, `seq_lengths` field is used in order to fill feed_dict.
For decoder batches, `weights` field is used in order to fill feed_dict.
(See SkipthoughtModel code)
Args:
data (np.array): Encoded and padded batch.
pad_value (int): <pad> token index.
go_value (int): <go> token index.
eos_value (int): <eos> token index.
"""
self.data = data
self.pad_value = pad_value
self.go_value = go_value
self.eos_value = eos_value
self._weights = utils.seq_loss_weights(self.data, self.pad_value)
self._seq_lengths = utils.sequence_lengths(self.data, self.pad_value)
def __getitem__(self, item):
return self.data[item]
def __repr__(self):
return self.data.__repr__()
@property
def weights(self):
return self._weights
@property
def seq_lengths(self):
return self._seq_lengths
@property
def shape(self):
return self.data.shape
class Vocab:
EOS_TOKEN = "<eos>"
PAD_TOKEN = "<pad>"
UNK_TOKEN = "<unk>"
START_VOCAB = [EOS_TOKEN, PAD_TOKEN, UNK_TOKEN]
def __init__(self):
self.word2index = {}
self.index2word = {}
self.word_freq = defaultdict(int)
self.total_words = 0
self.add_words(Vocab.START_VOCAB)
@property
def eos_value(self):
return self.encode_word(Vocab.EOS_TOKEN)
@property
def go_value(self):
return self.eos_value
@property
def pad_value(self):
return self.encode_word(Vocab.PAD_TOKEN)
@property
def unk_value(self):
return self.encode_word(Vocab.UNK_TOKEN)
def cut_by_freq(self, max_vocab_size):
"""Removes all words except `max_vocab_size` most frequent ones.
Args:
max_vocab_size (int): Target vocabulary size.
"""
for token in Vocab.START_VOCAB:
self.word_freq.pop(token, None)
self.word_freq = sorted(self.word_freq.items(), key=lambda x: x[1],
reverse=True)[:max_vocab_size - len(Vocab.START_VOCAB)]
self.word_freq = dict(self.word_freq)
for token in Vocab.START_VOCAB:
self.word_freq[token] = 1
self._id_word_mappings_from_word_freq()
def add_word(self, word, count=1):
if word not in self.word2index:
index = len(self.word2index)
self.word2index[word] = index
self.index2word[index] = word
self.word_freq[word] += count
def add_words(self, words):
for word in words:
self.add_word(word)
def encode_word(self, word):
if word not in self.word2index:
return self.word2index[Vocab.UNK_TOKEN]
else:
return self.word2index[word]
def encode_words(self, words, with_eos=False, with_go=False):
encoded = []
if with_go:
encoded.append(self.eos_value)
encoded.extend([self.encode_word(w) for w in words])
if with_eos:
encoded.append(self.eos_value)
return encoded
def decode_idx(self, index):
return self.index2word[index]
def decode_idxs(self, indices):
return [self.decode_idx(idx) for idx in indices]
def _id_word_mappings_from_word_freq(self):
words = self.word_freq.keys()
self.index2word = dict(enumerate(words))
self.word2index = {v: k for k, v in self.index2word.items()}
def __len__(self):
return len(self.word_freq)
def __contains__(self, item):
return item in self.word2index
class TextData:
def __init__(self, fname, line_process_fn=lambda x: x.strip(),
max_vocab_size=100000, max_len=100, verbose=10000):
"""Class for reading text data and making batches.
Args:
fname (str): File with data.
line_process_fn (callable): Line processing function (str -> str). Use it if you want
to do lemmatization or remove stopwords or smth. Default lambda x: x.strip()
max_vocab_size (int): Maximum vocabulary size. Most frequent words are used.
verbose (int): Verbosity level on reading data.
"""
self.verbose = verbose
self._logger = logging.getLogger(__name__)
self.fname = fname
self.max_len = max_len
self.max_vocab_size = max_vocab_size
self.line_process_fn = line_process_fn
self._check_args()
self.vocab = None
self.dataset = None
self.total_lines = None
self._build_vocabulary_and_stats()
self._build_dataset()
def _check_args(self):
import os
assert self.max_vocab_size > 0
assert os.path.isfile(self.fname)
def _build_vocabulary_and_stats(self):
"""Builds vocabulary, calculates maximum length and total number of
lines in file.
"""
with open(self.fname) as f:
self.vocab = Vocab()
self.total_lines = 0
for line in f:
tokens = self._tok_line(line)
tokens = tokens[:self.max_len-1] # cutting at maxlen (-1 because of pad token)
self.vocab.add_words(tokens)
self.total_lines += 1
if self.total_lines % self.verbose == 0:
self._logger.info("Read\t{0} lines.".format(
self.total_lines))
self.vocab.cut_by_freq(self.max_vocab_size)
self._logger.info("Done building vocab and stats.")
def _build_dataset(self):
"""Reads lines from file and encodes words.
"""
with open(self.fname) as f:
self.dataset = []
for line in f:
line = line.strip()
self.dataset.append(line)
def _tok_line(self, line):
"""Tokenizes raw line.
Args:
line (str): Raw line.
Returns:
tokens (list of str): List of tokens.
"""
return self.line_process_fn(line).split()
def encode_line(self, line, with_eos=False, with_go=False):
"""Encodes raw line to list of word indices. Applies ``line_process_fn`` before encoding.
Args:
line (str): Raw lines.
with_eos (bool): Whether to append eos_value at the end or not.
with_go (bool): Whether to append go_token in the beginning of line or not.
Returns:
encoded (list of ints): Encoded line.
"""
tokens = self._tok_line(line)
encoded = self.vocab.encode_words(tokens, with_eos, with_go)
return encoded
def encode_lines(self, lines, with_eos=False, with_go=False):
"""Encodes raw lines to list of word indices. Applies ``line_process_fn`` for each line.
Args:
lines (list of str): List of raw lines.
with_eos (bool): Whether to append eos_value at the end of each line or not.
with_go (bool): Whether to append go_token in the beginning of each line or not.
Returns:
encoded (list of list of ints): List of encoded lines.
"""
encoded = [self.encode_line(line, with_eos, with_go) for line in lines]
return encoded
def decode_line(self, encoded_line):
return self.vocab.decode_idxs(encoded_line)
def make_batch(self, encoded_lines, max_len=None):
"""Makes `Batch` instance based on `encoded_lines`.
Args:
encoded_lines (list of list of int): List of encoded lines. Encoded lines
can be obtained via ``encode_lines`` or ``encode_line`` methods.
max_len (int): If not None, lines will be padded up to max_len with vocab.pad_value.
Otherwise, lines will be padded using maximum length of line in ``encoded_lines``.
Returns:
batch (Batch): Batch instance.
"""
if not max_len:
max_len = min(max(map(len, encoded_lines)), self.max_len)
encoded_lines = [line[:max_len-1] for line in encoded_lines]
padded_lines = utils.pad_sequences(encoded_lines, max_len, self.vocab.pad_value)
batch = Batch(padded_lines, self.vocab.pad_value, self.vocab.go_value, self.vocab.eos_value)
return batch
def _make_triples_for_paragraph(self, paragraph):
if len(paragraph) < 3:
return [], [], []
prev = paragraph[:-2]
curr = paragraph[1:-1]
next = paragraph[2:]
return prev, curr, next
def make_triples(self, lines):
"""Returns prev, curr, next lists based on lines.
Context is not shared between different paragraphs in text. So, last line in one paragraph
will not be in context with first line in the next paragraph.
Paragraphs must be separated by '\n\n'
There will be asymmetric context for first and last lines.
Args:
lines (list of str): List of lines.
Returns:
prev, curr, next (tuple of list of str):
"""
idxs = [-1]+list(filter(None, [i if len(lines[i]) == 0 else None for i in range(len(lines))]))+[len(lines)]
all_prev, all_curr, all_next = [], [], []
for start, end in zip(idxs[:-1], idxs[1:]):
tmp_prev, tmp_curr, tmp_next = self._make_triples_for_paragraph(lines[start+1:end])
if tmp_prev == [] or tmp_curr == [] or tmp_next == []:
continue
all_prev.extend(tmp_prev)
all_curr.extend(tmp_curr)
all_next.extend(tmp_next)
return all_prev, all_curr, all_next
def triples_data_iterator(self, prev_data, curr_data, next_data, max_len,
batch_size=64, shuffle=False):
"""Creates iterator for (current sentence, prev sentence, next sentence)
data. It is useful for training skip-thought vectors.
Args:
curr_data (list of lists of ints): List with raw lines which corresponds to current sentences.
Lines can be with different lengths. They will be encoder inputs.
prev_data (list of lists of ints): List with raw previous
lines. Lines can be with different lengths.
next_data (list of lists of ints): List with raw next lines.
Lines can be with different lengths.
max_len (int): Maximum length for padding previous and next sentences.
batch_size (int): Size of batch.
shuffle (bool): Whether to shuffle data or not.
Yields:
enc_inp, prev_inp, prev_targ, next_inp, next_targ (Batch)
"""
if shuffle:
indices = np.random.permutation(len(curr_data))
curr_data = [curr_data[i] for i in indices]
prev_data = [prev_data[i] for i in indices]
next_data = [next_data[i] for i in indices]
total_processed_examples = 0
total_steps = int(np.ceil(len(curr_data)) / float(batch_size))
for step in range(total_steps+1):
batch_start = step * batch_size
curr = curr_data[batch_start:batch_start + batch_size]
prev = prev_data[batch_start:batch_start + batch_size]
next = next_data[batch_start:batch_start + batch_size]
enc_inp = self.make_batch(self.encode_lines(curr))
prev_inp = self.make_batch(self.encode_lines(prev, with_go=True), max_len)
prev_targ = self.make_batch(self.encode_lines(prev, with_eos=True), max_len)
next_inp = self.make_batch(self.encode_lines(next, with_go=True), max_len)
next_targ = self.make_batch(self.encode_lines(next, with_eos=True), max_len)
assert prev_inp.shape == prev_targ.shape == next_inp.shape == next_targ.shape, (prev, curr, next)
yield enc_inp, prev_inp, prev_targ, next_inp, next_targ
total_processed_examples += len(curr)
if total_processed_examples == len(curr_data):
break
# Sanity check to make sure we iterated over all the dataset as intended
assert total_processed_examples == len(curr_data), \
'Expected {} and processed {}'.format(len(curr_data),
total_processed_examples)
@staticmethod
def save(textdata, fname):
with open(fname, 'wb') as fout:
dill.dump(textdata, fout)
@staticmethod
def load(fname):
with open(fname, 'rb') as fin:
return dill.load(fin) | {
"content_hash": "cdcf896d7d2bf8356c30d66ba97be12d",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 115,
"avg_line_length": 36.48870056497175,
"alnum_prop": 0.5841913757064334,
"repo_name": "persiyanov/skip-thought-tf",
"id": "bc8d06d9805a3c69f1c0728bf336d4ec6fcc02b8",
"size": "12917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skipthought/data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35040"
}
],
"symlink_target": ""
} |
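A rough end-to-end sketch of the classes above: build a TextData over a corpus file, derive (prev, curr, next) triples, and iterate training batches. It assumes the package is importable as skipthought.data_utils (the path given in the metadata) and that corpus.txt exists with blank lines separating paragraphs, as make_triples expects.
from skipthought.data_utils import TextData
td = TextData("corpus.txt", max_vocab_size=20000, max_len=30)
# td.dataset keeps every stripped line, including the empty ones that
# mark paragraph boundaries for make_triples.
prev, curr, nxt = td.make_triples(td.dataset)
for enc_inp, prev_inp, prev_targ, next_inp, next_targ in td.triples_data_iterator(
        prev, curr, nxt, max_len=30, batch_size=64, shuffle=True):
    # Each item is a Batch: .data is the padded array, .seq_lengths and .weights
    # feed the encoder/decoder placeholders of the skip-thought model.
    pass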
from __future__ import absolute_import
from .fastq_screen_module import MultiqcModule | {
"content_hash": "1a87cee4d11394e93eee2eba3b648d58",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 46,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.813953488372093,
"repo_name": "moonso/MultiQC",
"id": "e95b17cf94c9b714a8c4fe9712c2036762bbddc8",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multiqc/modules/fastq_screen/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11852"
},
{
"name": "HTML",
"bytes": "12484"
},
{
"name": "JavaScript",
"bytes": "57813"
},
{
"name": "Python",
"bytes": "136764"
}
],
"symlink_target": ""
} |
import tempfile
import unittest
from pyspark.ml import Estimator, Model
from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel, OneVsRest
from pyspark.ml.evaluation import BinaryClassificationEvaluator, \
MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.linalg import Vectors
from pyspark.ml.param import Param, Params
from pyspark.ml.tuning import CrossValidator, CrossValidatorModel, ParamGridBuilder, \
TrainValidationSplit, TrainValidationSplitModel
from pyspark.sql.functions import rand
from pyspark.testing.mlutils import SparkSessionTestCase
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
class CrossValidatorTests(SparkSessionTestCase):
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvCopied = cv.copy()
self.assertEqual(cv.getEstimator().uid, cvCopied.getEstimator().uid)
cvModel = cv.fit(dataset)
cvModelCopied = cvModel.copy()
for index in range(len(cvModel.avgMetrics)):
self.assertTrue(abs(cvModel.avgMetrics[index] - cvModelCopied.avgMetrics[index])
< 0.0001)
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_param_grid_type_coercion(self):
lr = LogisticRegression(maxIter=10)
paramGrid = ParamGridBuilder().addGrid(lr.regParam, [0.5, 1]).build()
for param in paramGrid:
for v in param.values():
assert(type(v) == float)
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for CrossValidator will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
lrModel = cvModel.bestModel
cvModelPath = temp_path + "/cvModel"
lrModel.save(cvModelPath)
loadedLrModel = LogisticRegressionModel.load(cvModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
def test_parallel_evaluation(self):
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
evaluator = BinaryClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cv.setParallelism(1)
cvSerialModel = cv.fit(dataset)
cv.setParallelism(2)
cvParallelModel = cv.fit(dataset)
self.assertEqual(cvSerialModel.avgMetrics, cvParallelModel.avgMetrics)
def test_expose_sub_models(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
numFolds = 3
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
numFolds=numFolds, collectSubModels=True)
def checkSubModels(subModels):
self.assertEqual(len(subModels), numFolds)
for i in range(numFolds):
self.assertEqual(len(subModels[i]), len(grid))
cvModel = cv.fit(dataset)
checkSubModels(cvModel.subModels)
# Test the default value for option "persistSubModel" to be "true"
testSubPath = temp_path + "/testCrossValidatorSubModels"
savingPathWithSubModels = testSubPath + "cvModel3"
cvModel.save(savingPathWithSubModels)
cvModel3 = CrossValidatorModel.load(savingPathWithSubModels)
checkSubModels(cvModel3.subModels)
cvModel4 = cvModel3.copy()
checkSubModels(cvModel4.subModels)
savingPathWithoutSubModels = testSubPath + "cvModel2"
cvModel.write().option("persistSubModels", "false").save(savingPathWithoutSubModels)
cvModel2 = CrossValidatorModel.load(savingPathWithoutSubModels)
self.assertEqual(cvModel2.subModels, None)
for i in range(numFolds):
for j in range(len(grid)):
self.assertEqual(cvModel.subModels[i][j].uid, cvModel3.subModels[i][j].uid)
def test_save_load_nested_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
originalParamMap = cv.getEstimatorParamMaps()
loadedParamMap = loadedCV.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
class TrainValidationSplitTests(SparkSessionTestCase):
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(0.0, min(validationMetrics))
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(1.0, max(validationMetrics))
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
lrModel = tvsModel.bestModel
tvsModelPath = temp_path + "/tvsModel"
lrModel.save(tvsModelPath)
loadedLrModel = LogisticRegressionModel.load(tvsModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
self.assertEqual(loadedTvs.getEstimatorParamMaps(), tvs.getEstimatorParamMaps())
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_parallel_evaluation(self):
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvs.setParallelism(1)
tvsSerialModel = tvs.fit(dataset)
tvs.setParallelism(2)
tvsParallelModel = tvs.fit(dataset)
self.assertEqual(tvsSerialModel.validationMetrics, tvsParallelModel.validationMetrics)
def test_expose_sub_models(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
collectSubModels=True)
tvsModel = tvs.fit(dataset)
self.assertEqual(len(tvsModel.subModels), len(grid))
# Test the default value for option "persistSubModel" to be "true"
testSubPath = temp_path + "/testTrainValidationSplitSubModels"
savingPathWithSubModels = testSubPath + "cvModel3"
tvsModel.save(savingPathWithSubModels)
tvsModel3 = TrainValidationSplitModel.load(savingPathWithSubModels)
self.assertEqual(len(tvsModel3.subModels), len(grid))
tvsModel4 = tvsModel3.copy()
self.assertEqual(len(tvsModel4.subModels), len(grid))
savingPathWithoutSubModels = testSubPath + "cvModel2"
tvsModel.write().option("persistSubModels", "false").save(savingPathWithoutSubModels)
tvsModel2 = TrainValidationSplitModel.load(savingPathWithoutSubModels)
self.assertEqual(tvsModel2.subModels, None)
for i in range(len(grid)):
self.assertEqual(tvsModel.subModels[i].uid, tvsModel3.subModels[i].uid)
def test_save_load_nested_estimator(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
tvs = TrainValidationSplit(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
originalParamMap = tvs.getEstimatorParamMaps()
loadedParamMap = loadedTvs.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsCopied = tvs.copy()
tvsModelCopied = tvsModel.copy()
self.assertEqual(tvs.getEstimator().uid, tvsCopied.getEstimator().uid,
"Copied TrainValidationSplit has the same uid of Estimator")
self.assertEqual(tvsModel.bestModel.uid, tvsModelCopied.bestModel.uid)
self.assertEqual(len(tvsModel.validationMetrics),
len(tvsModelCopied.validationMetrics),
"Copied validationMetrics has the same size of the original")
for index in range(len(tvsModel.validationMetrics)):
self.assertEqual(tvsModel.validationMetrics[index],
tvsModelCopied.validationMetrics[index])
if __name__ == "__main__":
from pyspark.ml.tests.test_tuning import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| {
"content_hash": "b1e49308dfbfa7bce605f4bd4460acc7",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 95,
"avg_line_length": 42.42125237191651,
"alnum_prop": 0.6199677938808373,
"repo_name": "mdespriee/spark",
"id": "39bb921aaf43dc0f5208cfd6bb1148129eecb740",
"size": "23141",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/tests/test_tuning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "35100"
},
{
"name": "Batchfile",
"bytes": "30468"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26888"
},
{
"name": "Dockerfile",
"bytes": "8760"
},
{
"name": "HTML",
"bytes": "70229"
},
{
"name": "HiveQL",
"bytes": "1823426"
},
{
"name": "Java",
"bytes": "3442889"
},
{
"name": "JavaScript",
"bytes": "196727"
},
{
"name": "Makefile",
"bytes": "9397"
},
{
"name": "PLpgSQL",
"bytes": "191716"
},
{
"name": "PowerShell",
"bytes": "3856"
},
{
"name": "Python",
"bytes": "2839049"
},
{
"name": "R",
"bytes": "1149818"
},
{
"name": "Roff",
"bytes": "15677"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "27840431"
},
{
"name": "Shell",
"bytes": "203155"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
} |
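Stripped of the test scaffolding, the CrossValidator pattern these cases exercise looks roughly like this; it assumes an existing SparkSession bound to the name spark.
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.linalg import Vectors
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
dataset = spark.createDataFrame(
    [(Vectors.dense([0.0]), 0.0), (Vectors.dense([1.0]), 1.0)] * 20,
    ["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.regParam, [0.01, 0.1]).build()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid,
                    evaluator=BinaryClassificationEvaluator(), numFolds=3)
cvModel = cv.fit(dataset)  # bestModel is chosen by the metric averaged across folds
print(cvModel.avgMetrics)  # one averaged area-under-ROC value per grid point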
"""
Chat Room Demo for Miniboa.
"""
from miniboa import TelnetServer
IDLE_TIMEOUT = 300
CLIENT_LIST = []
SERVER_RUN = True
def on_connect(client):
"""
Sample on_connect function.
Handles new connections.
"""
print "++ Opened connection to %s" % client.addrport()
broadcast('%s joins the conversation.\n' % client.addrport() )
CLIENT_LIST.append(client)
client.send("Welcome to the Chat Server, %s.\n" % client.addrport() )
def on_disconnect(client):
"""
Sample on_disconnect function.
Handles lost connections.
"""
print "-- Lost connection to %s" % client.addrport()
CLIENT_LIST.remove(client)
broadcast('%s leaves the conversation.\n' % client.addrport() )
def kick_idle():
"""
Looks for idle clients and disconnects them by setting active to False.
"""
## Who hasn't been typing?
for client in CLIENT_LIST:
if client.idle() > IDLE_TIMEOUT:
print('-- Kicking idle lobby client from %s' % client.addrport())
client.active = False
def process_clients():
"""
Check each client, if client.cmd_ready == True then there is a line of
input available via client.get_command().
"""
for client in CLIENT_LIST:
if client.active and client.cmd_ready:
## If the client sends input echo it to the chat room
chat(client)
def broadcast(msg):
"""
Send msg to every client.
"""
for client in CLIENT_LIST:
client.send(msg)
def chat(client):
"""
Echo whatever client types to everyone.
"""
global SERVER_RUN
msg = client.get_command()
print '%s says, "%s"' % (client.addrport(), msg)
for guest in CLIENT_LIST:
if guest != client:
guest.send('%s says, %s\n' % (client.addrport(), msg))
else:
guest.send('You say, %s\n' % msg)
cmd = msg.lower()
## bye = disconnect
if cmd == 'bye':
client.active = False
## shutdown == stop the server
elif cmd == 'shutdown':
SERVER_RUN = False
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
if __name__ == '__main__':
## Simple chat server to demonstrate connection handling via the
## async and telnet modules.
## Create a telnet server with a port, address,
## a function to call with new connections
## and one to call with lost connections.
telnet_server = TelnetServer(
port=7777,
address='',
on_connect=on_connect,
on_disconnect=on_disconnect,
timeout = .05
)
print(">> Listening for connections on port %d. CTRL-C to break."
% telnet_server.port)
## Server Loop
while SERVER_RUN:
telnet_server.poll() ## Send, Recv, and look for new connections
kick_idle() ## Check for idle clients
process_clients() ## Check for client input
print(">> Server shutdown.")
| {
"content_hash": "02f40eefe83227a052462526a4b5dcec",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 26.5,
"alnum_prop": 0.5637605725439168,
"repo_name": "tectronics/miniboa",
"id": "42666e6167449056db050dd8176f1929b05e1612",
"size": "3854",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "chat_demo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44065"
}
],
"symlink_target": ""
} |
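Once the demo above is listening on port 7777, any raw TCP client can join the room. A minimal standard-library sketch of a test client follows; the bytes received may include telnet negotiation sequences from miniboa, hence the lenient decode.
import socket
# Connect, read the welcome banner, say something, then ask to be dropped.
with socket.create_connection(("localhost", 7777)) as sock:
    print(sock.recv(1024).decode("ascii", "replace"))
    sock.sendall(b"hello from a test client\n")
    sock.sendall(b"bye\n")  # 'bye' marks this client inactive on the server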
from django.db import models
from django.conf import settings
from django.http import QueryDict
from django.utils.http import urlencode
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import unquote_plus
from paypal.standard.models import PayPalStandardBase
from paypal.standard.conf import POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
from paypal.standard.pdt.signals import pdt_successful, pdt_failed
# ### Todo: Move this logic to conf.py:
# if paypal.standard.pdt is in installed apps
# ... then check for this setting in conf.py
class PayPalSettingsError(Exception):
"""Raised when settings are incorrect."""
try:
IDENTITY_TOKEN = settings.PAYPAL_IDENTITY_TOKEN
except:
raise PayPalSettingsError(
"You must set PAYPAL_IDENTITY_TOKEN in settings.py. Get this token by enabling PDT in your PayPal account.")
class PayPalPDT(PayPalStandardBase):
format = u"<PDT: %s %s>"
amt = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
cm = models.CharField(max_length=255, blank=True)
sig = models.CharField(max_length=255, blank=True)
tx = models.CharField(max_length=255, blank=True)
st = models.CharField(max_length=32, blank=True)
class Meta:
db_table = "paypal_pdt"
verbose_name = "PayPal PDT"
def _postback(self):
"""
Perform PayPal PDT Postback validation.
Sends the transaction ID and business token to PayPal, which responds with
SUCCESS or FAILED.
"""
postback_dict = dict(cmd="_notify-synch", at=IDENTITY_TOKEN, tx=self.tx)
postback_params = urlencode(postback_dict)
return urlopen(self.get_endpoint(), postback_params).read()
def get_endpoint(self):
"""Use the sandbox when in DEBUG mode as we don't have a test_ipn variable in pdt."""
if getattr(settings, 'PAYPAL_DEBUG', settings.DEBUG):
return SANDBOX_POSTBACK_ENDPOINT
else:
return POSTBACK_ENDPOINT
def _verify_postback(self):
# ### Now we don't really care what result was, just whether a flag was set or not.
from paypal.standard.pdt.forms import PayPalPDTForm
# TODO: this needs testing and probably fixing under Python 3
result = False
response_list = self.response.split('\n')
response_dict = {}
for i, line in enumerate(response_list):
unquoted_line = unquote_plus(line).strip()
if i == 0:
self.st = unquoted_line
if self.st == "SUCCESS":
result = True
else:
if self.st != "SUCCESS":
self.set_flag(line)
break
try:
if not unquoted_line.startswith(' -'):
k, v = unquoted_line.split('=')
response_dict[k.strip()] = v.strip()
except ValueError:
pass
qd = QueryDict('', mutable=True)
qd.update(response_dict)
qd.update(dict(ipaddress=self.ipaddress, st=self.st, flag_info=self.flag_info, flag=self.flag,
flag_code=self.flag_code))
pdt_form = PayPalPDTForm(qd, instance=self)
pdt_form.save(commit=False)
def send_signals(self):
# Send the PDT signals...
if self.flag:
pdt_failed.send(sender=self)
else:
pdt_successful.send(sender=self)
| {
"content_hash": "94974aecc1ddc0f11dc87a981cef0a4a",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 116,
"avg_line_length": 37.212765957446805,
"alnum_prop": 0.6246426529445397,
"repo_name": "caleb/idea-color-themes",
"id": "9ad330342da81de82fabf98ed6a54272d8d84267",
"size": "3544",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "paypal/standard/pdt/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128643"
},
{
"name": "HTML",
"bytes": "44913"
},
{
"name": "JavaScript",
"bytes": "994"
},
{
"name": "Python",
"bytes": "246659"
}
],
"symlink_target": ""
} |
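PayPal's reply to the cmd=_notify-synch postback, as _verify_postback above expects, is a plain-text body whose first line is SUCCESS on a valid transaction (anything else is treated as a failure), followed by URL-encoded key=value pairs. Below is a standalone sketch of that parsing step against a made-up response body, using urllib.parse directly instead of the six.moves wrapper.
from urllib.parse import unquote_plus
response = "SUCCESS\nfirst_name=John\nlast_name=Doe\npayment_gross=25.00\nmc_currency=USD\n"
lines = response.split("\n")
status = unquote_plus(lines[0]).strip()  # "SUCCESS" or a failure message
fields = {}
if status == "SUCCESS":
    for line in lines[1:]:
        line = unquote_plus(line).strip()
        if "=" in line:
            k, v = line.split("=", 1)
            fields[k.strip()] = v.strip()
# fields now holds the transaction details used to populate the PDT record.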
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("MLPClassifier" , "FourClass_500" , "hive")
| {
"content_hash": "bf0553d611f84db219f617b85e2a3164",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35,
"alnum_prop": 0.7785714285714286,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "57499ab98ebab0443c5fcb95fcdb3f74d86faf87",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/FourClass_500/ws_FourClass_500_MLPClassifier_hive_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import version
version.append ('$Revision: 89494 $')
del version
import ktl
import os
import time
import tkFileDialog
import Tkinter
from Tkinter import N, E, S, W
import traceback
import Color
import Font
import Event
import Log
import Popup as kPopup # avoid name conflict with class Popup below
import Value
description = None
directory = '~/observers'
extension = None
mode = 0744
preamble = None
prefix = ''
def setDirectory (path):
if os.path.isabs (path):
pass
else:
path = os.path.expanduser (path)
if os.path.isdir (path):
pass
else:
raise ValueError, "directory does not exist: %s" % (path)
global directory
directory = path
try:
setDirectory (directory)
except ValueError:
directory = None
def setDescription (string):
global description
description = string
def setExtension (string):
global extension
extension = string
def setPreamble (string):
global preamble
preamble = string
def setPrefix (string):
global prefix
prefix = string
def shortname (filename):
''' Return the base setup filename with no extension.
If an absolute path is provided, the directory
components will be removed.
'''
extension_length = len (extension)
filename = os.path.basename (filename)
shortname = filename[:-extension_length]
return shortname
class Bar:
def __init__ (self, main):
frame = Tkinter.Frame (master = main.top,
background = Color.menu,
borderwidth = 0,
relief = 'solid')
menu = Menu (master=frame, main=main)
label = Value.WhiteLabel (master=frame, text='Instrument setups')
self.main = main
self.menu = menu
self.frame = frame
self.label = label
self.grid = frame.grid
self.grid_remove = frame.grid_remove
frame.columnconfigure (0, weight=0)
frame.columnconfigure (1, weight=1)
frame.rowconfigure (0, weight=0)
label.grid (row=0, column=0, padx=(7,0), pady=3, sticky=W)
menu.grid (row=0, column=1, padx=3, sticky=W)
# Need to shift the frames about in the Main.Window.
main.frame.grid (row=1)
self.frame.grid (row=0, column=0, sticky=N+E+S+W)
main.top.rowconfigure (0, weight=0)
main.top.rowconfigure (1, weight=1)
# end of class Bar
class Setup:
''' This is a helper class to encapsulate the logic asserting
whether or not a given setup matches the active state of
the instrument. Almost as a side effect, it retains the
original content of the setup, and can distribute its
choices to the Main.Window.
'''
numeric = {'KTL_DOUBLE': True, 'KTL_FLOAT': True, 'KTL_INT': True}
def __init__ (self, filename, main, watch=True):
self.main = main
self.filename = filename
self.choices = {}
self.watching = {}
self.loadSetup ()
if watch == True:
self.watch ()
def match (self):
''' Return True if the current available state matches
the saved setup.
'''
if len (self.watching) == 0:
raise RuntimeError, "can't match() a setup without first invoking watch()"
for keyword in self.keywords ():
if keyword['populated'] == False:
return False
new = keyword['ascii']
target = self.watching[keyword]
if keyword['type'] in self.numeric:
new = float (new)
if keyword['type'] == 'KTL_INT':
new = int (new)
else:
new = new.lower ()
if new != target:
return False
return True
def keywords (self):
''' Provide a tuple containing all the keywords of interest
to this Setup.
'''
return self.watching.keys ()
def loadSetup (self):
if self.filename == None:
return
prefix_length = len (prefix)
setup = open (self.filename, 'r')
contents = setup.read ()
setup.close ()
lines = contents.split ('\n')
for line in lines:
if line[:prefix_length] == prefix:
choice = {}
components = line.split ()
keyword = components[1]
# If a keyword choice includes embedded
# whitespace, it got butchered with the
# line.split () above.
if len (components) > 3:
# Remove leading prefix.
choice = line[prefix_length:]
choice = choice.strip ()
# Remove leading keyword name, which
# may include the service name.
choice = choice[len (keyword):]
choice = choice.strip ()
else:
# No embedded whitespace.
choice = components[2]
# New setup files have the service
# name embedded in the 'keyword' field,
# in the form 'service.KEYWORD'. Old files
# only specify the keyword.
service = None
if '.' in keyword:
service,keyword = keyword.split ('.', 1)
service = service.strip ()
keyword = keyword.strip ()
if service == '' or keyword == '':
# Badly formed pair. Assume
# it is just a keyword.
service = None
keyword = components[1]
# If no service is specified, it is
# implied that the main.service is
# appropriate.
if service == None:
service = self.main.service.name
if service in self.choices:
pass
else:
self.choices[service] = {}
self.choices[service][keyword] = choice
def watch (self, expand=True):
''' Ensure that all Keyword objects associated with this
setup are being monitored. We don't need to register
any callbacks, because state is only inspected when
self.match () is invoked.
If expand is set to True, and if the SWT keyword exists
for the relevant stage (if any), monitoring of RAW will
be replaced by TRG, VAL by TVA, and VAX by TVX.
'''
services = self.main.services
for service in self.choices.keys ():
if service in services:
pass
else:
ktl.log (ktl.LOG_INFO, "GUI: unknown service '%s' used in setup '%s'" % (service, self.filename))
continue
for keyword in self.choices[service].keys ():
if expand == True:
# Swap out the 'actual' keyword for the
# 'target', and add in monitoring of
# the 'sweet' keyword that indicates
# whether the actual value is still
# within tolerance of the target.
# If either keyword does not exist,
# fall back to using the original
# keyword.
prefix = keyword[:-3]
suffix = keyword[-3:]
sweet = '%sSWT' % (prefix)
if sweet in services[service]:
if suffix == 'RAW':
new = "%s%s" % (prefix, 'TRG')
elif suffix == 'VAL':
new = "%s%s" % (prefix, 'TVA')
elif suffix == 'VAX':
new = "%s%s" % (prefix, 'TVX')
else:
new = None
if new != None:
# If the 'new' keyword
# exists, use it instead
# of the original.
if new in services[service]:
self.choices[service][new] = self.choices[service][keyword]
keyword = new
else:
new = None
if new != None:
# We're using the new
# keyword name. Append
# the 'sweet' keyword
# to the set of keywords
# being watched. String
# matches are lower
# case.
sweet = services[service][sweet]
self.watching[sweet] = 'true'
if sweet['monitored'] == False:
sweet.monitor (wait=False)
if keyword in services[service]:
choice = self.choices[service][keyword]
keyword = services[service][keyword]
else:
ktl.log (ktl.LOG_INFO, "GUI: unknown keyword '%s' used in setup '%s'" % (keyword, self.filename))
continue
target = choice
if keyword['type'] in self.numeric:
target = float (target)
if keyword['type'] == 'KTL_INT':
target = int (target)
else:
target = target.lower ()
self.watching[keyword] = target
if keyword['monitored'] == False:
keyword.monitor (wait=False)
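# Example of the expansion above (keyword names are hypothetical): for a saved
# choice on 'gratingVAL', watch() monitors 'gratingTVA' (the target) instead,
# and additionally watches 'gratingSWT' with an expected value of 'true', so the
# setup only matches while the stage is within tolerance of its target.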
def apply (self):
''' Iterate through all Info boxes in the Main.Window, and
apply the saved values as 'choices' for any Info box
that matches the designated keyword. It is not expected,
but also not assumed otherwise, that a saved value is
relevant for more than one Info box.
'''
for box in self.main.info_boxes:
# Skip hidden boxes.
if box.hidden == True:
continue
if hasattr (box, 'value'):
value = box.value
if hasattr (value, 'keywords'):
keywords = value.keywords ()
else:
keywords = ()
for keyword in keywords:
service = keyword['service']
keyword = keyword['name']
try:
choice = self.choices[service][keyword]
except KeyError:
continue
value.choose (choice)
# end of class Setup
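# Minimal usage sketch (hypothetical, for orientation only):
#
#     setup = Setup('/data/setups/night.setup', main)  # loads file, watches keywords
#     if setup.match():      # every watched keyword is at its saved value
#         setup.apply()      # push the saved choices into the Main.Window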
class Menu (Value.ArbitraryMenu):
''' Menu object used with instrument setups. The 'active' setup,
if any, will be the displayed value; available setup choices for
a specific setups directory will be displayed in the menu cascade.
One menu choice at the end will be to select a new setups
directory. The act of choosing a new directory will cause the menu
to repopulate.
'''
def __init__ (self, *args, **kwargs):
kwargs['go'] = False
Value.ArbitraryMenu.__init__ (self, *args, **kwargs)
self.directory_timestamp = None
self.file_timestamps = None
self.check_delay = Event.delay * 20
self.directory_check = None
self.file_check = None
self.clear_at_noon = None
self.directory = None
self.filenames = ()
self.fullpaths = ()
self.shortnames = ()
self.setups = {}
self.matches = {}
self.watching = {}
self.last_choice = None
self.buildMenu (())
self.redraw ()
def choose (self, choice):
''' A 'choice' for a Menu is an instrument setup.
For the chosen setup, apply its individual settings
to the rest of the GUI.
'''
# Main.Window.clearChoices() will invoke choose (None).
# Don't do anything with it, because we never have a
# choice set, we only use overrides.
if choice == None:
return
self.setups[choice].apply ()
if self.last_choice != choice:
self.last_choice = choice
Event.queue (self.redraw)
def receiveCallback (self, keyword):
''' Check any known setups for interest in this keyword.
If there are any, check for matches.
'''
if keyword in self.watching:
setups = self.watching[keyword]
else:
# Shouldn't happen. We should only get callbacks
# for keywords that have watchers associated with
# them.
return
changed = False
for setup in setups:
match = setup.match ()
if match == False and setup in self.matches:
del self.matches[setup]
changed = True
elif match == True:
if setup in self.matches:
pass
else:
short = shortname (setup.filename)
self.matches[setup] = short
changed = True
if changed == True:
Event.queue (self.redraw)
def setMenu (self, menu_items):
''' Add options to the end of the menu to manipulate
the displayed list of setups.
'''
# Putting an empty tuple in the list indicates the desire
# for a menu separator.
menu_items.append (())
if self.directory != None:
label = 'Clear setup list'
command = self.forgetDirectory
menu_items.append ((label, command))
label = 'Load new setup file...'
command = self.chooseFile
menu_items.append ((label, command))
# Different label for the next menu item.
label = 'Choose new directory...'
else:
label = 'Load setup file...'
command = self.chooseFile
menu_items.append ((label, command))
# Different label for the next menu item.
label = 'Choose directory...'
command = self.chooseDirectory
menu_items.append ((label, command))
label = 'Save new setup...'
command = self.saveSetup
menu_items.append ((label, command))
return Value.ArbitraryMenu.setMenu (self, menu_items)
def redraw (self):
override = None
# Determine just how much room we have, in characters.
width = self.master.winfo_width ()
size = Font.display.cget ('size')
characters = int (width / size)
# Lop off an arbitrary number of characters to allow
# for a label on the left side.
characters = characters - 20
# Track which setups are already in the displayed value,
# since we might approach them out of order later on.
included = []
if self.last_choice != None and self.last_choice in self.setups:
setup = self.setups[self.last_choice]
if setup in self.matches:
override = self.last_choice
included.append (self.last_choice)
if override == None and len (self.matches) != 0:
# Since the user's last chosen setup is not
# currently a match, grab any old match and
# display it.
matches = self.matches.values ()
matches.sort ()
override = matches[0]
included.append (matches[0])
if override != None and len (self.matches) > 1:
# Show more than one name if we have enough
# space. Leave enough room for the '+# more'.
maximum = characters - 7
displayed = 1
for name in self.matches.values ():
if len (override) >= maximum:
break
if name in included:
continue
if len (override) + len (name) + 2 > maximum:
continue
override = "%s, %s" % (override, name)
included.append (name)
displayed += 1
remainder = len (self.matches) - displayed
if remainder > 0:
remainder = "+%d more" % (remainder)
if len (included) > 1:
override = "%s, %s" % (override, remainder)
else:
override = "%s %s" % (override, remainder)
if override == None:
self.setOverride ('None')
else:
self.setOverride (override)
# Having determined what the displayed value should
# be, redraw.
Value.ArbitraryMenu.redraw (self)
# Now that the menu is guaranteed to be fully established,
# highlight the matching setup(s) in the menu. If there are
# no matches, ensure that nothing is highlighted.
matches = self.matches.values ()
index = 0
while True:
try:
label = self.menu.entrycget (index, 'label')
except:
# Tried to get the label on a separator.
# There is only one separator in this menu,
# and there are no setups listed beyond it.
break
background = self.menu.entrycget (index, 'background')
if label in matches:
if background != Color.selection:
self.menu.entryconfigure (index, background=Color.selection)
elif background != Color.white:
self.menu.entryconfigure (index, background=Color.white)
index += 1
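# Illustrative rendering (setup names are hypothetical): with three matching
# setups and room to display two of them, the override built above would read
#     "night_spec, flat_field, +1 more"
# and each matching entry in the menu cascade is highlighted with Color.selection.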
def setDirectory (self, directory):
if directory == self.directory:
return False
# Initialize our contents for this new directory.
self.directory_timestamp = None
self.directory = directory
self.last_choice = None
self.checkDirectory ()
# If it is after noon local time, or before 6AM, set a
# timed event to clear the list of displayed setups
# at noon.
if self.clear_at_noon == False:
# Do no clearing.
pass
elif directory != None:
now = time.localtime ()
hour = now.tm_hour
if hour < 6 or hour >= 12:
hours = 12 - hour
if hour >= 12:
hours = hours + 24
# Add an extra minute to the timer to ensure
# that the expiration happens after the clock
# strikes 12.
minutes = hours * 60 - now.tm_min + 1
seconds = minutes * 60
milliseconds = seconds * 1000
self.clear_at_noon = self.after (milliseconds, self.forgetDirectory)
elif self.clear_at_noon != None:
# No directory active, there isn't anything to clear.
self.after_cancel (self.clear_at_noon)
self.clear_at_noon = None
return True
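# Worked example of the timer above (times hypothetical): at 22:15 local time,
# hours = 12 - 22 + 24 = 14 and minutes = 14 * 60 - 15 + 1 = 826, so the
# displayed setup list is cleared at 12:01 the following day, one guard minute
# after noon.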
def forgetDirectory (self, *ignored):
''' Establish a clean slate for the menu.
'''
self.setDirectory (None)
self.clearSetups ()
self.last_choice = None
Event.queue (self.redraw)
def checkDirectory (self):
''' Check the timestamp on the directory. If it has
changed, reload the file list.
'''
if self.directory == None:
self.directory_timestamp = None
if self.directory_check != None:
self.after_cancel (self.directory_check)
self.directory_check = None
if self.file_check != None:
self.after_cancel (self.file_check)
self.file_check = None
return
times = os.stat (self.directory)
mtime = times.st_mtime
if mtime != self.directory_timestamp:
self.directory_timestamp = mtime
self.loadDirectory ()
# loadDirectory () cleared the menu. Put it back.
self.buildMenu (self.shortnames)
self.checkFiles ()
# Check for matches.
for keyword in self.watching.keys ():
self.receiveCallback (keyword)
# If there weren't any matches, we need to redraw the
# menu to initialize its display.
if len (self.matches) == 0:
Event.queue (self.redraw)
if self.directory_check != None:
self.after_cancel (self.directory_check)
self.directory_check = self.after (self.check_delay, self.checkDirectory)
def loadDirectory (self):
self.clearSetups ()
if self.directory == None:
return
files = os.listdir (self.directory)
files.sort ()
self.file_timestamps = {}
filenames = []
fullpaths = []
shortnames = []
tag = extension
tag_length = len (extension)
for file in files:
# Skip non-setup files.
if file[-tag_length:] != tag:
continue
fullpath = os.path.join (self.directory, file)
short = shortname (file)
filenames.append (file)
fullpaths.append (fullpath)
shortnames.append (short)
self.filenames = filenames
self.fullpaths = fullpaths
self.shortnames = shortnames
if len (shortnames) == 0:
self.forgetDirectory ()
def checkFiles (self):
''' Check the known timestamp for all the setup files we're
tracking, and stat the file to look for any changes.
In the event that there are changes, reload the setup
file.
'''
if self.directory == None:
if self.file_check != None:
self.after_cancel (self.file_check)
self.file_check = None
return
redraw = False
for file in self.fullpaths:
try:
times = os.stat (file)
except OSError:
# File is gone. This will get handled by
# checkDirectory() since the directory
# mtime changed.
continue
mtime = times.st_mtime
if file in self.file_timestamps:
if mtime != self.file_timestamps[file]:
try:
self.loadSetup (file)
except:
self.invalidSetup (fullpath=file)
Log.error ("Error loading '%s':\n%s" % (file, traceback.format_exc ()))
continue
redraw = True
else:
try:
self.loadSetup (file)
except:
self.invalidSetup (fullpath=file)
Log.error ("Error loading '%s':\n%s" % (file, traceback.format_exc ()))
continue
redraw = True
if redraw == True:
Event.queue (self.redraw)
if self.file_check != None:
self.after_cancel (self.file_check)
self.file_check = self.after (self.check_delay, self.checkFiles)
def invalidSetup (self, filename=None, fullpath=None, shortname=None):
''' Remove a specific setup from the menu. This is only
invoked by self.checkFiles() in the event that
a setup file does not successfully load.
'''
if filename != None:
index = self.filenames.index (filename)
elif fullpath != None:
index = self.fullpaths.index (fullpath)
elif shortname != None:
index = self.shortnames.index (shortname)
else:
raise RuntimeError, 'cannot invoke invalidSetup() with no arguments'
filename = self.filenames[index]
Log.alert ("Invalid setup file: %s" % (filename))
self.filenames.pop (index)
self.fullpaths.pop (index)
self.shortnames.pop (index)
self.buildMenu (self.shortnames)
self.redraw ()
def clearSetups (self):
''' Remove all known setups, and clean up any lingering
callbacks.
'''
if self.directory_check != None:
self.after_cancel (self.directory_check)
self.directory_check = None
if self.file_check != None:
self.after_cancel (self.file_check)
self.file_check = None
self.matches = {}
self.filenames = ()
self.fullpaths = ()
for shortname in self.shortnames:
setup = self.setups[shortname]
del (self.setups[shortname])
self.shortnames = ()
for keyword in self.watching.keys ():
keyword.callback (self.receiveCallback, remove=True)
del (self.watching[keyword])
# Having tidied up everything else, build an empty menu.
self.buildMenu (())
def chooseDirectory (self, *ignored):
if description == None:
raise RuntimeError, 'Setups.description must be defined in order to work with setups'
if self.directory == None:
# Use the module default.
initial = directory
else:
initial = self.directory
name = tkFileDialog.askdirectory (title='Select a directory containing %s' % (description),
initialdir=initial,
mustexist=True)
# And, the results...
if name == None or name == () or name == '':
# No directory chosen, maybe they hit cancel. That's OK.
return
changed = self.setDirectory (name)
def chooseFile (self, *ignored):
if description == None:
raise RuntimeError, 'Setups.description must be defined in order to work with setups'
if extension == None:
raise RuntimeError, 'Setups.extension must be defined in order to work with setups'
if prefix == None:
raise RuntimeError, 'Setups.prefix must be defined in order to work with setups'
if self.directory == None:
initial = directory
else:
initial = self.directory
filename = tkFileDialog.askopenfilename (title='Select a %s file' % (extension),
defaultextension=extension,
initialdir=initial,
filetypes=[(description, extension), ('all', '.*')],
multiple=False)
# And, the results...
if filename == None or filename == () or filename == '':
# No file chosen, maybe they hit cancel. That's OK.
return
basedir = os.path.dirname (filename)
changed = self.setDirectory (basedir)
redraw = False
short = shortname (filename)
if self.last_choice != short:
self.last_choice = short
redraw = True
if changed == False and redraw == True:
# Same directory, but the user selected a different
# file as their choice. Update the display.
Event.queue (self.redraw)
if self.last_choice != None:
self.setups[self.last_choice].apply ()
def saveSetup (self, *ignored):
Popup (self.main)
def loadSetup (self, file):
times = os.stat (file)
mtime = times.st_mtime
short = shortname (file)
if short in self.setups:
old = self.setups[short]
try:
del (self.matches[old])
except KeyError:
pass
for keyword in self.watching.keys ():
if old in self.watching[keyword]:
self.watching[keyword].remove (old)
self.file_timestamps[file] = mtime
setup = Setup (file, self.main)
self.setups[short] = setup
keywords = setup.keywords ()
for keyword in keywords:
if keyword in self.watching:
pass
else:
self.watching[keyword] = []
keyword.callback (self.receiveCallback)
self.watching[keyword].append (setup)
# Check for matches.
self.receiveCallback (keyword)
# end of class Menu
class Popup (kPopup.Selection):
def __init__ (self, main):
kPopup.Selection.__init__ (self, main)
self.top.title ('Select values for saved setup')
self.frame.rowconfigure (0, weight=0)
self.action_button['text'] = 'Save as...'
self.action_button['command'] = self.save
self.checkButtons ()
# Go back through the set of discovered keyword values,
# and do intelligent substitution of RAW/VAL/VAX for
# their respective 'target' keyword, TRG/TVA/TVX.
for row in self.rows:
# Skip rows whose value was set via an active
# choice, which is already effectively a target
# value.
if row.origin == 'choice':
continue
keyword = row.keyword
if isinstance (keyword, ktl.Keyword):
pass
else:
continue
suffix = keyword['name'][-3:]
if suffix == 'RAW' or suffix == 'VAL' or suffix == 'VAX':
prefix = keyword['name'][:-3]
service = keyword['service']
service = self.main.services[service]
if suffix == 'RAW':
target = 'TRG'
elif suffix == 'VAL':
target = 'TVA'
elif suffix == 'VAX':
target = 'TVX'
target = "%s%s" % (prefix, target)
sweet = "%s%s" % (prefix, 'SWT')
if target in service and sweet in service:
sweet = service[sweet]
if sweet['populated'] == True and \
sweet['monitored'] == True:
sweet = sweet['binary']
else:
sweet = sweet.read (binary=True)
if sweet == False:
# Not in tolerance. Don't store
# the target value, store the
# actual value. Go to the next
# SelectRow.
continue
else:
# No keyword to indicate that the stage
# is still within tolerance, or no
# target keyword exists. Either way,
# skip ahead to the next SelectRow.
continue
# Replace the 'actual' value with the 'target'
# value.
target = service[target]
if target['populated'] == True and \
target['monitored'] == True:
target = target['ascii']
else:
target = target.read ()
row.value = target
Event.tkSet (row.choice, 'text', target)
def save (self):
if description == None:
raise RuntimeError, 'Setups.description must be defined in order to work with setups'
if extension == None:
raise RuntimeError, 'Setups.extension must be defined in order to work with setups'
if preamble == None:
raise RuntimeError, 'Setups.preamble must be defined in order to work with setups'
# Toss up a file chooser.
filename = tkFileDialog.asksaveasfilename (title='Save as...',
defaultextension=extension,
initialdir=directory,
initialfile='new_setup%s' % (extension),
filetypes=[(description, extension), ('all', '.*')])
# And, the results...
if filename == None or filename == () or filename == '':
# No file chosen-- maybe they hit cancel. That's OK.
pass
else:
# Enforce a filename suffix matching the
# Setups.extension. The use of 'defaultextension'
# above in asksaveasfilename() will append the correct
# extension if *no* extension is present in the chosen
# filename (such as 'foo'), but it will not append the
# value if any *other* extension is present (such as
# foo.bar).
extension_length = len (extension)
if len (filename) < extension_length or \
filename[-extension_length:] != extension:
filename = '%s%s' % (filename, extension)
try:
new_file = open (filename, 'w')
except IOError:
# Override permissions, if possible. This
# only occurs if the file exists, and the
# running user can't write to it-- note
# that in these circumstances, the user
# was already prompted to confirm that
# they want to overwrite the existing file.
os.chmod (filename, 0700)
new_file = open (filename, 'w')
new_file.write (preamble)
new_file.write ('\n')
# Acquire a list of active values-- rows that
# were actively selected, whose keyword affiliations
# are known, and that have a value present.
choices = []
counts = {}
for row in self.rows:
if row.selected == True and \
row.keyword != None and \
row.value != None:
pair = (row.keyword, row.value)
choices.append (pair)
try:
counts[row.keyword] += 1
except KeyError:
counts[row.keyword] = 1
# First, add in the comment-formatted values.
for pair in choices:
keyword = pair[0]
value = pair[1]
line = '%s %s.%s %s\n' % (prefix, keyword['service'], keyword['name'], value)
new_file.write (line)
new_file.write ('\n')
# Second, add in the script-formatted values.
# Each command is a backgrounded KTL_WAIT write.
# If there are duplicate keywords, it is essential
# that the 'first' write finish before the 'second'
# request is fired off.
for pair in choices:
keyword = pair[0]
value = pair[1]
if counts[keyword] == 1:
line = "modify -s %s %s='%s' &\n" % (keyword['service'], keyword['name'], value)
else:
line = "modify -s %s %s='%s'\n" % (keyword['service'], keyword['name'], value)
counts[keyword] -= 1
new_file.write (line)
# Wait for the backgrounded modify calls to finish,
# regardless of success or failure, before
# exiting the script.
comment = '\n# Wait for all backgrounded modify calls to complete before exiting.\n'
line = 'wait\n'
new_file.write (comment)
new_file.write (line)
new_file.close ()
os.chmod (filename, mode)
self.destroy ()
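# Illustrative only -- with a hypothetical selected keyword mot1.TVA at 1.5,
# the file written by save() would look roughly like:
#
#     <preamble text>
#     <prefix> mot1.TVA 1.5
#
#     modify -s mot1 TVA='1.5' &
#
#     # Wait for all backgrounded modify calls to complete before exiting.
#     wait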
def checkButtons (self, box=None):
''' In addition to the per-color selections, adjust the
Save As... button according to any selections.
'''
kPopup.Selection.checkButtons (self, box)
# Disable the 'Save As...' button if no stages are selected.
try:
button = self.action_button
except AttributeError:
# The action button doesn't exist yet. It's possible
# that the caller invoked toggle() while initializing
# a popup.
return
if self.noneSelected ():
Event.tkSet (button, 'state', Tkinter.DISABLED)
Event.tkSet (button, 'cursor', '')
else:
Event.tkSet (button, 'state', Tkinter.NORMAL)
Event.tkSet (button, 'cursor', 'hand2')
# end of class Popup
| {
"content_hash": "710c60bbf8c23d0060dece908bd4c181",
"timestamp": "",
"source": "github",
"line_count": 1332,
"max_line_length": 102,
"avg_line_length": 21.813063063063062,
"alnum_prop": 0.6483221476510067,
"repo_name": "alexrudy/Cauldron",
"id": "bb40bd00558d566498953c0b5a85a2de02d9baf3",
"size": "29055",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Cauldron/bundled/GUI/Setups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "840330"
}
],
"symlink_target": ""
} |
'''Tests jython.bat using the --print option'''
import os
import sys
import unittest
import tempfile
from test import test_support
from java.lang import IllegalThreadStateException
from java.lang import Runtime
from java.lang import System
from java.lang import Thread
from java.io import File
from java.io import BufferedReader;
from java.io import InputStreamReader;
class Monitor(Thread):
def __init__(self, process):
self.process = process
self.output = ''
def run(self):
reader = BufferedReader(InputStreamReader(self.getStream()))
try:
line = reader.readLine()
while line:
self.output += line
line = reader.readLine()
finally:
reader.close()
def getOutput(self):
return self.output
class StdoutMonitor(Monitor):
def __init__(self, process):
Monitor.__init__(self, process)
def getStream(self):
return self.process.getInputStream()
class StderrMonitor(Monitor):
def __init__(self, process):
Monitor.__init__(self, process)
def getStream(self):
return self.process.getErrorStream()
class StarterProcess:
def writeStarter(self, args, javaHome, jythonHome, jythonOpts, internals=False):
(starter_fd, starterPath) = tempfile.mkstemp(suffix='.bat', prefix='starter', text=True)
os.close(starter_fd) # mkstemp returns an OS-level file descriptor, not a file object
outfilePath = starterPath[:-4] + '.out'
starter = open(starterPath, 'w') # open starter as simple file
try:
if javaHome:
starter.write('set JAVA_HOME=%s\n' % javaHome)
if jythonHome:
starter.write('set JYTHON_HOME=%s\n' % jythonHome)
if jythonOpts:
starter.write('set JYTHON_OPTS=%s\n' % jythonOpts)
if internals:
starter.write('set _JYTHON_OPTS=leaking_internals\n')
starter.write('set _JYTHON_HOME=c:/leaking/internals\n')
starter.write(self.buildCommand(args, outfilePath))
return (starterPath, outfilePath)
finally:
starter.close()
def buildCommand(self, args, outfilePath):
line = ''
for arg in args:
line += arg
line += ' '
line += '> '
line += outfilePath
line += ' 2>&1'
return line
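# Illustrative only: for args ['"C:\\jython\\jython.bat"', '--print', '-i'] and
# outfilePath C:\Temp\starter42.out (both hypothetical), buildCommand() returns
#     "C:\jython\jython.bat" --print -i > C:\Temp\starter42.out 2>&1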
def getOutput(self, outfilePath):
lines = ''
outfile = open(outfilePath, 'r')
try:
for line in outfile.readlines():
lines += line
finally:
outfile.close()
return lines
def isAlive(self, process):
try:
process.exitValue()
return False
except IllegalThreadStateException:
return True
def run(self, args, javaHome, jythonHome, jythonOpts, internals=False):
''' creates a start script, executes it and captures the output '''
(starterPath, outfilePath) = self.writeStarter(args, javaHome, jythonHome, jythonOpts, internals)
try:
process = Runtime.getRuntime().exec(starterPath)
stdoutMonitor = StdoutMonitor(process)
stderrMonitor = StderrMonitor(process)
stdoutMonitor.start()
stderrMonitor.start()
while self.isAlive(process):
Thread.sleep(300)
return self.getOutput(outfilePath)
finally:
os.remove(starterPath)
os.remove(outfilePath)
class BaseTest(unittest.TestCase):
def quote(self, s):
return '"' + s + '"'
def unquote(self, s):
if len(s) > 0:
if s[:1] == '"':
s = s[1:]
if len(s) > 0:
if s[-1:] == '"':
s = s[:-1]
return s
def getHomeDir(self):
ex = sys.executable
tail = ex[-15:]
if tail == '\\bin\\jython.bat':
home = ex[:-15]
else:
home = ex[:-11] # \jython.bat
return home
def assertOutput(self, flags=None, javaHome=None, jythonHome=None, jythonOpts=None, internals=False):
args = [self.quote(sys.executable), '--print']
memory = None
stack = None
prop = None
jythonArgs = None
boot = False
jdb = False
if flags:
for flag in flags:
if flag[:2] == '-J':
if flag[2:6] == '-Xmx':
memory = flag[6:]
elif flag[2:6] == '-Xss':
stack = flag[6:]
elif flag[2:4] == '-D':
prop = flag[2:]
elif flag[:2] == '--':
if flag[2:6] == 'boot':
boot = True
elif flag[2:5] == 'jdb':
jdb = True
else:
if jythonArgs:
jythonArgs += ' '
jythonArgs += flag
else:
jythonArgs = flag
jythonArgs = jythonArgs.replace('%%', '%') # undo the %% escaping needed because we go through two .bat files
args.append(flag)
process = StarterProcess()
out = process.run(args, javaHome, jythonHome, jythonOpts, internals)
self.assertNotEquals('', out)
homeIdx = out.find('-Dpython.home=')
java = 'java'
if javaHome:
java = self.quote(self.unquote(javaHome) + '\\bin\\java')
elif jdb:
java = 'jdb'
if not memory:
memory = '512m'
if not stack:
stack = '1152k'
beginning = java + ' '
if prop:
beginning += ' ' + prop
beginning += ' -Xmx' + memory + ' -Xss' + stack + ' '
self.assertEquals(beginning, out[:homeIdx])
executableIdx = out.find('-Dpython.executable=')
homeDir = self.getHomeDir()
if jythonHome:
homeDir = self.unquote(jythonHome)
home = '-Dpython.home=' + self.quote(homeDir) + ' '
self.assertEquals(home, out[homeIdx:executableIdx])
if boot:
classpathFlag = '-Xbootclasspath/a:'
else:
classpathFlag = '-classpath'
classpathIdx = out.find(classpathFlag)
executable = '-Dpython.executable=' + self.quote(sys.executable) + ' '
if not boot:
executable += ' '
self.assertEquals(executable, out[executableIdx:classpathIdx])
# ignore full contents of classpath at the moment
classIdx = out.find('org.python.util.jython')
self.assertTrue(classIdx > classpathIdx)
restIdx = classIdx + len('org.python.util.jython')
rest = out[restIdx:].strip()
if jythonOpts:
self.assertEquals(self.quote(jythonOpts), rest)
else:
if jythonArgs:
self.assertEquals(jythonArgs, rest)
else:
self.assertEquals('', rest)
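# For reference, the --print line that assertOutput() dissects has this shape
# (paths hypothetical):
#   java [-Dprop=value] -Xmx512m -Xss1152k -Dpython.home="C:\jython"
#        -Dpython.executable="C:\jython\jython.bat" -classpath ...
#        org.python.util.jython <remaining args>
# With --boot the -classpath flag is replaced by -Xbootclasspath/a:.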
class VanillaTest(BaseTest):
def test_plain(self):
self.assertOutput()
class JavaHomeTest(BaseTest):
def test_unquoted(self):
# for the build bot, try to specify a real java home
javaHome = System.getProperty('java.home', 'C:\\Program Files\\Java\\someJava')
self.assertOutput(javaHome=javaHome)
def test_quoted(self):
self.assertOutput(javaHome=self.quote('C:\\Program Files\\Java\\someJava'))
# this currently fails, meaning we accept only quoted (x86) homes ...
def __test_x86_unquoted(self):
self.assertOutput(javaHome='C:\\Program Files (x86)\\Java\\someJava')
def test_x86_quoted(self):
self.assertOutput(javaHome=self.quote('C:\\Program Files (x86)\\Java\\someJava'))
class JythonHomeTest(BaseTest):
def createJythonJar(self, parentDir):
jar = File(parentDir, 'jython.jar')
if not jar.exists():
self.assertTrue(jar.createNewFile())
return jar
def cleanup(self, tmpdir, jar=None):
if jar and jar.exists():
self.assertTrue(jar.delete())
os.rmdir(tmpdir)
def test_unquoted(self):
jythonHomeDir = tempfile.mkdtemp()
jar = self.createJythonJar(jythonHomeDir)
self.assertOutput(jythonHome=jythonHomeDir)
self.cleanup(jythonHomeDir, jar)
def test_quoted(self):
jythonHomeDir = tempfile.mkdtemp()
jar = self.createJythonJar(jythonHomeDir)
self.assertOutput(jythonHome=self.quote(jythonHomeDir))
self.cleanup(jythonHomeDir, jar)
class JythonOptsTest(BaseTest):
def test_single(self):
self.assertOutput(jythonOpts='myOpt')
def test_multiple(self):
self.assertOutput(jythonOpts='some arbitrary options')
class InternalsTest(BaseTest):
def test_no_leaks(self):
self.assertOutput(internals=True)
class JavaOptsTest(BaseTest):
def test_memory(self):
self.assertOutput(['-J-Xmx321m'])
def test_stack(self):
self.assertOutput(['-J-Xss321k'])
def test_property(self):
self.assertOutput(['-J-DmyProperty=myValue'])
def test_property_singlequote(self):
self.assertOutput(["-J-DmyProperty='myValue'"])
# a space inside value does not work in jython.bat
def __test_property_singlequote_space(self):
self.assertOutput(["-J-DmyProperty='my Value'"])
def test_property_doublequote(self):
self.assertOutput(['-J-DmyProperty="myValue"'])
# a space inside value does not work in jython.bat
def __test_property_doublequote_space(self):
self.assertOutput(['-J-DmyProperty="my Value"'])
def test_property_underscore(self):
self.assertOutput(['-J-Dmy_Property=my_Value'])
class ArgsTest(BaseTest):
def test_file(self):
self.assertOutput(['test.py'])
def test_dash(self):
self.assertOutput(['-i'])
def test_combined(self):
self.assertOutput(['-W', 'action', 'line'])
def test_singlequoted(self):
self.assertOutput(['-c', "'import sys;'"])
def test_doublequoted(self):
self.assertOutput(['-c', '"print \'something\'"'])
def test_nestedquotes(self):
self.assertOutput(['-c', '"print \'something \"really\" cool\'"'])
def test_nestedquotes2(self):
self.assertOutput(['-c', "'print \"something \'really\' cool\"'"])
def test_underscored(self):
self.assertOutput(['-jar', 'my_stuff.jar'])
def test_property(self):
self.assertOutput(['-DmyProperty=myValue'])
def test_property_underscored(self):
self.assertOutput(['-DmyProperty=my_Value'])
def test_property_singlequoted(self):
self.assertOutput(["-DmyProperty='my_Value'"])
def test_property_doublequoted(self):
self.assertOutput(['-DmyProperty="my_Value"'])
class DoubleDashTest(BaseTest):
def test_boot(self):
self.assertOutput(['--boot'])
def test_jdb(self):
self.assertOutput(['--jdb'])
class GlobPatternTest(BaseTest):
def test_star_nonexisting(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', '*.nonexisting', '*.nonexisting'])
def test_star_nonexisting_doublequoted(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', '"*.nonexisting"', '"*.nonexisting"'])
def test_star_nonexistingfile_singlequoted(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', "'*.nonexisting'", "'*.nonexisting'"])
def test_star_existing(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', '*.bat', '*.bat'])
def test_star_existing_doublequoted(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', '"*.bat"', '"*.bat"'])
def test_star_existing_singlequoted(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', "'*.bat'", "'*.bat'"])
class ArgsSpacesTest(BaseTest):
def test_doublequoted(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', '"part1 part2"', '2nd'])
def test_singlequoted(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', "'part1 part2'", '2nd'])
# this test currently fails
def __test_unbalanced_doublequote(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', 'Scarlet O"Hara', '2nd'])
def test_unbalanced_singlequote(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', "Scarlet O'Hara", '2nd'])
class ArgsSpecialCharsTest(BaseTest):
# exclamation marks are still very special ...
def __test_exclamationmark(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', 'foo!', 'ba!r', '!baz', '!'])
# because we go through a starter.bat file, we have to simulate % with %%
def test_percentsign(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', 'foo%%1', '%%1bar', '%%1', '%%'])
def test_colon(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', 'foo:', ':bar'])
# a semicolon at the beginning of an arg currently fails (e.g. ;bar)
def test_semicolon(self):
self.assertOutput(['-c', 'import sys; print sys.argv[1:]', 'foo;'])
class DummyTest(unittest.TestCase):
def test_nothing(self):
pass
def test_main():
if os._name == 'nt':
test_support.run_unittest(VanillaTest,
JavaHomeTest,
JythonHomeTest,
JythonOptsTest,
InternalsTest,
JavaOptsTest,
ArgsTest,
DoubleDashTest,
GlobPatternTest,
ArgsSpacesTest,
ArgsSpecialCharsTest)
else:
# provide at least one test for the other platforms - happier build bots
test_support.run_unittest(DummyTest)
if __name__ == '__main__':
test_main()
| {
"content_hash": "98776d1e33a484f610b353b586b04702",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 105,
"avg_line_length": 34.175182481751825,
"alnum_prop": 0.5685604442545921,
"repo_name": "nvoron23/socialite",
"id": "0cb73842fccc314d797bbb6ed8381d797b5ed88a",
"size": "14046",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "jython/Lib/test/test_bat_jy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "35416"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Java",
"bytes": "2253475"
},
{
"name": "Python",
"bytes": "10833034"
},
{
"name": "R",
"bytes": "752"
},
{
"name": "Shell",
"bytes": "29299"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import bowerstatic
from webtest import TestApp as Client
import os
import sys
import json
from datetime import datetime, timedelta
import pytest
from bowerstatic import compat
from bowerstatic import filesystem_microsecond_autoversion
def test_local_falls_back_to_components():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('jquery/dist/jquery.js')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script></head><body>Hello!</body></html>')
response = c.get('/bowerstatic/components/jquery/2.1.1/dist/jquery.js')
assert response.body == b'/* jquery.js 2.1.1 */\n'
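# The URL pattern exercised throughout these tests is
# /bowerstatic/<collection>/<component>/<version>/<path within the component>,
# e.g. /bowerstatic/components/jquery/2.1.1/dist/jquery.js above and
# /bowerstatic/local/local_component/2.0/local.js for the local collection below.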
def test_local_with_local_component_main():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
path = os.path.join(
os.path.dirname(__file__), 'local_component')
local.component(path, version='2.0')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('local_component')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/local/local_component/2.0/local.js">'
b'</script></head><body>Hello!</body></html>')
response = c.get('/bowerstatic/local/local_component/2.0/local.js')
assert response.body == b'/* this is local.js */\n'
def test_local_with_local_component_specific_file():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
path = os.path.join(
os.path.dirname(__file__), 'local_component')
local.component(path, version='2.0')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('local_component/local.js')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/local/local_component/2.0/local.js">'
b'</script></head><body>Hello!</body></html>')
response = c.get('/bowerstatic/local/local_component/2.0/local.js')
assert response.body == b'/* this is local.js */\n'
def test_local_internal_dependencies():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
path = os.path.join(
os.path.dirname(__file__), 'local_component')
local.component(path, version='2.0')
local.resource('local_component/second.js', dependencies=[
'local_component/local.js'])
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('local_component/second.js')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/local/local_component/2.0/local.js"></script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/local/local_component/2.0/second.js"></script>'
b'</head><body>Hello!</body></html>')
def test_local_external_dependencies():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
path = os.path.join(
os.path.dirname(__file__), 'local_component')
local.component(path, version='2.0')
local.resource('local_component/local.js', dependencies=[
'jquery'])
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('local_component/local.js')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/local/local_component/2.0/local.js"></script>'
b'</head><body>Hello!</body></html>')
def test_local_bower_json_dependencies():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
path = os.path.join(
os.path.dirname(__file__), 'local_component_deps')
local.component(path, version='2.0')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('local_component')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/local/local_component/2.0/local.js"></script>'
b'</head><body>Hello!</body></html>')
def test_local_with_missing_version():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
path = os.path.join(
os.path.dirname(__file__), 'local_component_missing_version')
with pytest.raises(ValueError) as err:
local.component(path, version=None)
assert str(err.value).startswith('Missing _release and version in')
assert str(err.value).endswith('/tests/local_component_missing_version')
# FIXME: strictly speaking this also fails on Linux ext3,
# but I don't know how to reliably detect whether the filesystem has
# only second granularity so this will have to do
@pytest.mark.skipif(
sys.platform == 'darwin',
reason="Microsecond granularity does not work on Mac OS X")
def test_local_with_microsecond_auto_version(tmpdir):
# need to cut things a bit of slack, as filesystem time can apparently
# be ahead slightly
after_dt = datetime.now() - timedelta(seconds=1)
# create a bower component directory
component_dir = tmpdir.mkdir('component')
bower_json_file = component_dir.join('bower.json')
bower_json_file.write(json.dumps({
'name': 'component',
'version': '2.1', # should be ignored
'main': 'main.js'
}))
main_js_file = component_dir.join('main.js')
main_js_file.write('/* this is main.js */')
# now expose it through local
bower = bowerstatic.Bower(autoversion=filesystem_microsecond_autoversion)
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
local.component(component_dir.strpath, version=None)
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('component/main.js')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
before_dt = datetime.now()
def get_url_dt(response):
s = compat.text_type(response.body, 'UTF-8')
start = s.find('src="') + len('src="')
end = s.find('"', start)
url = s[start:end]
parts = url.split('/')
url_dt_str = parts[4]
url_dt = datetime.strptime(url_dt_str, '%Y-%m-%dT%H:%M:%S.%f')
return url_dt_str, url_dt
url_dt_str, url_dt = get_url_dt(response)
assert url_dt >= after_dt
assert url_dt <= before_dt
response = c.get('/bowerstatic/local/component/%s/main.js' % url_dt_str)
assert response.body == b'/* this is main.js */'
after_dt = datetime.now() - timedelta(seconds=1)
# now we modify a file
main_js_file.write('/* this is main.js, modified */')
response = c.get('/')
before_dt = datetime.now()
original_url_dt_str, original_url_dt = url_dt_str, url_dt
url_dt_str, url_dt = get_url_dt(response)
assert original_url_dt_str != url_dt_str
assert url_dt >= after_dt
assert url_dt <= before_dt
assert url_dt > original_url_dt
c.get('/bowerstatic/local/component/%s/main.js' % original_url_dt_str,
status=404)
response = c.get('/bowerstatic/local/component/%s/main.js' %
url_dt_str)
assert response.body == b'/* this is main.js, modified */'
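# The autoversioning exercised above substitutes the component's newest file
# modification time (an ISO timestamp) for the version segment of the URL, so
# editing main.js yields a fresh URL and the previous one stops resolving (404).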
def test_local_with_second_auto_version(tmpdir):
# need to cut things a bit of slack, as filesystem time can apparently
# be ahead slightly
after_dt = datetime.now() - timedelta(seconds=1)
# create a bower component directory
component_dir = tmpdir.mkdir('component')
bower_json_file = component_dir.join('bower.json')
bower_json_file.write(json.dumps({
'name': 'component',
'version': '2.1', # should be ignored
'main': 'main.js'
}))
main_js_file = component_dir.join('main.js')
main_js_file.write('/* this is main.js */')
# now expose it through local
# the default autoversioning scheme uses second granularity
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
local = bower.local_components('local', components)
local.component(component_dir.strpath, version=None)
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = local.includer(environ)
include('component/main.js')
return [b'<html><head></head><body>Hello!</body></html>']
wrapped = bower.wrap(wsgi)
c = Client(wrapped)
response = c.get('/')
before_dt = datetime.now()
def get_url_dt(response):
s = compat.text_type(response.body, 'UTF-8')
start = s.find('src="') + len('src="')
end = s.find('"', start)
url = s[start:end]
parts = url.split('/')
url_dt_str = parts[4]
url_dt = datetime.strptime(url_dt_str, '%Y-%m-%dT%H:%M:%S')
return url_dt_str, url_dt
url_dt_str, url_dt = get_url_dt(response)
assert url_dt >= after_dt
assert url_dt <= before_dt
response = c.get('/bowerstatic/local/component/%s/main.js' % url_dt_str)
assert response.body == b'/* this is main.js */'
after_dt = datetime.now() - timedelta(seconds=1)
# now we modify a file
main_js_file.write('/* this is main.js, modified */')
response = c.get('/')
before_dt = datetime.now()
original_url_dt = url_dt
url_dt_str, url_dt = get_url_dt(response)
assert url_dt >= after_dt
assert url_dt <= before_dt
assert url_dt >= original_url_dt
response = c.get('/bowerstatic/local/component/%s/main.js' %
url_dt_str)
assert response.body == b'/* this is main.js, modified */'
| {
"content_hash": "77d2bc63255b8da70e23db573b3afb3f",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 79,
"avg_line_length": 32.03266331658291,
"alnum_prop": 0.622715507098596,
"repo_name": "Preston-Landers/bowerstatic",
"id": "da7fdbce9aaa496923ad73c33bccc0889d3a7cba",
"size": "12749",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bowerstatic/tests/test_local.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "94"
},
{
"name": "Python",
"bytes": "72384"
}
],
"symlink_target": ""
} |
"""Support for Salda Smarty XP/XV Ventilation Unit Binary Sensors."""
import logging
from homeassistant.core import callback
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN, SIGNAL_UPDATE_SMARTY
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Smarty Binary Sensor Platform."""
smarty = hass.data[DOMAIN]["api"]
name = hass.data[DOMAIN]["name"]
sensors = [
AlarmSensor(name, smarty),
WarningSensor(name, smarty),
BoostSensor(name, smarty),
]
async_add_entities(sensors, True)
class SmartyBinarySensor(BinarySensorDevice):
"""Representation of a Smarty Binary Sensor."""
def __init__(self, name, device_class, smarty):
"""Initialize the entity."""
self._name = name
self._state = None
self._sensor_type = device_class
self._smarty = smarty
@property
def device_class(self):
"""Return the class of the sensor."""
return self._sensor_type
@property
def should_poll(self) -> bool:
"""Do not poll."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
async def async_added_to_hass(self):
"""Call to update."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_SMARTY, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
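# Note: these entities are push-updated. Something else in the smarty
# integration (presumably after it refreshes the unit) fires
# SIGNAL_UPDATE_SMARTY, and each entity then schedules its own update();
# that is why should_poll is False above.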
class BoostSensor(SmartyBinarySensor):
"""Boost State Binary Sensor."""
def __init__(self, name, smarty):
"""Alarm Sensor Init."""
super().__init__(
name="{} Boost State".format(name), device_class=None, smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.boost
class AlarmSensor(SmartyBinarySensor):
"""Alarm Binary Sensor."""
def __init__(self, name, smarty):
"""Alarm Sensor Init."""
super().__init__(
name="{} Alarm".format(name), device_class="problem", smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.alarm
class WarningSensor(SmartyBinarySensor):
"""Warning Sensor."""
def __init__(self, name, smarty):
"""Warning Sensor Init."""
super().__init__(
name="{} Warning".format(name), device_class="problem", smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.warning
| {
"content_hash": "4c6636f053c32a048ab8df49eed9515b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 88,
"avg_line_length": 27.89908256880734,
"alnum_prop": 0.6106543900032884,
"repo_name": "fbradyirl/home-assistant",
"id": "2d79700db78507194b4b08260e356f7a0d1f2dcd",
"size": "3041",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/smarty/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudNaturalLanguageExampleDagsTest(GoogleSystemTest):
def setUp(self):
super().setUp()
@provide_gcp_context(GCP_AI_KEY)
def test_run_example_dag(self):
self.run_dag('example_gcp_natural_language', CLOUD_DAG_FOLDER)
def tearDown(self):
super().tearDown()
| {
"content_hash": "2452fd80a214af5be3f8f5b29c175a10",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 103,
"avg_line_length": 32.388888888888886,
"alnum_prop": 0.7358490566037735,
"repo_name": "lyft/incubator-airflow",
"id": "4bcd05f7f4995cc4e38c5f3225fb240b06041af6",
"size": "1370",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/operators/test_natural_language_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/installation/shared_mockup_power_generator_fusion_style_1.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "82961bfef7c3a39db4e637f573325a10",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 96,
"avg_line_length": 25,
"alnum_prop": 0.7046153846153846,
"repo_name": "obi-two/Rebelion",
"id": "79b15b68132aed9311aae5d8d1d24682c5d72651",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/installation/shared_mockup_power_generator_fusion_style_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""fabric ops"""
import os
from fabric.api import cd
from fabric.api import env
from fabric.api import get
from fabric.api import put
from fabric.api import run
from fabric.api import shell_env
import robotx
# env.user = 'root'
env.password = os.environ['all_slave_password']
# env.hosts = ['192.168.122.56', '192.168.122.153', '192.168.122.254']
env.skip_bad_hosts = True
#env.timeout=120
env.parallel = True
def copy_files(project_path, worker_root):
"""copy all needed files to workers"""
# send tests file to worker
robotx_path = robotx.__path__[0]
worker_file = os.path.join(robotx_path, 'core', 'workerdaemon.py')
put(project_path, worker_root, use_sudo=True)
put(worker_file, worker_root, use_sudo=True)
def run_workers(worker_root, masterip, planid, project_name, other_variables):
"""run all workers on given hosts"""
worker_file = 'workerdaemon.py'
worker_cmd = 'python %s %s %s %s %s' \
% (worker_file, masterip, planid, project_name, other_variables)
with shell_env(DISPLAY=':0'):
with cd(worker_root):
run(worker_cmd)
def collect_reports(worker_root, project_name):
"""docstring for collect_reports"""
results_path = os.path.join(worker_root, project_name, 'results')
with cd(results_path):
print "\nStart to collect result files"
get('*.xml', './')
run('rm -rf *.xml')
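# Example invocation (illustrative; hosts, paths and arguments are hypothetical):
#
#   fab -f fabworker.py -H 192.168.122.56 \
#       copy_files:/path/to/project,/home/worker \
#       run_workers:/home/worker,192.168.122.1,42,myproject,none \
#       collect_reports:/home/worker,myproject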
| {
"content_hash": "d8a336d046298e2b9de740b121a6fd13",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 28,
"alnum_prop": 0.6614285714285715,
"repo_name": "ylbian/robot-d",
"id": "cd5cb8263adaad5378c4542f0c876b2cfc482092",
"size": "1400",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "robotx/core/fabworker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1433960"
},
{
"name": "Python",
"bytes": "100943"
}
],
"symlink_target": ""
} |
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with :ref:`bayesian_ridge_regression`.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print __doc__
import numpy as np
import pylab as pl
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
################################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
################################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
################################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
pl.figure(figsize=(6, 5))
pl.title("Weights of the model")
pl.plot(clf.coef_, 'b-', label="ARD estimate")
pl.plot(ols.coef_, 'r--', label="OLS estimate")
pl.plot(w, 'g-', label="Ground truth")
pl.xlabel("Features")
pl.ylabel("Values of the weights")
pl.legend(loc=1)
pl.figure(figsize=(6, 5))
pl.title("Histogram of the weights")
pl.hist(clf.coef_, bins=n_features, log=True)
pl.plot(clf.coef_[relevant_features], 5*np.ones(len(relevant_features)),
'ro', label="Relevant features")
pl.ylabel("Features")
pl.xlabel("Values of the weights")
pl.legend(loc=1)
pl.figure(figsize=(6, 5))
pl.title("Marginal log-likelihood")
pl.plot(clf.scores_)
pl.ylabel("Score")
pl.xlabel("Iterations")
pl.show()
| {
"content_hash": "86ad39071a52a1f3cd8838b05f113ae1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 31.135802469135804,
"alnum_prop": 0.6324345757335448,
"repo_name": "ominux/scikit-learn",
"id": "c0610210f88305f0e0797c8f85dd1598d1151dd4",
"size": "2522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/linear_model/plot_ard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "455969"
},
{
"name": "C++",
"bytes": "240380"
},
{
"name": "Makefile",
"bytes": "1411"
},
{
"name": "Python",
"bytes": "2064853"
},
{
"name": "Shell",
"bytes": "486"
}
],
"symlink_target": ""
} |
"""
Provides caparg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update caparg` to change this file.
from incremental import Version
__version__ = Version('caparg', 17, 4, 0)
__all__ = ["__version__"]
| {
"content_hash": "1916aebaafa518d5002ca87d91f79c85",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 23.363636363636363,
"alnum_prop": 0.6809338521400778,
"repo_name": "moshez/caparg",
"id": "cb8a30435d9e34452d816bb0368641754c5cbfc4",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/caparg/_version.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import logging
import sys
from datetime import datetime
from django.conf import settings
from django.views.debug import technical_500_response
from django.http import HttpResponseRedirect, HttpResponse
from npactweb import MissingFileError, ImmediateHttpResponseException, \
RedirectException
logger = logging.getLogger(__name__)
class AddItemsDict(object):
def process_request(self, request):
request.items = {}
return None
class NPactResponseExceptionHandler(object):
def process_exception(self, request, exception):
if isinstance(exception, ImmediateHttpResponseException):
return exception.httpResponse
elif isinstance(exception, RedirectException):
return HttpResponseRedirect(exception.url)
elif isinstance(exception, MissingFileError):
return HttpResponse(exception.message, status=404,
content_type="text/plain")
else:
logger.exception(exception)
return HttpResponse(repr(exception), status=500,
content_type="application/json")
class SaveTraceExceptionMiddleware(object):
"""
    Save the technical 500 response to disk when a request fails.
    Enable it by adding 'SaveTraceExceptionMiddleware' to the middleware
    settings; traces are written under settings.EXC_TRACE_PATH.
"""
def process_exception(self, request, exception):
        if not settings.DEBUG and settings.EXC_TRACE_PATH:
try:
tech_response = technical_500_response(
request, *sys.exc_info())
error_id = datetime.now().isoformat("-").replace(":", "-")
fname = "{0}/{1}.txt".format(settings.EXC_TRACE_PATH, error_id)
with open(fname, "w") as fout:
fout.write(tech_response.content)
logger.info(
"Exception technical response saved in %s", fout.name)
except Exception, e:
logger.error(
"Error when saving exception to file: '%s' / '%s' ",
str(sys.exc_info()), str(e))
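# A minimal wiring sketch (not part of the original project) showing how these
# middleware classes might be registered in a Django settings module; the
# EXC_TRACE_PATH value below is an assumption for illustration.
#
#     MIDDLEWARE_CLASSES = (
#         'npactweb.middleware.AddItemsDict',
#         'npactweb.middleware.NPactResponseExceptionHandler',
#         'npactweb.middleware.SaveTraceExceptionMiddleware',
#     )
#     EXC_TRACE_PATH = '/var/log/npact/exception-traces'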
| {
"content_hash": "b5bf157fab3e76ace293af95f223c133",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 36.78181818181818,
"alnum_prop": 0.6183885318833415,
"repo_name": "victor-lin/npact",
"id": "68d14799178a9d28b18604071de1844b635dc1d7",
"size": "2023",
"binary": false,
"copies": "1",
"ref": "refs/heads/acgt_gamma",
"path": "npactweb/npactweb/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "239355"
},
{
"name": "CSS",
"bytes": "19120"
},
{
"name": "HTML",
"bytes": "67083"
},
{
"name": "JavaScript",
"bytes": "207930"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "Python",
"bytes": "268948"
},
{
"name": "Shell",
"bytes": "975"
}
],
"symlink_target": ""
} |
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
# Test generated documentation of attributes, elements, and classes
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
from pyxb.utils import six
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="tEnumerations">
<xs:annotation><xs:documentation>Documentation for type tEnumerations</xs:documentation></xs:annotation>
<xs:restriction base="xs:string">
<xs:enumeration value="one">
<xs:annotation><xs:documentation>Documentation for tEnumerations.one</xs:documentation></xs:annotation>
</xs:enumeration>
<xs:enumeration value="two">
<xs:annotation><xs:documentation>Documentation for tEnumerations.two</xs:documentation></xs:annotation>
</xs:enumeration>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="tComplex">
<xs:annotation><xs:documentation>Documentation for tComplex</xs:documentation></xs:annotation>
<xs:complexContent>
<xs:extension base="xs:anyType">
<xs:sequence>
<xs:element name="elt" type="xs:string">
<xs:annotation><xs:documentation>Documentation for element C{elt} in C{tComplex}</xs:documentation></xs:annotation>
</xs:element>
<xs:element ref="element" minOccurs="0" >
<xs:annotation><xs:documentation>How does documentation for a referenced element come out?</xs:documentation></xs:annotation>
</xs:element>
</xs:sequence>
<xs:attribute name="attr" type="xs:string">
<xs:annotation><xs:documentation>Documentation for attribute C{attr} in C{tComplex}</xs:documentation></xs:annotation>
</xs:attribute>
</xs:extension>
</xs:complexContent>
</xs:complexType>
<xs:element name="element" type="tComplex">
<xs:annotation><xs:documentation>Documentation for element C{element}
Multi-line.
With " and ' characters even.
</xs:documentation></xs:annotation>
</xs:element>
</xs:schema>
'''
#open('schema.xsd', 'w').write(xsd)
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_200908111918 (unittest.TestCase):
def testComponent (self):
self.assertEqual(element._description(), '''element (tComplex)
Documentation for element C{element}
Multi-line.
With " and ' characters even.
''')
desc1 = six.u('''tComplex, element-only content
Attributes:
attr: attr ({http://www.w3.org/2001/XMLSchema}string), optional
Wildcard attribute(s)
Elements:
elt: elt ({http://www.w3.org/2001/XMLSchema}string), local to tComplex
element: element (tComplex), local to tComplex
Wildcard element(s)''')
desc2 = six.u('''tComplex, element-only content
Attributes:
attr: attr ({http://www.w3.org/2001/XMLSchema}string), optional
Wildcard attribute(s)
Elements:
element: element (tComplex), local to tComplex
elt: elt ({http://www.w3.org/2001/XMLSchema}string), local to tComplex
Wildcard element(s)''')
self.assertTrue(tComplex._description() in (desc1, desc2))
self.assertEqual(tEnumerations._description(), '''tEnumerations restriction of {http://www.w3.org/2001/XMLSchema}string
Documentation for type tEnumerations''')
# NOTE It is arguably a bug that the local annotation for the
# reference element has been lost. When somebody else
# discovers this and complains, we'll think about fixing it.
self.assertEqual(tComplex.element.__doc__, '''Documentation for element C{element}
Multi-line.
With " and ' characters even.
''')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5e8b694aa2a608be4ccb813ba5580c86",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 135,
"avg_line_length": 36.523809523809526,
"alnum_prop": 0.7066492829204694,
"repo_name": "CantemoInternal/pyxb",
"id": "d11723ef77813a91739e426dca732141d1ccf515",
"size": "3859",
"binary": false,
"copies": "2",
"ref": "refs/heads/next",
"path": "tests/bugs/test-200908111918.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "383"
},
{
"name": "Python",
"bytes": "1935375"
},
{
"name": "Shell",
"bytes": "27215"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
from django.core.cache import cache
from django.db import connection
from django.test import TestCase
from explorer.app_settings import EXPLORER_DEFAULT_CONNECTION as CONN
from explorer import schema
class TestSchemaInfo(TestCase):
def setUp(self):
cache.clear()
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_schema_info_returns_valid_data(self, mocked_excludes,
mocked_includes):
mocked_includes.return_value = None
mocked_excludes.return_value = []
res = schema.schema_info(CONN)
assert mocked_includes.called # sanity check: ensure patch worked
tables = [x[0] for x in res]
self.assertIn('explorer_query', tables)
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_table_exclusion_list(self, mocked_excludes, mocked_includes):
mocked_includes.return_value = None
mocked_excludes.return_value = ('explorer_',)
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertNotIn('explorer_query', tables)
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_app_inclusion_list(self, mocked_excludes, mocked_includes):
mocked_includes.return_value = ('auth_',)
mocked_excludes.return_value = []
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertNotIn('explorer_query', tables)
self.assertIn('auth_user', tables)
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_app_inclusion_list_excluded(self, mocked_excludes,
mocked_includes):
# Inclusion list "wins"
mocked_includes.return_value = ('explorer_',)
mocked_excludes.return_value = ('explorer_',)
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertIn('explorer_query', tables)
@patch('explorer.schema._include_views')
def test_app_include_views(self, mocked_include_views):
database_view = setup_sample_database_view()
mocked_include_views.return_value = True
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertIn(database_view, tables)
@patch('explorer.schema._include_views')
def test_app_exclude_views(self, mocked_include_views):
database_view = setup_sample_database_view()
mocked_include_views.return_value = False
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertNotIn(database_view, tables)
@patch('explorer.schema.do_async')
def test_builds_async(self, mocked_async_check):
mocked_async_check.return_value = True
self.assertIsNone(schema.schema_info(CONN))
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertIn('explorer_query', tables)
def setup_sample_database_view():
with connection.cursor() as cursor:
cursor.execute(
"CREATE VIEW IF NOT EXISTS v_explorer_query AS SELECT title, "
"sql from explorer_query"
)
return 'v_explorer_query'
| {
"content_hash": "9fd5c9a1fb56c10455677abc791f046d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 74,
"avg_line_length": 37.89772727272727,
"alnum_prop": 0.64047976011994,
"repo_name": "groveco/django-sql-explorer",
"id": "73d1f43aa0c29431c5dcdcb38ea31a7c0746bd7d",
"size": "3359",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "explorer/tests/test_schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2444"
},
{
"name": "HTML",
"bytes": "50780"
},
{
"name": "JavaScript",
"bytes": "11601"
},
{
"name": "Python",
"bytes": "135883"
},
{
"name": "Shell",
"bytes": "419"
}
],
"symlink_target": ""
} |
import unittest2 as unittest
from mocker import Mocker
from zope.component import getUtility
from zope.component import getMultiAdapter
from zope.interface import alsoProvides
from plone.registry.interfaces import IRegistry
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from z3c.form.interfaces import IFormLayer
from collective.flattr.tests.base import COLLECTIVE_FLATTR_INTEGRATION_TESTING
from collective.flattr.interfaces import ICollectiveFlattr
from collective.flattr.browser.flattr import Flattr
from collective.flattr.tests.mocks import MockOpener
# have to split into test_controlpanel.py and test_controlpanel_call.py,
# because mocker does not like duplicate replaces.
class controlpanel(object):
def __init__(self, portal, request):
self.portal = portal
self.request = request
def __enter__(self):
return getMultiAdapter((self.portal, self.request),
name='flattr-controlpanel')
def __exit__(self, type, value, traceback):
pass
class TestControlPanel(unittest.TestCase):
layer = COLLECTIVE_FLATTR_INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
setRoles(self.portal, TEST_USER_ID, ('Manager',))
self.reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
self.mocker = None
alsoProvides(self.layer['request'], IFormLayer)
def tearDown(self):
if self.mocker:
self.mocker.reset()
def test_test_access_token_fail(self):
self.reg.access_token = u'NEW'
self.reg.access_token_type = u'Bearer'
self.mocker = Mocker()
flattr_view = self.mocker.patch(Flattr)
flattr_view.opener
self.mocker.result(MockOpener('{"error": "unauthorized", "error_description": "Hello World"}', error=True, verify_data=lambda x: x.get_full_url()==u'https://api.flattr.com/rest/v2/user' and x.headers=={'Content-type':'application/json'}))
self.layer['request'].form = {'form.button.TestAccessToken': True}
with controlpanel(self.portal, self.layer['request']) as view:
with self.mocker:
ret = view()
self.failUnless(u'Configured access token does not work :(' in ret)
self.failUnless(u'error' in ret)
self.assertEquals(view.test_access_token,
u'Configured access token does not work :(')
def test_test_access_token_success(self):
self.reg.access_token = u'NEW'
self.reg.access_token_type = u'Bearer'
self.reg.customer_key = u'Key'
self.reg.customer_secret = u'Secret'
self.mocker = Mocker()
flattr_view = self.mocker.patch(Flattr)
flattr_view.opener
self.mocker.result(MockOpener('{"username": "Hello World"}', verify_data=lambda x: x.get_full_url()==u'https://api.flattr.com/rest/v2/user' and x.headers=={'Content-type':'application/json'}))
self.layer['request'].form = {'form.button.TestAccessToken': True}
with controlpanel(self.portal, self.layer['request']) as view:
with self.mocker:
ret = view()
self.failUnless(u'username' in ret)
self.failUnless(u'Hello World' in ret)
self.failUnless(u'username' in view.test_access_token)
self.failUnless(u'Hello World' in view.test_access_token)
# call again. without access token
self.reg.access_token = u''
self.reg.access_token_type = u''
ret = view()
self.failUnless(u'error' in ret)
self.failUnless(u'No access token configured' in ret)
def test_authenticate(self):
self.reg.customer_key = u'Key'
self.reg.customer_secret = u'Secret'
self.layer['request'].form = {'form.button.AcquireToken': True}
with controlpanel(self.portal, self.layer['request']) as view:
self.reg.customer_key = u''
ret = view()
self.failUnless(u'error' in ret)
self.failUnless(u'Unable to create authorize '
'url. consumer and consumer_secret not configured :(' in ret)
# call again for success
self.reg.customer_key = u'consumer'
ret = view()
self.assertEquals(ret, None)
redirect = self.layer['request'].response.headers['location']
self.failUnless(redirect.startswith(self.reg.authorize_url))
self.failUnless(u'collective_flattr' in redirect)
self.failUnless(u'client_id=consumer' in redirect)
self.failUnless(u'redirect_uri' in redirect)
self.failUnless(u'scope=thing' in redirect)
self.failUnless(u'response_type=code' in redirect)
def test_clear_access_token(self):
self.layer['request'].form = {'form.button.ClearAccessToken': True}
self.reg.access_token = u'my access token'
self.reg.access_token_type = u'my access token secret'
with controlpanel(self.portal, self.layer['request']) as view:
ret = view()
self.failUnless(u'info' in ret)
self.failUnless(u'Cleared access token' in ret)
self.assertEquals(self.reg.access_token, u'')
self.assertEquals(self.reg.access_token_type, u'')
| {
"content_hash": "b4d426a72caac430e52f77f8f9d14f7e",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 246,
"avg_line_length": 38.056338028169016,
"alnum_prop": 0.6352701702442635,
"repo_name": "chrigl/docker-library",
"id": "51d1183abcfd38aea8a7005f3f0cd4fc9d7c22a0",
"size": "5404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plone-chrigl-debian/src/collective.flattr/build/lib/collective/flattr/tests/test_controlpanel_call.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12119"
},
{
"name": "JavaScript",
"bytes": "2331"
},
{
"name": "Python",
"bytes": "239995"
},
{
"name": "Shell",
"bytes": "1519"
}
],
"symlink_target": ""
} |
import mock
import prawdditions.patch
import pytest
from .. import IntegrationTest
class TestPrawdditionWikiPage(IntegrationTest):
@mock.patch("time.sleep", return_value=None)
def test_update__no_conflict(self, _):
prawdditions.patch.patch()
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
page = subreddit.wiki["praw_test_page"]
self.reddit.read_only = False
with self.recorder.use_cassette(
"TestPrawdditionWikiPage.test_update__no_conflict"
):
page.update(lambda x: x + " | a suffix")
@mock.patch("time.sleep", return_value=None)
def test_update__conflict(self, _):
prawdditions.patch.patch()
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
page = subreddit.wiki["praw_test_page"]
repeat = True
self.reddit.read_only = False
def update_fn(text):
nonlocal repeat
if repeat:
page.edit("A new body")
repeat = False
return text + " | a suffix"
with self.recorder.use_cassette(
"TestPrawdditionWikiPage.test_update__conflict"
):
page.update(update_fn)
| {
"content_hash": "7706bbae67a553db89b1b7462d79be2e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 32.73684210526316,
"alnum_prop": 0.6181672025723473,
"repo_name": "praw-dev/prawdditions",
"id": "09339a227000adb58aa4d1d7e7079475b9b26385",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/patch/test_prawddition_wikipage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9491"
}
],
"symlink_target": ""
} |
"""Helpers to help coordinate updates."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable
from datetime import datetime, timedelta
import logging
from time import monotonic
from typing import Callable, Generic, TypeVar
import urllib.error
import aiohttp
import requests
from homeassistant import config_entries
from homeassistant.core import CALLBACK_TYPE, Event, HassJob, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import entity, event
from homeassistant.util.dt import utcnow
from .debounce import Debouncer
REQUEST_REFRESH_DEFAULT_COOLDOWN = 10
REQUEST_REFRESH_DEFAULT_IMMEDIATE = True
T = TypeVar("T")
class UpdateFailed(Exception):
"""Raised when an update has failed."""
class DataUpdateCoordinator(Generic[T]):
"""Class to manage fetching data from single endpoint."""
def __init__(
self,
hass: HomeAssistant,
logger: logging.Logger,
*,
name: str,
update_interval: timedelta | None = None,
update_method: Callable[[], Awaitable[T]] | None = None,
request_refresh_debouncer: Debouncer | None = None,
) -> None:
"""Initialize global data updater."""
self.hass = hass
self.logger = logger
self.name = name
self.update_method = update_method
self.update_interval = update_interval
self.config_entry = config_entries.current_entry.get()
# It's None before the first successful update.
# Components should call async_config_entry_first_refresh
# to make sure the first update was successful.
# Set type to just T to remove annoying checks that data is not None
# when it was already checked during setup.
self.data: T = None # type: ignore[assignment]
self._listeners: list[CALLBACK_TYPE] = []
self._job = HassJob(self._handle_refresh_interval)
self._unsub_refresh: CALLBACK_TYPE | None = None
self._request_refresh_task: asyncio.TimerHandle | None = None
self.last_update_success = True
self.last_exception: Exception | None = None
if request_refresh_debouncer is None:
request_refresh_debouncer = Debouncer(
hass,
logger,
cooldown=REQUEST_REFRESH_DEFAULT_COOLDOWN,
immediate=REQUEST_REFRESH_DEFAULT_IMMEDIATE,
function=self.async_refresh,
)
else:
request_refresh_debouncer.function = self.async_refresh
self._debounced_refresh = request_refresh_debouncer
@callback
def async_add_listener(self, update_callback: CALLBACK_TYPE) -> Callable[[], None]:
"""Listen for data updates."""
schedule_refresh = not self._listeners
self._listeners.append(update_callback)
# This is the first listener, set up interval.
if schedule_refresh:
self._schedule_refresh()
@callback
def remove_listener() -> None:
"""Remove update listener."""
self.async_remove_listener(update_callback)
return remove_listener
@callback
def async_remove_listener(self, update_callback: CALLBACK_TYPE) -> None:
"""Remove data update."""
self._listeners.remove(update_callback)
if not self._listeners and self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
@callback
def _schedule_refresh(self) -> None:
"""Schedule a refresh."""
if self.update_interval is None:
return
if self.config_entry and self.config_entry.pref_disable_polling:
return
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
# We _floor_ utcnow to create a schedule on a rounded second,
# minimizing the time between the point and the real activation.
# That way we obtain a constant update frequency,
# as long as the update process takes less than a second
self._unsub_refresh = event.async_track_point_in_utc_time(
self.hass,
self._job,
utcnow().replace(microsecond=0) + self.update_interval,
)
async def _handle_refresh_interval(self, _now: datetime) -> None:
"""Handle a refresh interval occurrence."""
self._unsub_refresh = None
await self._async_refresh(log_failures=True, scheduled=True)
async def async_request_refresh(self) -> None:
"""Request a refresh.
Refresh will wait a bit to see if it can batch them.
"""
await self._debounced_refresh.async_call()
async def _async_update_data(self) -> T:
"""Fetch the latest data from the source."""
if self.update_method is None:
raise NotImplementedError("Update method not implemented")
return await self.update_method()
async def async_config_entry_first_refresh(self) -> None:
"""Refresh data for the first time when a config entry is setup.
Will automatically raise ConfigEntryNotReady if the refresh
fails. Additionally logging is handled by config entry setup
to ensure that multiple retries do not cause log spam.
"""
await self._async_refresh(log_failures=False, raise_on_auth_failed=True)
if self.last_update_success:
return
ex = ConfigEntryNotReady()
ex.__cause__ = self.last_exception
raise ex
async def async_refresh(self) -> None:
"""Refresh data and log errors."""
await self._async_refresh(log_failures=True)
async def _async_refresh( # noqa: C901
self,
log_failures: bool = True,
raise_on_auth_failed: bool = False,
scheduled: bool = False,
) -> None:
"""Refresh data."""
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
self._debounced_refresh.async_cancel()
if scheduled and self.hass.is_stopping:
return
start = monotonic()
auth_failed = False
try:
self.data = await self._async_update_data()
except (asyncio.TimeoutError, requests.exceptions.Timeout) as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error("Timeout fetching %s data", self.name)
self.last_update_success = False
except (aiohttp.ClientError, requests.exceptions.RequestException) as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error("Error requesting %s data: %s", self.name, err)
self.last_update_success = False
except urllib.error.URLError as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
if err.reason == "timed out":
self.logger.error("Timeout fetching %s data", self.name)
else:
self.logger.error(
"Error requesting %s data: %s", self.name, err
)
self.last_update_success = False
except UpdateFailed as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error("Error fetching %s data: %s", self.name, err)
self.last_update_success = False
except ConfigEntryAuthFailed as err:
auth_failed = True
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error(
"Authentication failed while fetching %s data: %s",
self.name,
err,
)
self.last_update_success = False
if raise_on_auth_failed:
raise
if self.config_entry:
self.config_entry.async_start_reauth(self.hass)
except NotImplementedError as err:
self.last_exception = err
raise err
except Exception as err: # pylint: disable=broad-except
self.last_exception = err
self.last_update_success = False
self.logger.exception(
"Unexpected error fetching %s data: %s", self.name, err
)
else:
if not self.last_update_success:
self.last_update_success = True
self.logger.info("Fetching %s data recovered", self.name)
finally:
self.logger.debug(
"Finished fetching %s data in %.3f seconds (success: %s)",
self.name,
monotonic() - start,
self.last_update_success,
)
if not auth_failed and self._listeners and not self.hass.is_stopping:
self._schedule_refresh()
for update_callback in self._listeners:
update_callback()
@callback
def async_set_updated_data(self, data: T) -> None:
"""Manually update data, notify listeners and reset refresh interval."""
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
self._debounced_refresh.async_cancel()
self.data = data
self.last_update_success = True
self.logger.debug(
"Manually updated %s data",
self.name,
)
if self._listeners:
self._schedule_refresh()
for update_callback in self._listeners:
update_callback()
@callback
def _async_stop_refresh(self, _: Event) -> None:
"""Stop refreshing when Home Assistant is stopping."""
self.update_interval = None
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
class CoordinatorEntity(Generic[T], entity.Entity):
"""A class for entities using DataUpdateCoordinator."""
def __init__(self, coordinator: DataUpdateCoordinator[T]) -> None:
"""Create the entity with a DataUpdateCoordinator."""
self.coordinator = coordinator
@property
def should_poll(self) -> bool:
"""No need to poll. Coordinator notifies entity of updates."""
return False
@property
def available(self) -> bool:
"""Return if entity is available."""
return self.coordinator.last_update_success
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self.async_on_remove(
self.coordinator.async_add_listener(self._handle_coordinator_update)
)
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self.async_write_ha_state()
async def async_update(self) -> None:
"""Update the entity.
Only used by the generic entity update service.
"""
# Ignore manual update requests if the entity is disabled
if not self.enabled:
return
await self.coordinator.async_request_refresh()
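# A minimal usage sketch (not part of Home Assistant itself) of the pattern an
# integration typically follows with DataUpdateCoordinator and
# CoordinatorEntity; ``my_api`` and ``MySensor`` are hypothetical names.
#
#     async def async_setup_entry(hass, entry, async_add_entities):
#         async def async_update_data():
#             try:
#                 return await my_api.fetch_data()
#             except my_api.ApiError as err:
#                 raise UpdateFailed(f"API error: {err}") from err
#
#         coordinator = DataUpdateCoordinator(
#             hass,
#             logging.getLogger(__name__),
#             name="my_sensor",
#             update_method=async_update_data,
#             update_interval=timedelta(seconds=30),
#         )
#         # Raises ConfigEntryNotReady if the first refresh fails.
#         await coordinator.async_config_entry_first_refresh()
#         async_add_entities([MySensor(coordinator)])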
| {
"content_hash": "add2588df1295190afa20f8d8c1679d1",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 87,
"avg_line_length": 34.294642857142854,
"alnum_prop": 0.5957649917556191,
"repo_name": "FreekingDean/home-assistant",
"id": "2203ab240ef570962932c057624a245a9f36568b",
"size": "11523",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/update_coordinator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from reprlib import *
else:
__future_module__ = True
from repr import *
| {
"content_hash": "a1d778ade82a6f2a076de7d539cb2acd",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 38,
"avg_line_length": 21.125,
"alnum_prop": 0.650887573964497,
"repo_name": "noisemaster/AdamTestBot",
"id": "0a40eb09424575d4b756b08c4af4982a6cf5a071",
"size": "169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "future/moves/reprlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "54"
},
{
"name": "Python",
"bytes": "3464312"
},
{
"name": "Shell",
"bytes": "406"
}
],
"symlink_target": ""
} |
from django.utils.translation import gettext_lazy as _
from shop.models.delivery import BaseDelivery
class Delivery(BaseDelivery):
"""Default materialized model for OrderShipping"""
class Meta(BaseDelivery.Meta):
verbose_name = _("Delivery")
verbose_name_plural = _("Deliveries")
| {
"content_hash": "88e042639baa35ddf9402d2c335236f5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 34,
"alnum_prop": 0.7222222222222222,
"repo_name": "awesto/django-shop",
"id": "54b532b5a1f5d8609cd458809e16b0a06e9814b0",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/models/defaults/delivery.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "107122"
},
{
"name": "JavaScript",
"bytes": "51946"
},
{
"name": "Python",
"bytes": "588560"
}
],
"symlink_target": ""
} |
import json
import logging
logger = logging.getLogger(__name__)
def load_encoded_json(encoded_data):
"""
Args: encoded_data (str):
Encoded JSON data
Returns:
data (dict):
Dictionary of decoded JSON content
"""
# TODO(joshblum): grep around the code base and use this function where we
# can
try:
json_data = encoded_data.decode('utf-8')
data = json.loads(json_data)
except (AttributeError, ValueError, UnicodeError) as e:
logger.warning('Json decode error {}'.format(str(e)))
data = {}
return data
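# A small usage sketch (not part of the original module) exercising
# load_encoded_json on a UTF-8 encoded payload such as a webhook body.
if __name__ == '__main__':
    payload = '{"task_id": 42, "status": "complete"}'.encode('utf-8')
    assert load_encoded_json(payload) == {'task_id': 42, 'status': 'complete'}
    # Malformed or non-UTF-8 input is logged as a warning and yields {}.
    assert load_encoded_json(b'\xff not json') == {}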
| {
"content_hash": "44ca3a2ddc384f01ce757812e25beb64",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 25.956521739130434,
"alnum_prop": 0.6147403685092128,
"repo_name": "b12io/orchestra",
"id": "8dcb59fd949b72017c8dcaf149f67bcad31c3055",
"size": "597",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "orchestra/utils/load_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50496"
},
{
"name": "HTML",
"bytes": "101830"
},
{
"name": "JavaScript",
"bytes": "353673"
},
{
"name": "Makefile",
"bytes": "1234"
},
{
"name": "Python",
"bytes": "975395"
},
{
"name": "SCSS",
"bytes": "32860"
},
{
"name": "Shell",
"bytes": "26"
},
{
"name": "TypeScript",
"bytes": "20983"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutGenerators(Koan):
def test_generating_values_on_the_fly(self):
result = list()
bacon_generator = (n + ' bacon' for \
n in ['crunchy', 'veggie', 'danish'])
for bacon in bacon_generator:
result.append(bacon)
self.assertEqual(['crunchy bacon', 'veggie bacon', 'danish bacon'], result)
def test_generators_are_different_to_list_comprehensions(self):
num_list = [x * 2 for x in range(1, 3)]
# print "num_list:", num_list
num_generator = (x * 2 for x in range(1, 3))
# print "num_generator:", type(list(num_generator)[0])
self.assertEqual(2, num_list[0])
# A generator has to be iterated through.
self.assertEqual(2, list(num_generator)[0])
# Both list comprehensions and generators can be iterated
        # through. However, a generator function is only called on the
# first iteration. The values are generated on the fly instead
# of stored.
#
# Generators are more memory friendly, but less versatile
def test_generator_expressions_are_a_one_shot_deal(self):
dynamite = ('Boom!' for n in range(3))
attempt1 = list(dynamite)
attempt2 = list(dynamite)
self.assertEqual(['Boom!', 'Boom!', 'Boom!'], list(attempt1))
self.assertEqual([], list(attempt2))
# ------------------------------------------------------------------
def simple_generator_method(self):
yield 'peanut'
yield 'butter'
yield 'and'
yield 'jelly'
def test_generator_method_will_yield_values_during_iteration(self):
result = list()
for item in self.simple_generator_method():
result.append(item)
self.assertEqual(['peanut', 'butter', 'and', 'jelly'], result)
def test_coroutines_can_take_arguments(self):
result = self.simple_generator_method()
self.assertEqual('peanut', next(result))
self.assertEqual('butter', next(result))
result.close()
# ------------------------------------------------------------------
def square_me(self, seq):
for x in seq:
yield x * x
def test_generator_method_with_parameter(self):
result = self.square_me(range(2, 5))
self.assertEqual([4, 9, 16], list(result))
# ------------------------------------------------------------------
def sum_it(self, seq):
value = 0
for num in seq:
# The local state of 'value' will be retained between iterations
value += num
yield value
def test_generator_keeps_track_of_local_variables(self):
result = self.sum_it(range(2, 5))
self.assertEqual([2, 5, 9], list(result))
# ------------------------------------------------------------------
def generator_with_coroutine(self):
result = yield
yield result
def test_generators_can_take_coroutines(self):
generator = self.generator_with_coroutine()
# THINK ABOUT IT:
# Why is this line necessary?
#
# Hint: Read the "Specification: Sending Values into Generators"
# section of http://www.python.org/dev/peps/pep-0342/
next(generator)
self.assertEqual(3, generator.send(1 + 2))
def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
generator = self.generator_with_coroutine()
try:
generator.send(1 + 2)
except TypeError as ex:
self.assertMatch("can't send non-None value to a just-started generator", ex[0])
# ------------------------------------------------------------------
def yield_tester(self):
value = yield
if value:
yield value
else:
yield 'no value'
def test_generators_can_see_if_they_have_been_called_with_a_value(self):
generator = self.yield_tester()
next(generator)
self.assertEqual('with value', generator.send('with value'))
generator2 = self.yield_tester()
next(generator2)
self.assertEqual('no value', next(generator2))
def test_send_none_is_equivalent_to_next(self):
generator = self.yield_tester()
next(generator)
# 'next(generator)' is exactly equivalent to 'generator.send(None)'
self.assertEqual('no value', generator.send(None))
| {
"content_hash": "09581858db11e793341d9636f469271c",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 92,
"avg_line_length": 33.74242424242424,
"alnum_prop": 0.5541086663673103,
"repo_name": "Turivniy/Python_koans",
"id": "5c4b9242d8a5fb17e9fa433a4d1b6c7ca8b8c9df",
"size": "4652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_generators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "341550"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
import logging
import re
from fabric.api import execute, hide, sudo
RING_TYPES = ['account', 'container', 'object']
_host_lshw_output = {}
LOG = logging.getLogger(__name__)
def _parse_lshw_output(output, blockdev):
disks = re.split('\s*\*', output.strip())
alldisks = []
for disk in disks:
d = {}
for line in disk.split('\n'):
match = re.match('^-(\w+)', line)
if match:
d['class'] = match.group(1)
else:
match = re.match('^\s+([\w\s]+):\s+(.*)$', line)
if match:
key = re.sub('\s', '_', match.group(1))
val = match.group(2)
d[key] = val
if 'class' in d:
alldisks.append(d)
for d in alldisks:
if d['logical_name'] == blockdev:
serial = d['serial']
match = re.match('\s*(\d+)[MG]iB.*', d['size'])
if not match:
raise Exception("Could not find size of disk %s" % disk)
size = int(match.group(1))
return size, serial
def _fab_get_disk_size_serial(ip, blockdev):
with hide('running', 'stdout', 'stderr'):
global _host_lshw_output
output = None
if ip in _host_lshw_output:
output = _host_lshw_output[ip]
else:
output = sudo('lshw -C disk', pty=False, shell=False)
_host_lshw_output[ip] = output
return _parse_lshw_output(output, blockdev)
def get_disk_size_serial(ip, blockdev):
with hide('running', 'stdout', 'stderr'):
out = execute(_fab_get_disk_size_serial, ip, blockdev, hosts=[ip])
return out[ip]
def get_devices(zones, metadata=None):
devices = {}
for builder in RING_TYPES:
devices[builder] = {}
for zone, nodes in zones.iteritems():
devices[builder][zone] = []
for node, disks in nodes.iteritems():
ringdisks = []
# Add all disks designated for ringtype
if isinstance(disks['disks'], dict):
if builder in disks['disks']:
ringdisks += disks['disks'][builder]
elif isinstance(disks['disks'], list):
ringdisks = disks['disks']
for ringdisk in ringdisks:
device = {}
device['weight'] = None
device['metadata'] = metadata
device['device'] = None
device['ip'] = node
if not isinstance(ringdisk, dict):
device['device'] = ringdisk
match = re.match('(.*)\d+$', ringdisk)
blockdev = '/dev/%s' % match.group(1)
# treat size as weight and serial as metadata
weight, serial = get_disk_size_serial(node, blockdev)
device['weight'] = weight
if not metadata:
device['metadata'] = serial
else:
device['device'] = ringdisk['blockdev']
device['weight'] = ringdisk['weight']
if 'metadata' in ringdisk:
device['metadata'] = ringdisk['metadata']
devices[builder][zone].append(device)
return devices
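# A hypothetical ``zones`` mapping (not taken from the project docs) showing
# the two disk layouts get_devices() accepts: a plain list of partitions shared
# by every ring type, or a dict keyed by ring type with explicit weights.
#
#     zones = {
#         'z1': {
#             '192.168.0.10': {'disks': ['sdb1', 'sdc1']},
#             '192.168.0.11': {'disks': {
#                 'object': [{'blockdev': 'sdb1', 'weight': 100,
#                             'metadata': 'serial-123'}],
#             }},
#         },
#     }
#     devices = get_devices(zones)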
| {
"content_hash": "fe4e07218e0922f02025f0da5905e9ac",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 74,
"avg_line_length": 35.56382978723404,
"alnum_prop": 0.48818426562967393,
"repo_name": "blueboxgroup/swifttool",
"id": "451338938553073a64dbddeae154fd5f550a3034",
"size": "4037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swifttool/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29113"
}
],
"symlink_target": ""
} |
import pygame
import time as timer
import socket
import sys
#INITIALIZE WINDOW AND CONSOLE
pygame.init()
for x in range(0, 20):
print " "
print(" ____ _ ____ \n| _ \ ___ _ __ ___ ___ | |_ ___ / ___|__ _ _ __ \n| |_) / _ \ '_ ` _ \ / _ \| __/ _ \ | | / _` | '__|\n| _ < __/ | | | | | (_) | || __/ | |__| (_| | | \n|_| \_\___|_| |_| |_|\___/ \__\___| \____\__,_|_| \n")
IMGDIR = "CarIMG/"
bootstrapArgs = sys.argv
#Define Keys for Input
FW = pygame.K_w
BK = pygame.K_s
LT = pygame.K_a
LC = pygame.K_c
LR = pygame.K_b
RT = pygame.K_d
RC = pygame.K_v
RR = pygame.K_n
SU = pygame.K_UP
SD = pygame.K_DOWN
ES = pygame.K_m
CS = pygame.K_RETURN
#DEFINE VARIABLES FOR WINDOW
display_width = 400
display_height = 600
gameDisplay = pygame.display.set_mode((display_width,display_height), pygame.HWSURFACE)
true = True
false = False
black = (0,0,0)
white = (255,255,255)
clock = pygame.time.Clock()
crashed = False
IPC = 0
IP = bootstrapArgs[1]
#IP = "127.0.0.1"
#IP = raw_input('What is the Raspberry Pi IP you are trying to connect to?')
#DEFINE FUNCTIONS
def isIPv4(address):
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
def start():
print "Logging into Pi, At IP: " + IP
if (isIPv4(IP)):
BUFFER_SIZE = 1024
MESSAGE = "RUPi"
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((IP, 45949))
s.send(MESSAGE)
data = s.recv(BUFFER_SIZE)
except socket.error:
print "Failed Connecting to Pi. Please restart this script and launch the script on your Pi"
pygame.quit()
quit()
if(data != "Yes"):
print "Failed Connecting to Pi. Please restart this script and enter a valid IP address"
s.close()
pygame.quit()
quit()
else:
print "Failed. Please restart this script and enter a valid IP address"
pygame.quit()
quit()
pygame.display.set_caption('Raspberry Pi Controler - Listening - IP: '+IP)
s.close()
def text_objects(text, font):
textSurface = font.render(text, True, black)
return textSurface, textSurface.get_rect()
def showText(text,size,line,x,y,useFormula):
largeText = pygame.font.SysFont('Arial',size)
TextSurf, TextRect = text_objects(text, largeText)
if (useFormula == 1):
TextRect.center = ((display_width-(display_width-50)+x),(display_height-(display_height-50)+(line*size*2))+y)
else:
TextRect.center = (x,y)
gameDisplay.blit(TextSurf, TextRect)
pygame.display.update()
def updateScreen():
gameDisplay.fill(white)
showText("Key Mapping",25,0,10,10,true)
showText("Forward : W",14,1,10,10,true)
showText("Back : S",14,2,10,10,true)
showText("Turn Left : A",14,3,10,10,true)
showText("Curve Left : C",14,4,10,10,true)
showText("Rotate Left : B",14,5,10,10,true)
showText("Turn Right : D",14,6,10,10,true)
showText("Curve Right : V",14,7,10,10,true)
showText("Rotate Right : N",14,8,10,10,true)
showText("EStop : M",14,9,10,10,true)
showText("Slow Stop : RETURN",14,10,10,10,true)
showText("Speed Up : UP",14,11,10,10,true)
showText("Speed Down : DOWN",14,12,10,10,true)
pygame.display.update()
def sendInstruction(ltrsSend):
doImage = true
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((IP, 45949))
s.send(ltrsSend)
s.close()
#if(ltrsSend == "KEYUP"):
# doImage = false
#elif(ltrsSend == "STOP"):
# doImage = false
#elif(ltrsSend == "SU"):
# doImage = false
#elif(ltrsSend == "SD"):
# doImage = false
#elif(ltrsSend == "IPC"):
# doImage = false
#if(doImage == true):
# gameDisplay.blit(images[ltrsSend],(0,0))
def testIPCNew():
sa = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sa.connect((IP, 45949))
sendInstruction("IPC")
IPC = int(sa.recv(1045))
start()
updateScreen()
while not crashed:
# render text
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
############################
if event.type == pygame.KEYDOWN:
if event.key == FW:
print "Forward"
sendInstruction("FW")
elif event.key == BK:
print "Back"
sendInstruction("BK")
elif event.key == LT:
print "Left Turn"
sendInstruction("LT")
elif event.key == RT:
print "Right Turn"
sendInstruction("RT")
elif event.key == SU:
print "Speed Up"
sendInstruction("SU")
elif event.key == SD:
print "Speed Down"
sendInstruction("SD")
elif event.key == ES:
print "EStop"
sendInstruction("ES")
elif event.key == CS:
print "Slow Stop"
sendInstruction("CS")
elif event.key == LC:
print "Left Curve"
sendInstruction("LC")
elif event.key == LR:
print "Left Rotate"
sendInstruction("LR")
elif event.key == RC:
print "Right Curve"
sendInstruction("RC")
elif event.key == RR:
print "Right Rotate"
sendInstruction("RR")
#testIPCNew()
updateScreen()
if event.type == pygame.KEYUP:
#testIPCNew()
updateScreen()
sendInstruction("KEYUP")
######################
##
##
clock.tick(120)
sendInstruction("STOP")
pygame.quit()
quit()
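# A hypothetical Pi-side listener sketch (the real server script ships
# separately with the project) illustrating the wire protocol this client
# assumes: "RUPi" is answered with "Yes", and every other short code (FW, BK,
# LT, ES, KEYUP, STOP, ...) arrives on its own connection.
#
#     import socket
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind(('0.0.0.0', 45949))
#     server.listen(1)
#     while True:
#         conn, addr = server.accept()
#         code = conn.recv(1024)
#         if code == "RUPi":
#             conn.send("Yes")  # handshake expected by start()
#         else:
#             print "received command:", code
#         conn.close()
#         if code == "STOP":
#             break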
| {
"content_hash": "79f842dbeef3d23ba5f1078a75d3dea9",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 284,
"avg_line_length": 28.193693693693692,
"alnum_prop": 0.5230867550726953,
"repo_name": "ComputerCandy/PiPyBot",
"id": "876b3b5a8dbe5a9b9ba0c34ef9dfbd174610bfba",
"size": "6259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Client/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "98"
},
{
"name": "Python",
"bytes": "97570"
}
],
"symlink_target": ""
} |
"""
CacheManager provides encapsulation to the caching mechanism used in GGRC
for resource collection.
"""
from cache import all_cache_entries, all_mapping_entries
class CacheManager:
"""Cache manager provides encapsulation to caching mechanism such as
Memcache.
Attributes:
cache: Ordered dictionary of all cache objects derived from base class
(Cache)
supported_classes: Model plural table name for a supported resource type
supported_mappings: Mapping entry tuples for a supported resource type
factory: Factory class to create cache object
new, dirty, deleted: temporary dictionaries used in session event listeners
before and after flush
marked_for_<op>: dictionaries used in session event listeners after flush,
before and after commit
Returns:
None
"""
factory = None
def __init__(self):
pass
def initialize(self, cache):
"""Initialize Cache Manager, configure cache mechanism."""
self.supported_classes = {}
for cache_entry in all_cache_entries():
self.supported_classes[cache_entry.class_name] = cache_entry.model_plural
self.supported_mappings = {}
for mapping_entry in all_mapping_entries():
self.supported_mappings.setdefault(mapping_entry.class_name, [])
self.supported_mappings[mapping_entry.class_name].append(mapping_entry)
self.cache_object = cache
self.new = {}
self.dirty = {}
self.deleted = {}
self.marked_for_add = {}
self.marked_for_update = {}
self.marked_for_delete = []
def get_collection(self, category, resource, filter):
"""Get collection from cache.
Args:
category: collection or stub
resource: regulation, controls, etc.
filter: dictionary containing ids and optional attrs
Returns:
JSON string representation
"""
if not self.is_caching_supported(category, resource, filter,
'get_collection'):
return None
ret = self.cache_object.get(category, resource, filter)
return ret
def add_collection(self, category, resource, data, expiration_time=0):
"""Add collection in cache.
Args:
category: collection or stub
resource: regulation, controls, etc.
data: dictionary containing ids and attrs to update
Returns:
DTO formatted string, e.g. JSON string representation
"""
if not self.is_caching_supported(category, resource, data,
'add_collection'):
return None
ret = self.cache_object.add(category, resource, data, expiration_time)
return ret
def update_collection(self, category, resource, data, expiration_time=0):
"""Update collection in cache.
Args:
category: collection or stub
resource: regulation, controls, etc.
data: dictionary containing ids and attrs to update
Returns:
JSON string representation
"""
if not self.is_caching_supported(category, resource, data,
'update_collection'):
return None
ret = self.cache_object.update(category, resource, data, expiration_time)
return ret
def delete_collection(self, category, resource, data, lockadd_seconds=0):
"""Delete collection from cache.
Args:
category: collection or stub
resource: regulation, controls, etc.
data: dictionary containing ids of the resource to delete
Returns:
JSON string representation
"""
if not self.is_caching_supported(category, resource, data,
'delete_collection'):
return None
ret = self.cache_object.remove(category, resource, data, lockadd_seconds)
return ret
def is_caching_supported(self, category, resource, data=None,
operation=None):
"""Check if caching is supported.
Args:
category: collection or stub
resource: regulation, controls, etc.
data: additional data such as context information, Default is None
operation: operation performed such as get, update, delete or add
Returns:
True if caching is supported for the category and resource.
False otherwise
"""
# TODO(dan): Leverage policy manager to apply configured policies
#
    return self.cache_object.is_caching_supported(category, resource)
def bulk_get(self, data):
"""Perform Bulk Get operations in cache for specified data.
Args:
data: keys for bulk get
Returns:
Result of cache get_multi
"""
return self.cache_object.get_multi(data)
def bulk_add(self, data, expiration_time=0):
"""Perform Bulk Add operations in cache for specified data.
Args:
data: keys for bulk add
Returns:
Result of cache add_multi
"""
return self.cache_object.add_multi(data, expiration_time)
def bulk_update(self, data, expiration_time=0):
"""Perform Bulk update operations in cache for specified data.
Does a bulk get on all the items in data and then performs bulk update only
for items present in cache.
Args:
data: keys for bulk update
Returns:
Result of cache update_multi
"""
get_result = self.cache_object.get_multi(data.keys())
for data_key, data_value in data.items():
for update_key, update_value in data_value.items():
if data_key in get_result:
get_result[data_key][update_key] = update_value
return self.cache_object.update_multi(get_result, expiration_time)
def bulk_delete(self, data, lockadd_seconds):
"""Perform Bulk Delete operations in cache for specified data.
Args:
data: keys for bulk delete
Returns:
Result of cache remove_multi
"""
return self.cache_object.remove_multi(data, lockadd_seconds)
def clean(self):
"""Cleanup cache manager resources."""
self.cache_object.clean()
return True
def clear_cache(self):
"""Clear temporary dictionaries used for cache operations."""
self.new = {}
self.dirty = {}
self.deleted = {}
self.marked_for_add = {}
self.marked_for_update = {}
self.marked_for_delete = []
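# A minimal usage sketch (not part of the original module); the memcache-backed
# cache object passed to initialize() is an assumption based on the ``cache``
# package imported above.
#
#     cache_manager = CacheManager()
#     cache_manager.initialize(memcache_backend)  # any Cache-derived object
#     collection = cache_manager.get_collection(
#         'collection', 'controls', {'ids': [1, 2, 3]})
#     if collection is None:
#         # Cache miss or caching unsupported: fall back to the database and
#         # repopulate the cache for subsequent requests.
#         cache_manager.add_collection('collection', 'controls', fetched_data)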
| {
"content_hash": "c240f8bc5ffe0092217c6c703ca0ffc8",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 31.11,
"alnum_prop": 0.660559305689489,
"repo_name": "edofic/ggrc-core",
"id": "18156b846d6741c988d9e6eed4322413784d6344",
"size": "6336",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "src/ggrc/cache/cachemanager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "191076"
},
{
"name": "Cucumber",
"bytes": "136322"
},
{
"name": "HTML",
"bytes": "1069698"
},
{
"name": "JavaScript",
"bytes": "1704619"
},
{
"name": "Makefile",
"bytes": "7103"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2385925"
},
{
"name": "Shell",
"bytes": "30802"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import xml.etree.ElementTree as ET
from flask import request, current_app, abort, flash
from sqlalchemy.orm.exc import NoResultFound
from flask_wtf import Form
from flask_login import current_user
from oauthlib.oauth2 import OAuth2Error
from .oauth import OAuthMethod, OAuthUser
from .. import db
from .models import Group, Pilot
from ..util.fields import ImageField
from ..util.crest import check_crest_response
from ..versioned_static import static_file
class EveSSO(OAuthMethod):
def __init__(self, singularity=False, **kwargs):
kwargs.setdefault('base_url', u'')
if singularity:
self.domain = 'https://sisilogin.testeveonline.com'
self.xml_root = 'https://api.testeveonline.com/'
else:
self.domain = 'https://login.eveonline.com'
self.xml_root = 'https://api.eveonline.com/'
kwargs.setdefault('access_token_url', self.domain + '/oauth/token')
kwargs.setdefault('authorize_url', self.domain + '/oauth/authorize')
kwargs.setdefault('refresh_token_url', self.domain + '/oauth/token')
kwargs.setdefault('scope', ['publicData'])
kwargs.setdefault('method', 'POST')
kwargs.setdefault('app_key', 'EVE_SSO')
kwargs.setdefault('name', u'EVE SSO')
super(EveSSO, self).__init__(**kwargs)
def _get_user_data(self):
if not hasattr(request, '_user_data'):
try:
resp = self.session.get(self.domain + '/oauth/verify').json()
current_app.logger.debug(u"SSO lookup results: {}".format(resp))
except OAuth2Error as e:
current_app.logger.error(u"Error verifying user data for user "
u"'{}': {}".format(current_user, e))
# The session can be bugged in some situations. Kill it to be
# sure.
del self.session
raise
try:
char_data = {
'name': resp[u'CharacterName'],
'id': resp[u'CharacterID'],
'owner_hash': resp[u'CharacterOwnerHash'],
}
request._user_data = char_data
except (TypeError, KeyError):
abort(500, u"Error in receiving EVE SSO response: {}".format(
resp))
return request._user_data
def form(self):
class EveSSOForm(Form):
submit = ImageField(src=static_file('evesso.png'),
alt=u"Log in with EVE Online")
return EveSSOForm
def get_user(self):
character = self._get_user_data()
try:
user = EveSSOUser.query.filter_by(
owner_hash=character['owner_hash'],
authmethod=self.name).one()
except NoResultFound:
user = EveSSOUser(
character['name'],
character['owner_hash'],
self.name)
db.session.add(user)
db.session.commit()
return user
def get_pilots(self):
# The EVE SSO API only authenticates one character at a time, so we're
# going to have a 1-to-1 mapping of Users to Pilots
character = self._get_user_data()
pilot = Pilot.query.get(int(character['id']))
if pilot is None:
pilot = Pilot(None, character['name'], character['id'])
db.session.add(pilot)
db.session.commit()
return [pilot]
def get_groups(self):
"""Set the user's groups for their pilot.
        At this time, EVE SSO only gives us character access, so the groups
        are just the pilot's corporation, plus their alliance if they are in
        one. In the future, this method may also add groups for mailing lists.
"""
character = self._get_user_data()
info_url = self.xml_root + 'eve/CharacterInfo.xml.aspx'
info_response = current_app.requests_session.get(info_url,
params={'characterID': character['id']})
api_tree = ET.fromstring(info_response.text).find('result')
corp_name = api_tree.find('corporation')
corp_id = api_tree.find('corporationID')
corporation = {
'name': corp_name.text,
'id': int(corp_id.text),
}
try:
corp_group = EveSSOGroup.query.filter_by(alliance=False,
ccp_id=int(corp_id.text),
authmethod=self.name).one()
except NoResultFound:
corp_group = EveSSOGroup(corp_name.text, int(corp_id.text), False,
self.name)
db.session.add(corp_group)
groups = [corp_group]
alliance_name = api_tree.find('alliance')
alliance_id = api_tree.find('allianceID')
# If there's an alliance, set it up
if alliance_name is not None and alliance_id is not None:
try:
alliance_group = EveSSOGroup.query.filter_by(alliance=True,
ccp_id=int(alliance_id.text),
authmethod=self.name).one()
except NoResultFound:
alliance_group = EveSSOGroup(alliance_name.text,
int(alliance_id.text), True, self.name)
db.session.add(alliance_group)
groups.append(alliance_group)
db.session.commit()
return groups
class EveSSOUser(OAuthUser):
id = db.Column(db.Integer, db.ForeignKey(OAuthUser.id), primary_key=True)
owner_hash = db.Column(db.String(50), nullable=False, unique=True,
index=True)
def __init__(self, username, owner_hash, authmethod, groups=None, **kwargs):
self.owner_hash = owner_hash
super(EveSSOUser, self).__init__(username, authmethod, **kwargs)
class EveSSOGroup(Group):
id = db.Column(db.Integer, db.ForeignKey(Group.id), primary_key=True)
ccp_id = db.Column(db.Integer, nullable=False, unique=True, index=True)
alliance = db.Column(db.Boolean(name='alliance'), nullable=False, default=False,
index=True)
__table_args__ = (
db.UniqueConstraint(ccp_id, alliance, name='alliance_ccp_id'),
)
def __init__(self, name, ccp_id, alliance, authmethod, **kwargs):
self.ccp_id = ccp_id
self.alliance = alliance
super(EveSSOGroup, self).__init__(name, authmethod, **kwargs)
| {
"content_hash": "e66e4191071d73208d3a54295a8c819d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 84,
"avg_line_length": 38.82634730538922,
"alnum_prop": 0.5780382479950648,
"repo_name": "paxswill/evesrp",
"id": "e7672e502a2bf145b7eee3fa65285c346d929a1a",
"size": "6484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/evesrp/auth/evesso.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "799"
},
{
"name": "CoffeeScript",
"bytes": "76643"
},
{
"name": "HTML",
"bytes": "68889"
},
{
"name": "JavaScript",
"bytes": "532"
},
{
"name": "Makefile",
"bytes": "11752"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "843460"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import CounterField, JSONField
from djblets.db.query import get_object_or_none
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.managers import ReviewManager
from reviewboard.reviews.models.base_comment import BaseComment
from reviewboard.reviews.models.diff_comment import Comment
from reviewboard.reviews.models.file_attachment_comment import \
FileAttachmentComment
from reviewboard.reviews.models.review_request import (ReviewRequest,
fetch_issue_counts)
from reviewboard.reviews.models.screenshot_comment import ScreenshotComment
from reviewboard.reviews.signals import (reply_publishing, reply_published,
review_publishing, review_published)
@python_2_unicode_compatible
class Review(models.Model):
"""A review of a review request."""
SHIP_IT_TEXT = 'Ship It!'
review_request = models.ForeignKey(ReviewRequest,
related_name="reviews",
verbose_name=_("review request"))
user = models.ForeignKey(User, verbose_name=_("user"),
related_name="reviews")
timestamp = models.DateTimeField(_('timestamp'), default=timezone.now)
public = models.BooleanField(_("public"), default=False)
ship_it = models.BooleanField(
_("ship it"),
default=False,
help_text=_("Indicates whether the reviewer thinks this code is "
"ready to ship."))
base_reply_to = models.ForeignKey(
"self",
blank=True,
null=True,
related_name="replies",
verbose_name=_("Base reply to"),
help_text=_("The top-most review in the discussion thread for "
"this review reply."))
email_message_id = models.CharField(_("e-mail message ID"), max_length=255,
blank=True, null=True)
time_emailed = models.DateTimeField(_("time e-mailed"), null=True,
default=None, blank=True)
body_top = models.TextField(
_("body (top)"),
blank=True,
help_text=_("The review text shown above the diff and screenshot "
"comments."))
body_top_rich_text = models.BooleanField(
_("body (top) in rich text"),
default=False)
body_bottom = models.TextField(
_("body (bottom)"),
blank=True,
help_text=_("The review text shown below the diff and screenshot "
"comments."))
body_bottom_rich_text = models.BooleanField(
_("body (bottom) in rich text"),
default=False)
body_top_reply_to = models.ForeignKey(
"self", blank=True, null=True,
related_name="body_top_replies",
verbose_name=_("body (top) reply to"),
help_text=_("The review that the body (top) field is in reply to."))
body_bottom_reply_to = models.ForeignKey(
"self", blank=True, null=True,
related_name="body_bottom_replies",
verbose_name=_("body (bottom) reply to"),
help_text=_("The review that the body (bottom) field is in reply to."))
comments = models.ManyToManyField(Comment, verbose_name=_("comments"),
related_name="review", blank=True)
screenshot_comments = models.ManyToManyField(
ScreenshotComment,
verbose_name=_("screenshot comments"),
related_name="review",
blank=True)
file_attachment_comments = models.ManyToManyField(
FileAttachmentComment,
verbose_name=_("file attachment comments"),
related_name="review",
blank=True)
extra_data = JSONField(null=True)
# Deprecated and no longer used for new reviews as of 2.0.9.
rich_text = models.BooleanField(_("rich text"), default=False)
# XXX Deprecated. This will be removed in a future release.
reviewed_diffset = models.ForeignKey(
DiffSet, verbose_name="Reviewed Diff",
blank=True, null=True,
help_text=_("This field is unused and will be removed in a future "
"version."))
# Set this up with a ReviewManager to help prevent race conditions and
# to fix duplicate reviews.
objects = ReviewManager()
@cached_property
def ship_it_only(self):
"""Return if the review only contains a "Ship It!".
Returns:
bool: ``True`` if the review is only a "Ship It!" and ``False``
otherwise.
"""
return (self.ship_it and
(not self.body_top or
self.body_top == Review.SHIP_IT_TEXT) and
not (self.body_bottom or
self.comments.exists() or
self.file_attachment_comments.exists() or
self.screenshot_comments.exists()))
def get_participants(self):
"""Returns a list of participants in a review's discussion."""
# This list comprehension gives us every user in every reply,
# recursively. It looks strange and perhaps backwards, but
# works. We do it this way because get_participants gives us a
# list back, which we can't stick in as the result for a
# standard list comprehension. We could opt for a simple for
        # loop and concatenate the list, but this is more fun.
return [self.user] + \
[u for reply in self.replies.all()
for u in reply.participants]
participants = property(get_participants)
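    # Illustrative note (not part of the original model): the comprehension in
    # get_participants() is equivalent to the plainer loop below.
    #
    #     users = [self.user]
    #     for reply in self.replies.all():
    #         users.extend(reply.participants)
    #     return users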
def is_accessible_by(self, user):
"""Returns whether the user can access this review."""
return ((self.public or self.user == user or user.is_superuser) and
self.review_request.is_accessible_by(user))
def is_mutable_by(self, user):
"""Returns whether the user can modify this review."""
return ((not self.public and
(self.user == user or user.is_superuser)) and
self.review_request.is_accessible_by(user))
def __str__(self):
return "Review of '%s'" % self.review_request
def is_reply(self):
"""Returns whether or not this review is a reply to another review."""
return self.base_reply_to_id is not None
is_reply.boolean = True
def public_replies(self):
"""Returns a list of public replies to this review."""
return self.replies.filter(public=True)
def public_body_top_replies(self, user=None):
"""Returns a list of public replies to this review's body top."""
if hasattr(self, '_body_top_replies'):
return self._body_top_replies
else:
q = Q(public=True)
if user:
q = q | Q(user=user)
return self.body_top_replies.filter(q)
def public_body_bottom_replies(self, user=None):
"""Returns a list of public replies to this review's body bottom."""
if hasattr(self, '_body_bottom_replies'):
return self._body_bottom_replies
else:
q = Q(public=True)
if user:
q = q | Q(user=user)
return self.body_bottom_replies.filter(q)
def get_pending_reply(self, user):
"""Returns the pending reply owned by the specified user."""
if user.is_authenticated():
return get_object_or_none(Review,
user=user,
public=False,
base_reply_to=self)
return None
def save(self, **kwargs):
self.timestamp = timezone.now()
super(Review, self).save()
def publish(self, user=None, trivial=False, to_submitter_only=False):
"""Publishes this review.
This will make the review public and update the timestamps of all
contained comments.
"""
if not user:
user = self.user
self.public = True
if self.is_reply():
reply_publishing.send(sender=self.__class__, user=user, reply=self)
else:
review_publishing.send(sender=self.__class__, user=user,
review=self)
self.save()
self.comments.update(timestamp=self.timestamp)
self.screenshot_comments.update(timestamp=self.timestamp)
self.file_attachment_comments.update(timestamp=self.timestamp)
# Update the last_updated timestamp and the last review activity
# timestamp on the review request.
self.review_request.last_review_activity_timestamp = self.timestamp
self.review_request.save(
update_fields=['last_review_activity_timestamp', 'last_updated'])
if self.is_reply():
reply_published.send(sender=self.__class__,
user=user, reply=self, trivial=trivial)
else:
issue_counts = fetch_issue_counts(self.review_request,
Q(pk=self.pk))
# Since we're publishing the review, all filed issues should be
# open.
assert issue_counts[BaseComment.RESOLVED] == 0
assert issue_counts[BaseComment.DROPPED] == 0
if self.ship_it:
ship_it_value = 1
else:
ship_it_value = 0
# Atomically update the issue count and Ship It count.
CounterField.increment_many(
self.review_request,
{
'issue_open_count': issue_counts[BaseComment.OPEN],
'issue_dropped_count': 0,
'issue_resolved_count': 0,
'shipit_count': ship_it_value,
})
review_published.send(sender=self.__class__,
user=user, review=self,
to_submitter_only=to_submitter_only)
def delete(self):
"""Deletes this review.
This will enforce that all contained comments are also deleted.
"""
self.comments.all().delete()
self.screenshot_comments.all().delete()
self.file_attachment_comments.all().delete()
super(Review, self).delete()
def get_absolute_url(self):
return "%s#review%s" % (self.review_request.get_absolute_url(),
self.pk)
def get_all_comments(self, **kwargs):
"""Return a list of all contained comments of all types."""
return (list(self.comments.filter(**kwargs)) +
list(self.screenshot_comments.filter(**kwargs)) +
list(self.file_attachment_comments.filter(**kwargs)))
class Meta:
app_label = 'reviews'
ordering = ['timestamp']
get_latest_by = 'timestamp'
| {
"content_hash": "ea36e525f0602e569338a6f0da93c65c",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 79,
"avg_line_length": 38.6426116838488,
"alnum_prop": 0.5873721654068474,
"repo_name": "KnowNo/reviewboard",
"id": "832783582655d29e1c1ebb96190486dbd6170724",
"size": "11245",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "reviewboard/reviews/models/review.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212721"
},
{
"name": "HTML",
"bytes": "179427"
},
{
"name": "JavaScript",
"bytes": "1463002"
},
{
"name": "Python",
"bytes": "3686127"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
from .resource import Resource
class ApplicationGatewayFirewallRuleSet(Resource):
"""A web application firewall rule set.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param provisioning_state: The provisioning state of the web application
firewall rule set.
:type provisioning_state: str
:param rule_set_type: Required. The type of the web application firewall
rule set.
:type rule_set_type: str
:param rule_set_version: Required. The version of the web application
firewall rule set type.
:type rule_set_version: str
:param rule_groups: Required. The rule groups of the web application
firewall rule set.
:type rule_groups:
list[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayFirewallRuleGroup]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'rule_set_type': {'required': True},
'rule_set_version': {'required': True},
'rule_groups': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'rule_set_type': {'key': 'properties.ruleSetType', 'type': 'str'},
'rule_set_version': {'key': 'properties.ruleSetVersion', 'type': 'str'},
'rule_groups': {'key': 'properties.ruleGroups', 'type': '[ApplicationGatewayFirewallRuleGroup]'},
}
def __init__(self, *, rule_set_type: str, rule_set_version: str, rule_groups, id: str=None, location: str=None, tags=None, provisioning_state: str=None, **kwargs) -> None:
super(ApplicationGatewayFirewallRuleSet, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.provisioning_state = provisioning_state
self.rule_set_type = rule_set_type
self.rule_set_version = rule_set_version
self.rule_groups = rule_groups
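# --- Illustrative usage sketch (not part of the original SDK module) ---
# A hypothetical construction of the model above; the OWASP type/version,
# location and tags are assumptions for illustration only.
def _example_firewall_rule_set():
    return ApplicationGatewayFirewallRuleSet(
        rule_set_type='OWASP',
        rule_set_version='3.0',
        rule_groups=[],
        location='westus',
        tags={'env': 'demo'})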
| {
"content_hash": "6b6ee10c43239132ef2cd55a4e7df99a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 175,
"avg_line_length": 40.596774193548384,
"alnum_prop": 0.6324990067540723,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "5df851b1412d1977be4d2971b524abc3635f7165",
"size": "2991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/application_gateway_firewall_rule_set_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import mox
import os
import ConfigParser
import random
import shutil
from nappingcat import config
class TestOfConfig(TestCase):
def setUp(self):
self.mox = mox.Mox()
self.filename = 'tests/.%d.conf' % random.randint(1,100)
def tearDown(self):
self.mox.UnsetStubs()
if os.path.exists(self.filename):
os.remove(self.filename)
def test_reads_from_appropriate_path_and_file(self):
f = open(self.filename, 'w')
print >>f,"""
[kittyconfig]
blah=3
""".strip()
f.flush()
f.close()
self.mox.StubOutWithMock(os.path, 'expanduser')
os.path.expanduser(mox.IsA(str)).AndReturn(self.filename)
self.mox.ReplayAll()
results = config.build_settings()
self.assertTrue(isinstance(results, ConfigParser.ConfigParser))
self.mox.VerifyAll()
def test_raises_parsingerror_with_bad_config(self):
f = open(self.filename, 'w')
print >>f,"""
[kittyconfig
blah=3
anrranl;faksdj
""".strip()
f.flush()
f.close()
self.mox.StubOutWithMock(os.path, 'expanduser')
os.path.expanduser(mox.IsA(str)).AndReturn(self.filename)
self.mox.ReplayAll()
self.assertRaises(ConfigParser.Error, config.build_settings)
self.mox.VerifyAll()
| {
"content_hash": "567aa11ecbe3b33bdd3f9f2ae91c2da5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 71,
"avg_line_length": 27.653061224489797,
"alnum_prop": 0.6302583025830258,
"repo_name": "chrisdickinson/nappingcat",
"id": "2a0274945f072934fb14909174015747ed66d9ea",
"size": "1355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "71011"
}
],
"symlink_target": ""
} |
import pytest
import unittest
from modules.sfp_talosintel import sfp_talosintel
from sflib import SpiderFoot
@pytest.mark.usefixtures
class TestModuleTalosintel(unittest.TestCase):
def test_opts(self):
module = sfp_talosintel()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_talosintel()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_talosintel()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_talosintel()
self.assertIsInstance(module.producedEvents(), list)
| {
"content_hash": "b664ff85023c66f9cd41af6959779766",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 28.73076923076923,
"alnum_prop": 0.7001338688085676,
"repo_name": "smicallef/spiderfoot",
"id": "10a888ca5ecc32bd0df7a50277bede65a5d0bd00",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/modules/test_sfp_talosintel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
"""Module for testing the add dns environment command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateDnsEnvironment(TestBrokerCommand):
def test_100_update_comments(self):
self.noouttest(["update_dns_environment", "--dns_environment", "ut-env",
"--comments", "New DNS env comments"])
def test_105_verify_update(self):
command = ["show", "dns", "environment", "--dns_environment", "ut-env"]
out = self.commandtest(command)
self.matchoutput(out, "DNS Environment: ut-env", command)
self.matchoutput(out, "Comments: New DNS env comments", command)
def test_110_clear_comments(self):
self.noouttest(["update_dns_environment", "--dns_environment", "ut-env",
"--comments", ""])
def test_115_verify_comments(self):
command = ["show", "dns", "environment", "--dns_environment", "ut-env"]
out = self.commandtest(command)
self.matchclean(out, "Comments", command)
def test_200_update_nonexistent(self):
command = ["update", "dns", "environment",
"--dns_environment", "no-such-env"]
out = self.notfoundtest(command)
self.matchoutput(out, "DNS Environment no-such-env not found.", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateDnsEnvironment)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "eac650c962d15b30a79abcd94cb132b9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 81,
"avg_line_length": 37.41463414634146,
"alnum_prop": 0.6323337679269883,
"repo_name": "quattor/aquilon",
"id": "305a3e69dfa6b0ef48523bc81b907152953b7f09",
"size": "2249",
"binary": false,
"copies": "2",
"ref": "refs/heads/upstream",
"path": "tests/broker/test_update_dns_environment.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
} |
import pytest
import netmiko
import time
from DEVICE_CREDS import *
def setup_module(module):
module.EXPECTED_RESPONSES = {
'base_prompt' : 'root@pynet-jnpr-srx1',
'router_prompt' : 'root@pynet-jnpr-srx1>',
'router_conf_mode' : 'root@pynet-jnpr-srx1#',
'interface_ip' : '10.220.88.39',
}
show_ver_command = 'show version'
multiple_line_command = 'show configuration'
module.basic_command = 'show interfaces terse'
SSHClass = netmiko.ssh_dispatcher(juniper_srx['device_type'])
net_connect = SSHClass(**juniper_srx)
module.show_version = net_connect.send_command(show_ver_command)
module.multiple_line_output = net_connect.send_command(multiple_line_command, delay_factor=2)
module.show_ip = net_connect.send_command(module.basic_command)
module.base_prompt = net_connect.base_prompt
# Test buffer clearing
net_connect.remote_conn.send(show_ver_command)
time.sleep(2)
net_connect.clear_buffer()
# Should not be anything there on the second pass
module.clear_buffer_check = net_connect.clear_buffer()
def test_disable_paging():
'''
    Verify paging is disabled by looking for a string that would only appear
    beyond the point where paging would normally occur
'''
assert 'security-zone untrust' in multiple_line_output
def test_verify_ssh_connect():
'''
Verify the connection was established successfully
'''
assert 'JUNOS Software Release' in show_version
def test_verify_send_command():
'''
Verify a command can be sent down the channel successfully
'''
assert EXPECTED_RESPONSES['interface_ip'] in show_ip
def test_base_prompt():
'''
Verify the router base_prompt is detected correctly
'''
assert base_prompt == EXPECTED_RESPONSES['base_prompt']
def test_strip_prompt():
'''
Ensure the router prompt is not in the command output
'''
assert EXPECTED_RESPONSES['base_prompt'] not in show_version
def test_strip_command():
'''
Ensure that the command that was executed does not show up in the
command output
'''
assert basic_command not in show_ip
def test_normalize_linefeeds():
'''
Ensure no '\r' sequences
'''
assert not '\r' in show_ip
def test_clear_buffer():
'''
Test that clearing the buffer works
'''
assert clear_buffer_check is None
| {
"content_hash": "8d09847d4d95c18c5f3ae8b5c5bac8bf",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 97,
"avg_line_length": 25.91304347826087,
"alnum_prop": 0.6669463087248322,
"repo_name": "nvoron23/netmiko",
"id": "3d735c139db1753984fbaf369ded9f6d9d2e313b",
"size": "2407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_juniper_srx.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72480"
}
],
"symlink_target": ""
} |
from os.path import expanduser
HOME = expanduser('~')
ROOT_PATH = HOME + '/Dropbox/Projeto/Entropia/Fidelidade'
TWITTER_DATA_PATH = HOME + "/Twitter.Info"
FRIENDS_INFO_PATH = TWITTER_DATA_PATH + '/Friends.Info'
INTERACTIONS_INFO_PATH = TWITTER_DATA_PATH + '/Interactions.Info'
FILE_NAMES = ['interacoesLikes', 'interacoesLikesEspanhol', 'interacoesLikesIngles', 'interacoesLikesPortugues',
'interacoesMencoes', 'interacoesMencoesEspanhol', 'interacoesMencoesIngles', 'interacoesMencoesPortugues',
'interacoesRetweets', 'interacoesRetweetsEspanhol', 'interacoesRetweetsIngles',
'interacoesRetweetsPortugues', 'interacoesUniao', 'interacoesUniaoEspanhol', 'interacoesUniaoIngles',
'interacoesUniaoPortugues']
MAX_NUMBER_OF_INTERVALS = 26
| {
"content_hash": "f33b41e9e3276893b1df99012ebf6ca9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 120,
"avg_line_length": 42.05263157894737,
"alnum_prop": 0.737171464330413,
"repo_name": "jblupus/PyLoyaltyProject",
"id": "4f0fcd45ccfaaa60245cc0f68536214c93b23f8e",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/project.backup/constants/constants.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "312869"
}
],
"symlink_target": ""
} |
from openerp.osv import fields, osv
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'),
'tax_calculation_rounding_method': fields.selection([
('round_per_line', 'Round per Line'),
('round_globally', 'Round Globally'),
], 'Tax Calculation Rounding Method',
help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."),
'overdue_msg': fields.text('Overdue Payments Message', translate=True),
}
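    # Illustrative note (not part of the original module), with made-up
    # figures: suppose two order lines each carry a computed tax of 0.004.
    #   Round per Line:  round(0.004) + round(0.004) = 0.00 + 0.00 = 0.00
    #   Round Globally:  round(0.004 + 0.004) = round(0.008) = 0.01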
_defaults = {
'expects_chart_of_accounts': True,
'tax_calculation_rounding_method': 'round_per_line',
'overdue_msg': '''Dear Sir/Madam,
Our records indicate that some payments on your account are still due. Please find details below.
If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below.
If you have any queries regarding your account, Please contact us.
Thank you in advance for your cooperation.
Best Regards,'''
}
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "3bdbe4b78e519dbb2c7af6f717359a1b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 610,
"avg_line_length": 59.645161290322584,
"alnum_prop": 0.7133585722011898,
"repo_name": "ntiufalara/openerp7",
"id": "a4624ae5ad5ef247281c2058b2c723b9ef74c4d7",
"size": "2828",
"binary": false,
"copies": "55",
"ref": "refs/heads/master",
"path": "openerp/addons/account/company.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import __main__
class Event(object):
"""
Event is a simple publish/subscribe based event dispatcher. It's a way
to add, or take advantage of, hooks in your application. If you want to
tie actions in with lower level classes you're developing within your
application, you can set events to fire, and then subscribe to them with
callback methods in other methods in your application.
    It attaches itself to the __main__ module. In order to use it,
    you must import it in your __main__ module, and make sure
    __main__ is imported and accessible in the methods where
    you want to use it.
For example, from sessions.py
# if the event class has been loaded, fire off the sessionDeleted event
if u"AEU_Events" in __main__.__dict__:
__main__.AEU_Events.fire_event(u"sessionDelete")
    You can then subscribe to session delete events, adding a callback
if u"AEU_Events" in __main__.__dict__:
__main__.AEU_Events.subscribe(u"sessionDelete", clear_user_session)
"""
def __init__(self):
self.events = []
def subscribe(self, event, callback, args = None):
"""
This method will subscribe a callback function to an event name.
Args:
event: The event to subscribe to.
callback: The callback method to run.
args: Optional arguments to pass with the callback.
Returns True
"""
if not {"event": event, "callback": callback, "args": args, } \
in self.events:
self.events.append({"event": event, "callback": callback, \
"args": args, })
return True
def unsubscribe(self, event, callback, args = None):
"""
This method will unsubscribe a callback from an event.
Args:
            event: The event to unsubscribe from.
            callback: The callback method to remove.
args: Optional arguments to pass with the callback.
Returns True
"""
if {"event": event, "callback": callback, "args": args, }\
in self.events:
self.events.remove({"event": event, "callback": callback,\
"args": args, })
return True
def fire_event(self, event = None):
"""
This method is what a method uses to fire an event,
initiating all registered callbacks
Args:
event: The name of the event to fire.
Returns True
"""
for e in self.events:
if e["event"] == event:
if type(e["args"]) == type([]):
e["callback"](*e["args"])
elif type(e["args"]) == type({}):
e["callback"](**e["args"])
elif e["args"] == None:
e["callback"]()
else:
e["callback"](e["args"])
return True
"""
Assign to the event class to __main__
"""
__main__.AEU_Events = Event()
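# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wiring a callback to an event; the
# callback and the argument dict below are assumptions for illustration only.
def _example_event_usage():
    def clear_user_session(session_id=None):
        # A real callback would remove the session from the datastore here.
        pass
    __main__.AEU_Events.subscribe(u"sessionDelete", clear_user_session,
                                  {"session_id": u"abc123"})
    __main__.AEU_Events.fire_event(u"sessionDelete")
    __main__.AEU_Events.unsubscribe(u"sessionDelete", clear_user_session,
                                    {"session_id": u"abc123"})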
| {
"content_hash": "54667f5814e7923cf82c855365d519ac",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 83,
"avg_line_length": 38.64957264957265,
"alnum_prop": 0.6437417072091994,
"repo_name": "yesudeep/pyebs",
"id": "ebab4933ff4d193aeb6204e280e8a5c1d36e11d3",
"size": "4522",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "appengine_utilities/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "282325"
},
{
"name": "Python",
"bytes": "100462"
}
],
"symlink_target": ""
} |
import sys
import json
import datetime
#NOTE: NEVER TESTED!!!
US_CHAR = 0x1F
try:
    args = sys.argv[1]
    # Fields are delimited by the ASCII unit separator character (0x1F).
    incoming = args.split(chr(US_CHAR), 2)
    if incoming[0] == "GetTime":
        now = datetime.datetime.now()
        time = "%d:%d" % (now.hour, now.minute)
        print time
    elif incoming[0] == "FoundPerson":
        now = datetime.datetime.now()
        print "Cool Story Bro!"
except Exception, e:
    print "INVALID"
    exit()
exit() | {
"content_hash": "4160e9b6c918c014e388046f4f898ea3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 41,
"avg_line_length": 17.5,
"alnum_prop": 0.65,
"repo_name": "kammce/STARC",
"id": "d6d9cbb8c60df5d70fac709a496f6b814de8d5cf",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/pserial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2385"
},
{
"name": "C++",
"bytes": "5217"
},
{
"name": "Java",
"bytes": "610"
},
{
"name": "JavaScript",
"bytes": "6861"
},
{
"name": "Python",
"bytes": "420"
}
],
"symlink_target": ""
} |
"""
Common utilities for OpenStack
"""
import sys
import binascii
import os
import datetime
from libcloud.utils.py3 import httplib
from libcloud.utils.iso8601 import parse_date
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.compute.types import (LibcloudError, InvalidCredsError,
MalformedResponseError)
try:
import simplejson as json
except ImportError:
import json
AUTH_API_VERSION = '1.1'
# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
'1.1',
'2.0',
'2.0_apikey',
'2.0_password'
]
# How many seconds to subtract from the auth token expiration time before
# testing if the token is still valid.
# The time is subtracted to account for the HTTP request latency and prevent
# user from getting "InvalidCredsError" if token is about to expire.
AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5
__all__ = [
'OpenStackBaseConnection',
'OpenStackAuthConnection',
'OpenStackServiceCatalog',
'OpenStackDriverMixin',
"OpenStackBaseConnection",
"OpenStackAuthConnection",
'AUTH_TOKEN_EXPIRES_GRACE_SECONDS'
]
# @TODO: Refactor for re-use by other openstack drivers
class OpenStackAuthResponse(Response):
def success(self):
return True
def parse_body(self):
if not self.body:
return None
if 'content-type' in self.headers:
key = 'content-type'
elif 'Content-Type' in self.headers:
key = 'Content-Type'
else:
raise LibcloudError('Missing content-type header',
driver=OpenStackAuthConnection)
content_type = self.headers[key]
if content_type.find(';') != -1:
content_type = content_type.split(';')[0]
if content_type == 'application/json':
try:
data = json.loads(self.body)
except:
raise MalformedResponseError('Failed to parse JSON',
body=self.body,
driver=OpenStackAuthConnection)
elif content_type == 'text/plain':
data = self.body
else:
data = self.body
return data
class OpenStackAuthConnection(ConnectionUserAndKey):
responseCls = OpenStackAuthResponse
name = 'OpenStack Auth'
timeout = None
def __init__(self, parent_conn, auth_url, auth_version, user_id, key,
tenant_name=None, timeout=None):
self.parent_conn = parent_conn
# enable tests to use the same mock connection classes.
self.conn_classes = parent_conn.conn_classes
super(OpenStackAuthConnection, self).__init__(
user_id, key, url=auth_url, timeout=timeout)
self.auth_version = auth_version
self.auth_url = auth_url
self.driver = self.parent_conn.driver
self.tenant_name = tenant_name
self.timeout = timeout
self.urls = {}
self.auth_token = None
self.auth_token_expires = None
self.auth_user_info = None
def morph_action_hook(self, action):
return action
def add_default_headers(self, headers):
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json; charset=UTF-8'
return headers
def authenticate(self, force=False):
"""
Authenticate against the keystone api.
@param force: Forcefully update the token even if it's already cached
and still valid.
@type force: C{bool}
"""
if not force and self.auth_version in AUTH_VERSIONS_WITH_EXPIRES \
and self._is_token_valid():
# If token is still valid, there is no need to re-authenticate
return self
if self.auth_version == "1.0":
return self.authenticate_1_0()
elif self.auth_version == "1.1":
return self.authenticate_1_1()
elif self.auth_version == "2.0" or self.auth_version == "2.0_apikey":
return self.authenticate_2_0_with_apikey()
elif self.auth_version == "2.0_password":
return self.authenticate_2_0_with_password()
else:
raise LibcloudError('Unsupported Auth Version requested')
def authenticate_1_0(self):
resp = self.request("/v1.0",
headers={
'X-Auth-User': self.user_id,
'X-Auth-Key': self.key,
},
method='GET')
if resp.status == httplib.UNAUTHORIZED:
# HTTP UNAUTHORIZED (401): auth failed
raise InvalidCredsError()
elif resp.status != httplib.NO_CONTENT:
raise MalformedResponseError('Malformed response',
body='code: %s body:%s headers:%s' % (resp.status,
resp.body,
resp.headers),
driver=self.driver)
else:
headers = resp.headers
# emulate the auth 1.1 URL list
self.urls = {}
self.urls['cloudServers'] = \
[{'publicURL': headers.get('x-server-management-url', None)}]
self.urls['cloudFilesCDN'] = \
[{'publicURL': headers.get('x-cdn-management-url', None)}]
self.urls['cloudFiles'] = \
[{'publicURL': headers.get('x-storage-url', None)}]
self.auth_token = headers.get('x-auth-token', None)
self.auth_user_info = None
if not self.auth_token:
raise MalformedResponseError('Missing X-Auth-Token in \
response headers')
return self
def authenticate_1_1(self):
reqbody = json.dumps({'credentials': {'username': self.user_id,
'key': self.key}})
resp = self.request("/v1.1/auth",
data=reqbody,
headers={},
method='POST')
if resp.status == httplib.UNAUTHORIZED:
# HTTP UNAUTHORIZED (401): auth failed
raise InvalidCredsError()
elif resp.status != httplib.OK:
raise MalformedResponseError('Malformed response',
body='code: %s body:%s' % (resp.status, resp.body),
driver=self.driver)
else:
try:
body = json.loads(resp.body)
except Exception:
e = sys.exc_info()[1]
raise MalformedResponseError('Failed to parse JSON', e)
try:
expires = body['auth']['token']['expires']
self.auth_token = body['auth']['token']['id']
self.auth_token_expires = parse_date(expires)
self.urls = body['auth']['serviceCatalog']
self.auth_user_info = None
except KeyError:
e = sys.exc_info()[1]
raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
return self
def authenticate_2_0_with_apikey(self):
# API Key based authentication uses the RAX-KSKEY extension.
# http://s.apache.org/oAi
data = {'auth':
{'RAX-KSKEY:apiKeyCredentials':
{'username': self.user_id, 'apiKey': self.key}}}
if self.tenant_name:
data['auth']['tenantName'] = self.tenant_name
reqbody = json.dumps(data)
return self.authenticate_2_0_with_body(reqbody)
def authenticate_2_0_with_password(self):
# Password based authentication is the only 'core' authentication
# method in Keystone at this time.
# 'keystone' - http://s.apache.org/e8h
data = {'auth': \
{'passwordCredentials': \
{'username': self.user_id, 'password': self.key}}}
if self.tenant_name:
data['auth']['tenantName'] = self.tenant_name
reqbody = json.dumps(data)
return self.authenticate_2_0_with_body(reqbody)
def authenticate_2_0_with_body(self, reqbody):
resp = self.request('/v2.0/tokens',
data=reqbody,
headers={'Content-Type': 'application/json'},
method='POST')
if resp.status == httplib.UNAUTHORIZED:
raise InvalidCredsError()
elif resp.status not in [httplib.OK,
httplib.NON_AUTHORITATIVE_INFORMATION]:
raise MalformedResponseError('Malformed response',
body='code: %s body: %s' % (resp.status, resp.body),
driver=self.driver)
else:
try:
body = json.loads(resp.body)
except Exception:
e = sys.exc_info()[1]
raise MalformedResponseError('Failed to parse JSON', e)
try:
access = body['access']
expires = access['token']['expires']
self.auth_token = access['token']['id']
self.auth_token_expires = parse_date(expires)
self.urls = access['serviceCatalog']
self.auth_user_info = access.get('user', {})
except KeyError:
e = sys.exc_info()[1]
raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
return self
def _is_token_valid(self):
"""
        Return True if the current token is already cached and hasn't expired
yet.
@rtype: C{bool}
"""
if not self.auth_token:
return False
if not self.auth_token_expires:
return False
expires = self.auth_token_expires - \
datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)
time_tuple_expires = expires.utctimetuple()
time_tuple_now = datetime.datetime.utcnow().utctimetuple()
# TODO: Subtract some reasonable grace time period
if time_tuple_now < time_tuple_expires:
return True
return False
class OpenStackServiceCatalog(object):
"""
http://docs.openstack.org/api/openstack-identity-service/2.0/content/
    This class should be instantiated with the contents of the
    'serviceCatalog' in the auth response. It does the work of figuring
    out which services actually exist in the catalog, as well as splitting
    them up by type, name, and region if available.
"""
_auth_version = None
_service_catalog = None
def __init__(self, service_catalog, ex_force_auth_version=None):
self._auth_version = ex_force_auth_version or AUTH_API_VERSION
self._service_catalog = {}
# Check this way because there are a couple of different 2.0_*
# auth types.
if '2.0' in self._auth_version:
self._parse_auth_v2(service_catalog)
elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
self._parse_auth_v1(service_catalog)
else:
raise LibcloudError('auth version "%s" not supported'
% (self._auth_version))
def get_catalog(self):
return self._service_catalog
def get_public_urls(self, service_type=None, name=None):
endpoints = self.get_endpoints(service_type=service_type,
name=name)
result = []
for endpoint in endpoints:
if 'publicURL' in endpoint:
result.append(endpoint['publicURL'])
return result
def get_endpoints(self, service_type=None, name=None):
eps = []
if '2.0' in self._auth_version:
endpoints = self._service_catalog.get(service_type, {}) \
.get(name, {})
elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
endpoints = self._service_catalog.get(name, {})
for regionName, values in endpoints.items():
eps.append(values[0])
return eps
def get_endpoint(self, service_type=None, name=None, region=None):
if '2.0' in self._auth_version:
endpoint = self._service_catalog.get(service_type, {}) \
.get(name, {}).get(region, [])
elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
endpoint = self._service_catalog.get(name, {}).get(region, [])
# ideally an endpoint either isn't found or only one match is found.
if len(endpoint) == 1:
return endpoint[0]
else:
return {}
def _parse_auth_v1(self, service_catalog):
for service, endpoints in service_catalog.items():
self._service_catalog[service] = {}
for endpoint in endpoints:
region = endpoint.get('region')
if region not in self._service_catalog[service]:
self._service_catalog[service][region] = []
self._service_catalog[service][region].append(endpoint)
def _parse_auth_v2(self, service_catalog):
for service in service_catalog:
service_type = service['type']
service_name = service.get('name', None)
if service_type not in self._service_catalog:
self._service_catalog[service_type] = {}
if service_name not in self._service_catalog[service_type]:
self._service_catalog[service_type][service_name] = {}
for endpoint in service.get('endpoints', []):
region = endpoint.get('region', None)
catalog = self._service_catalog[service_type][service_name]
if region not in catalog:
catalog[region] = []
catalog[region].append(endpoint)
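# --- Illustrative sketch (not part of the original libcloud module) ---
# A minimal example of how a parsed 2.0-style service catalog can be queried;
# the catalog contents below are made up for illustration only.
def _example_service_catalog_lookup():
    catalog = OpenStackServiceCatalog(
        [{'type': 'compute',
          'name': 'nova',
          'endpoints': [{'region': 'RegionOne',
                         'publicURL': 'https://compute.example.com/v2'}]}],
        ex_force_auth_version='2.0')
    # Returns the single matching endpoint dict, or {} when nothing matches.
    return catalog.get_endpoint(service_type='compute', name='nova',
                                region='RegionOne')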
class OpenStackBaseConnection(ConnectionUserAndKey):
"""
Base class for OpenStack connections.
@param user_id: User name to use when authenticating
@type user_id: C{string}
@param key: Secret to use when authenticating.
@type key: C{string}
@param secure: Use HTTPS? (True by default.)
@type secure: C{bool}
@param ex_force_base_url: Base URL for connection requests. If
not specified, this will be determined by authenticating.
@type ex_force_base_url: C{string}
@param ex_force_auth_url: Base URL for authentication requests.
@type ex_force_auth_url: C{string}
@param ex_force_auth_version: Authentication version to use. If
not specified, defaults to AUTH_API_VERSION.
@type ex_force_auth_version: C{string}
@param ex_force_auth_token: Authentication token to use for
connection requests. If specified, the connection will not attempt
to authenticate, and the value of ex_force_base_url will be used to
determine the base request URL. If ex_force_auth_token is passed in,
ex_force_base_url must also be provided.
@type ex_force_auth_token: C{string}
@param ex_tenant_name: When authenticating, provide this tenant
name to the identity service. A scoped token will be returned.
Some cloud providers require the tenant name to be provided at
authentication time. Others will use a default tenant if none
is provided.
@type ex_tenant_name: C{string}
@param ex_force_service_type: Service type to use when selecting an
service. If not specified, a provider specific default will be used.
@type ex_force_service_type: C{string}
@param ex_force_service_name: Service name to use when selecting an
service. If not specified, a provider specific default will be used.
@type ex_force_service_name: C{string}
@param ex_force_service_region: Region to use when selecting an
service. If not specified, a provider specific default will be used.
@type ex_force_service_region: C{string}
@param ex_auth_connection: OpenStackAuthConnection instance to use for
making HTTP requests. If not specified, a new one is instantiated.
@type ex_auth_connection: C{OpenStackAuthConnection}
"""
auth_url = None
auth_token = None
auth_token_expires = None
auth_user_info = None
service_catalog = None
service_type = None
service_name = None
service_region = None
_auth_version = None
def __init__(self, user_id, key, secure=True,
host=None, port=None, timeout=None,
ex_force_base_url=None,
ex_force_auth_url=None,
ex_force_auth_version=None,
ex_force_auth_token=None,
ex_tenant_name=None,
ex_force_service_type=None,
ex_force_service_name=None,
ex_force_service_region=None,
ex_auth_connection=None):
self._ex_force_base_url = ex_force_base_url
self._ex_force_auth_url = ex_force_auth_url
self._auth_version = self._auth_version or ex_force_auth_version
self._ex_tenant_name = ex_tenant_name
self._ex_force_service_type = ex_force_service_type
self._ex_force_service_name = ex_force_service_name
self._ex_force_service_region = ex_force_service_region
self._auth_connection = ex_auth_connection
if ex_force_auth_token:
self.auth_token = ex_force_auth_token
if ex_force_auth_token and not ex_force_base_url:
raise LibcloudError(
'Must also provide ex_force_base_url when specifying '
'ex_force_auth_token.')
if not self._auth_version:
self._auth_version = AUTH_API_VERSION
super(OpenStackBaseConnection, self).__init__(
user_id, key, secure=secure, timeout=timeout)
def get_service_catalog(self):
if self.service_catalog is None:
self._populate_hosts_and_request_paths()
return self.service_catalog
def get_endpoint(self):
"""
Selects the endpoint to use based on provider specific values,
or overrides passed in by the user when setting up the driver.
@returns: url of the relevant endpoint for the driver
"""
service_type = self.service_type
service_name = self.service_name
service_region = self.service_region
if self._ex_force_service_type:
service_type = self._ex_force_service_type
if self._ex_force_service_name:
service_name = self._ex_force_service_name
if self._ex_force_service_region:
service_region = self._ex_force_service_region
ep = self.service_catalog.get_endpoint(service_type=service_type,
name=service_name,
region=service_region)
if 'publicURL' in ep:
return ep['publicURL']
raise LibcloudError('Could not find specified endpoint')
def get_auth_connection_instance(self):
"""
Return an OpenStackAuthConnection instance for this connection.
"""
auth_url = self.auth_url
if self._ex_force_auth_url is not None:
auth_url = self._ex_force_auth_url
if auth_url is None:
raise LibcloudError('OpenStack instance must ' +
'have auth_url set')
if not self._auth_connection:
self._auth_connection = OpenStackAuthConnection(self, auth_url,
self._auth_version,
self.user_id,
self.key,
tenant_name=self._ex_tenant_name,
timeout=self.timeout)
return self._auth_connection
def add_default_headers(self, headers):
headers['X-Auth-Token'] = self.auth_token
headers['Accept'] = self.accept_format
return headers
def morph_action_hook(self, action):
self._populate_hosts_and_request_paths()
return super(OpenStackBaseConnection, self).morph_action_hook(action)
def request(self, **kwargs):
return super(OpenStackBaseConnection, self).request(**kwargs)
def _populate_hosts_and_request_paths(self):
"""
OpenStack uses a separate host for API calls which is only provided
after an initial authentication request.
"""
if not self.auth_token:
auth_connection = self.get_auth_connection_instance()
# may throw InvalidCreds, etc
auth_connection.authenticate()
self.auth_token = auth_connection.auth_token
self.auth_token_expires = auth_connection.auth_token_expires
self.auth_user_info = auth_connection.auth_user_info
# pull out and parse the service catalog
self.service_catalog = OpenStackServiceCatalog(auth_connection.urls,
ex_force_auth_version=self._auth_version)
# Set up connection info
url = self._ex_force_base_url or self.get_endpoint()
(self.host, self.port, self.secure, self.request_path) = \
self._tuple_from_url(url)
def _add_cache_busting_to_params(self, params):
cache_busting_number = binascii.hexlify(os.urandom(8))
if isinstance(params, dict):
params['cache-busting'] = cache_busting_number
else:
params.append(('cache-busting', cache_busting_number))
class OpenStackDriverMixin(object):
    # Extension arguments which are passed to the connection class.
EXTENSTION_ARGUMENTS = [
'ex_force_base_url',
'ex_force_auth_token',
'ex_force_auth_url',
'ex_force_auth_version',
'ex_tenant_name',
'ex_force_service_type',
'ex_force_service_name',
'ex_force_service_region',
'ex_auth_connection'
]
def __init__(self, *args, **kwargs):
pairs = self._get_argument_pairs()
for argument_name, attribute_name in pairs:
value = kwargs.get(argument_name, None)
if value is None:
continue
setattr(self, attribute_name, value)
def openstack_connection_kwargs(self):
"""
@rtype: C{dict}
"""
result = {}
pairs = self._get_argument_pairs()
for argument_name, attribute_name in pairs:
value = getattr(self, attribute_name, None)
if not value:
continue
result[argument_name] = value
return result
def _get_argument_pairs(self):
result = []
for argument_name in self.EXTENSTION_ARGUMENTS:
attribute_name = '_%s' % (argument_name)
result.append([argument_name, attribute_name])
return result
| {
"content_hash": "3304218f34ed212b30ebaba977c3eebd",
"timestamp": "",
"source": "github",
"line_count": 649,
"max_line_length": 93,
"avg_line_length": 35.8828967642527,
"alnum_prop": 0.5737718996908279,
"repo_name": "IsCoolEntertainment/debpkg_libcloud",
"id": "ba1425a177604f131318e62e2a979b3af5442429",
"size": "24070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libcloud/common/openstack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2037599"
}
],
"symlink_target": ""
} |
from django.db import models
from accounts.models import Profile
from courses.models import Course
class Article(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
source_url = models.URLField(null=True)
created_by = models.ManyToManyField(Profile)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Quiz(models.Model):
course = models.ForeignKey(Course)
article = models.OneToOneField(Article, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
due = models.DateTimeField(null=True)
owner = models.ForeignKey(Profile, related_name='created_quizzes')
assigned_to = models.ManyToManyField(Profile)
solution_available = models.BooleanField(default=False)
class Meta(object):
permissions = (
('edit_quiz', 'Edit quiz'),
('attempt_quiz', 'Attempt quiz')
)
def __str__(self):
return str(self.pk)
class AnswerSheet(models.Model):
quiz = models.ForeignKey(Quiz)
owner = models.ForeignKey(Profile)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
submitted = models.BooleanField(default=False)
confirmed = models.BooleanField(default=False)
scored = models.BooleanField(default=False)
def __str__(self):
return str(self.pk)
class Question(models.Model):
quiz = models.ForeignKey(Quiz, related_name='questions')
question = models.CharField(max_length=100)
standard_answer = models.CharField(max_length=100)
sequence = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta(object):
ordering = ('sequence',)
def __str__(self):
return self.question
class Answer(models.Model):
# Staff only
answer_sheet = models.ForeignKey(AnswerSheet, related_name='answers')
question = models.ForeignKey(Question)
correct = models.NullBooleanField(null=True)
# Student only
answer = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.answer
| {
"content_hash": "4659ec0f2a39a594f0527f6470ebc5fa",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 73,
"avg_line_length": 30.734177215189874,
"alnum_prop": 0.6911037891268533,
"repo_name": "lockhawksp/beethoven",
"id": "d58734cf61037f66a31da090f52f896e5d8f85ae",
"size": "2428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quizzes/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "313"
},
{
"name": "Dart",
"bytes": "11566"
},
{
"name": "Python",
"bytes": "40102"
}
],
"symlink_target": ""
} |
import itertools
from oslo_log import log as logging
from webtest import TestApp
from designate.api import admin as admin_api
from designate.api import middleware
from designate.tests.test_api import ApiTestCase
LOG = logging.getLogger(__name__)
INVALID_ID = [
'2fdadfb1-cf96-4259-ac6b-bb7b6d2ff98g',
'2fdadfb1cf964259ac6bbb7b6d2ff9GG',
'12345'
]
class AdminApiTestCase(ApiTestCase):
def setUp(self):
super(AdminApiTestCase, self).setUp()
# Ensure the v2 API is enabled
self.config(enable_api_admin=True, group='service:api')
# Create the application
self.app = admin_api.factory({})
# Inject the NormalizeURIMiddleware middleware
self.app = middleware.NormalizeURIMiddleware(self.app)
# Inject the FaultWrapper middleware
self.app = middleware.FaultWrapperMiddleware(self.app)
# Inject the TestContext middleware
self.app = middleware.TestContextMiddleware(
self.app, self.admin_context.project_id,
self.admin_context.project_id)
# Obtain a test client
self.client = TestApp(self.app)
def tearDown(self):
self.app = None
self.client = None
super(AdminApiTestCase, self).tearDown()
def _assert_invalid_uuid(self, method, url_format, *args, **kw):
"""
        Test that invalid UUIDs used in the URL are rejected.
"""
count = url_format.count('%s')
for i in itertools.product(INVALID_ID, repeat=count):
self._assert_exception('invalid_uuid', 400, method, url_format % i)
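    # Illustrative note (not part of the original test case): for a format
    # such as '/zones/%s/recordsets/%s' (two '%s' placeholders, hypothetical
    # here), itertools.product exercises every combination of the malformed
    # IDs above, so each placeholder position gets every invalid value.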
def _assert_exception(self, expected_type, expected_status, obj,
*args, **kwargs):
"""
        Check that the response from an api call that raised an exception
        contains the expected data.
"""
kwargs.setdefault('status', expected_status)
response = obj(*args, **kwargs) if not hasattr(obj, 'json') else obj
self.assertEqual(expected_status, response.json['code'])
self.assertEqual(expected_type, response.json['type'])
def _assert_invalid_paging(self, data, url, key):
"""
        Test that certain invalid paging parameters are rejected for a given url.
"""
self._assert_paging(data, url, key=key,
limit='invalid_limit',
expected_type='invalid_limit',
expected_status=400)
self._assert_paging(data, url, key=key,
sort_dir='invalid_sort_dir',
expected_type='invalid_sort_dir',
expected_status=400)
self._assert_paging(data, url, key=key,
sort_key='invalid_sort_key',
expected_type='invalid_sort_key',
expected_status=400)
self._assert_paging(data, url, key=key,
marker='invalid_marker',
expected_type='invalid_marker',
expected_status=400)
def _assert_paging(self, data, url, key=None, limit=5, sort_dir='asc',
sort_key='created_at', marker=None,
expected_type=None, expected_status=200):
def _page(marker=None):
params = {'limit': limit,
'sort_dir': sort_dir,
'sort_key': sort_key}
if marker is not None:
params['marker'] = marker
r = self.client.get(url, params, status=expected_status)
if expected_status != 200:
if expected_type:
self._assert_exception(expected_type, expected_status, r)
return r
else:
return r.json[key] if key in r.json else r.json
response = _page(marker=marker)
if expected_status != 200:
if expected_type:
self._assert_exception(expected_type, expected_status,
response)
return response
x = 0
length = len(data)
for i in range(0, length):
assert data[i]['id'] == response[x]['id']
x += 1
# Don't bother getting a new page if we're at the last item
if x == len(response) and i != length - 1:
x = 0
response = _page(response[-1:][0]['id'])
_page(marker=response[-1:][0]['id'])
| {
"content_hash": "ef8de6bd9edaa7a23039b1e8d1dbcc03",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 33.48148148148148,
"alnum_prop": 0.5480088495575222,
"repo_name": "openstack/designate",
"id": "82bbec2949ff7ad51647b15b5b457f5fc51bc4cf",
"size": "5175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/tests/test_api/test_admin/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "71074"
},
{
"name": "Jinja",
"bytes": "2004"
},
{
"name": "Mako",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "2442862"
},
{
"name": "Shell",
"bytes": "46200"
}
],
"symlink_target": ""
} |
import numpy as np
import copy
from sklearn.base import BaseEstimator
from scipy.ndimage import rotate
def load_images(filenames):
'''Expects filenames to be a list of .fits file locations'''
from astropy.io.fits import getdata
return [ normalized_positive_image(getdata(filename)) for filename in filenames ]
def normalized_positive_image(image) :
'''Images need to be numpy arrays between -1 and 1 for median and
possibly HOG, but also should be log normalized so contrast is
maintained.'''
pos_def = np.clip(image,1e-6,1e100)+1.0
return np.log(pos_def) / abs(np.log(pos_def)).max()
def rotate_images(degrees, images_X, images_y) :
rotated_images_X = []
rotated_y = []
for d in degrees :
rotated_images_X += [ rotate(image, d) for image in images_X ]
rotated_y += images_y
return rotated_images_X, rotated_y
class MedianSmooth(BaseEstimator):
def __init__(self, radius = 3):
self.radius = radius
def fit(self, images, y = None):
return self
def transform(self, images):
from skimage.filters.rank import median
from skimage.morphology import disk
return np.array([median(image, disk(self.radius))
for image in images])
def fit_transform(self, images, y = None):
return self.transform(images)
class Clip(BaseEstimator) :
'''Numpy clip'''
def __init__( self, clip_min=1e-6, clip_max=1e100):
self.clip_min = clip_min
self.clip_max = clip_max
def fit(self, image, y=None) :
return self
def transform( self, images ) :
return np.array( [ np.clip( image, self.clip_min, self.clip_max )
for image in images ] )
def fit_transform( self, images, y = None ) :
return self.transform(images)
class LogPositiveDefinite(BaseEstimator) :
''' Shift all values to positive definite with options for taking
log of image and making log positive definite. Return normalized
values.'''
def __init__( self, log = True ) :
self.log = log
def fit( self, images, y = None ) :
return self
def _make_positive( self, image ) :
''' Ensure that the minimum value is just above zero. '''
return image - image.min() + np.abs(image.min())
def _normalize( self, image ) :
if self.log :
return np.log( self._make_positive(image) ) / np.log( self._make_positive(image) ).max()
else :
return self._make_positive(image) / self._make_positive(image).max()
def transform( self, images ) :
return np.array( [ self._normalize(image) for image in images ] )
class HOG(BaseEstimator):
def __init__(self, orientations = 9, pixels_per_cell = (8, 8),
cells_per_block = (3, 3) ):
self.orientations = orientations
self.pixels_per_cell = pixels_per_cell
self.cells_per_block = cells_per_block
def fit(self, images, y = None):
return self
def transform(self, images):
from skimage.feature import hog
return np.array([hog(image,
orientations = self.orientations,
pixels_per_cell = self.pixels_per_cell,
cells_per_block = self.cells_per_block,
)
for image in images])
def fit_transform(self, images, y = None):
return self.transform(images)
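# --- Illustrative sketch (not part of the original module) ---
# A hedged example of composing the transformers above into a scikit-learn
# Pipeline with a linear classifier; the classifier choice and parameters are
# assumptions, and whether the rank median filter accepts the float images
# produced by load_images depends on the skimage version in use.
def _example_pipeline():
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import LogisticRegression
    return Pipeline([('median', MedianSmooth(radius=3)),
                     ('hog', HOG(orientations=9,
                                 pixels_per_cell=(8, 8),
                                 cells_per_block=(3, 3))),
                     ('clf', LogisticRegression())])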
| {
"content_hash": "b2fcf1d0627103798e26eef9ce06b20a",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 100,
"avg_line_length": 31.079646017699115,
"alnum_prop": 0.6007972665148064,
"repo_name": "cavestruz/StrongCNN",
"id": "e172035a5423479f6478ab697d8605f08795f7cc",
"size": "3512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/image_processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7822"
},
{
"name": "Python",
"bytes": "94637"
},
{
"name": "Shell",
"bytes": "4089"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting SRI values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the solari data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Solari/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Solari")
return os.path.expanduser("~/.solari")
def read_solari_config(dbdir):
"""Read the solari.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "solari.conf"))))
return dict(config_parser.items("all"))
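# Illustrative note (not part of the original script): solari.conf has no
# [section] headers, so FakeSecHead prepends a fake "[all]" section and strips
# trailing "#" comments. A file containing, e.g.:
#     rpcuser=alice
#     rpcpassword=secret  # comment is dropped
# is returned as {'rpcuser': 'alice', 'rpcpassword': 'secret'}.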
def connect_JSON(config):
"""Connect to a solari JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 29980 if testnet else 29978
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the solarid we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(solarid):
info = solarid.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
solarid.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = solarid.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(solarid):
address_summary = dict()
address_to_account = dict()
for info in solarid.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = solarid.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = solarid.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-solari-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
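# Worked example (hypothetical amounts): with unspent outputs of 0.5 and 0.7 SRI and
# needed=Decimal("1.0"), the loop takes both outputs and returns them together with the
# overshoot, i.e. (outputs, Decimal("0.2")), which the caller treats as change.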
def create_tx(solarid, fromaddresses, toaddress, amount, fee):
all_coins = list_available(solarid)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f SRI available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to solarid.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = solarid.createrawtransaction(inputs, outputs)
signed_rawtx = solarid.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(solarid, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = solarid.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(solarid, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = solarid.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(solarid, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get solaris from")
parser.add_option("--to", dest="to", default=None,
help="address to get send solaris to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of solari.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_solari_config(options.datadir)
if options.testnet: config['testnet'] = True
solarid = connect_JSON(config)
if options.amount is None:
address_summary = list_available(solarid)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(solarid) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(solarid, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(solarid, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = solarid.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| {
"content_hash": "5f46f0de658ca657f717f9571634c0c1",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.24603174603175,
"alnum_prop": 0.6140278065988795,
"repo_name": "CoinAge-DAO/solari",
"id": "9d803027cde2285be92b41dd25130e6ee8fe3346",
"size": "10013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "448543"
},
{
"name": "C++",
"bytes": "3558773"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "19722"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2099"
},
{
"name": "Makefile",
"bytes": "58619"
},
{
"name": "Objective-C",
"bytes": "2020"
},
{
"name": "Objective-C++",
"bytes": "7300"
},
{
"name": "Protocol Buffer",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "220621"
},
{
"name": "QMake",
"bytes": "2018"
},
{
"name": "Shell",
"bytes": "37774"
}
],
"symlink_target": ""
} |
import os
from shutil import rmtree
from urlparse import urlparse
from uuid import uuid4
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.core.urlresolvers import reverse
from django.db import connection
from django.template import Context, Template, TemplateDoesNotExist
from django.template.loader import get_template
from django.test import TestCase
from django.utils.html import strip_tags
from django.utils.http import int_to_base36
from django.contrib.sites.models import Site
from PIL import Image
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
from mezzanine.blog.models import BlogPost
from mezzanine.conf import settings, registry
from mezzanine.conf.models import Setting
from mezzanine.core.models import CONTENT_STATUS_DRAFT
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
from mezzanine.forms import fields
from mezzanine.forms.models import Form
from mezzanine.galleries.models import Gallery, GALLERIES_UPLOAD_DIR
from mezzanine.generic.forms import RatingForm
from mezzanine.generic.models import ThreadedComment, AssignedKeyword, Keyword
from mezzanine.generic.models import RATING_RANGE
from mezzanine.pages.models import RichTextPage
from mezzanine.urls import PAGES_SLUG
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.tests import copy_test_to_media, run_pyflakes_for_package
from mezzanine.utils.tests import run_pep8_for_package
class Tests(TestCase):
"""
Mezzanine tests.
"""
def setUp(self):
"""
Create an admin user.
"""
self._username = "test"
self._password = "test"
args = (self._username, "[email protected]", self._password)
self._user = User.objects.create_superuser(*args)
def account_data(self, test_value):
"""
Returns a dict with test data for all the user/profile fields.
"""
# User fields
data = {"email": test_value + "@example.com"}
for field in ("first_name", "last_name", "username",
"password1", "password2"):
if field.startswith("password"):
value = "x" * settings.ACCOUNTS_MIN_PASSWORD_LENGTH
else:
value = test_value
data[field] = value
# Profile fields
Profile = get_profile_model()
if Profile is not None:
user_fieldname = get_profile_user_fieldname()
for field in Profile._meta.fields:
if field.name not in (user_fieldname, "id"):
if field.choices:
value = field.choices[0][0]
else:
value = test_value
data[field.name] = value
return data
def test_account(self):
"""
Test account creation.
"""
# Verification not required - test an active user is created.
data = self.account_data("test1")
settings.ACCOUNTS_VERIFICATION_REQUIRED = False
response = self.client.post(reverse("signup"), data, follow=True)
self.assertEqual(response.status_code, 200)
users = User.objects.filter(email=data["email"], is_active=True)
self.assertEqual(len(users), 1)
# Verification required - test an inactive user is created,
settings.ACCOUNTS_VERIFICATION_REQUIRED = True
data = self.account_data("test2")
emails = len(mail.outbox)
response = self.client.post(reverse("signup"), data, follow=True)
self.assertEqual(response.status_code, 200)
users = User.objects.filter(email=data["email"], is_active=False)
self.assertEqual(len(users), 1)
# Test the verification email.
self.assertEqual(len(mail.outbox), emails + 1)
self.assertEqual(len(mail.outbox[0].to), 1)
self.assertEqual(mail.outbox[0].to[0], data["email"])
# Test the verification link.
new_user = users[0]
verification_url = reverse("signup_verify", kwargs={
"uidb36": int_to_base36(new_user.id),
"token": default_token_generator.make_token(new_user),
})
response = self.client.get(verification_url, follow=True)
self.assertEqual(response.status_code, 200)
users = User.objects.filter(email=data["email"], is_active=True)
self.assertEqual(len(users), 1)
def test_draft_page(self):
"""
Test a draft page as only being viewable by a staff member.
"""
self.client.logout()
draft = RichTextPage.objects.create(title="Draft",
status=CONTENT_STATUS_DRAFT)
response = self.client.get(draft.get_absolute_url())
self.assertEqual(response.status_code, 404)
self.client.login(username=self._username, password=self._password)
response = self.client.get(draft.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_overridden_page(self):
"""
Test that a page with a slug matching a non-page urlpattern
return ``True`` for its overridden property.
"""
        # If BLOG_SLUG is empty then urlpatterns for pages are prefixed
        # with PAGES_SLUG, and generally won't be overridden. In this
        # case, there aren't any overriding URLs by default, so bail
        # on the test.
if PAGES_SLUG:
return
page, created = RichTextPage.objects.get_or_create(slug="edit")
self.assertTrue(page.overridden())
def test_description(self):
"""
Test generated description is text version of the first line
of content.
"""
description = "<p>How now brown cow</p>"
page = RichTextPage.objects.create(title="Draft",
content=description * 3)
self.assertEqual(page.description, strip_tags(description))
def test_device_specific_template(self):
"""
Test that an alternate template is rendered when a mobile
device is used.
"""
try:
get_template("mobile/index.html")
except TemplateDoesNotExist:
return
template_name = lambda t: t.name if hasattr(t, "name") else t[0].name
ua = settings.DEVICE_USER_AGENTS[0][1][0]
kwargs = {"slug": "device-test"}
url = reverse("page", kwargs=kwargs)
kwargs["status"] = CONTENT_STATUS_PUBLISHED
RichTextPage.objects.get_or_create(**kwargs)
default = self.client.get(url).template
mobile = self.client.get(url, HTTP_USER_AGENT=ua).template
self.assertNotEqual(template_name(default), template_name(mobile))
def test_blog_views(self):
"""
Basic status code test for blog views.
"""
response = self.client.get(reverse("blog_post_list"))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("blog_post_feed", args=("rss",)))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("blog_post_feed", args=("atom",)))
self.assertEqual(response.status_code, 200)
blog_post = BlogPost.objects.create(title="Post", user=self._user,
status=CONTENT_STATUS_PUBLISHED)
response = self.client.get(blog_post.get_absolute_url())
self.assertEqual(response.status_code, 200)
# Test the blog is login protected if its page has login_required
# set to True.
slug = settings.BLOG_SLUG or "/"
RichTextPage.objects.create(title="blog", slug=slug,
login_required=True)
response = self.client.get(reverse("blog_post_list"), follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.redirect_chain) > 0)
redirect_path = urlparse(response.redirect_chain[0][0]).path
self.assertEqual(redirect_path, settings.LOGIN_URL)
def test_rating(self):
"""
        Test that ratings can be posted and average/count are calculated.
"""
blog_post = BlogPost.objects.create(title="Ratings", user=self._user,
status=CONTENT_STATUS_PUBLISHED)
data = RatingForm(blog_post).initial
for value in RATING_RANGE:
data["value"] = value
response = self.client.post(reverse("rating"), data=data)
response.delete_cookie("mezzanine-rating")
blog_post = BlogPost.objects.get(id=blog_post.id)
count = len(RATING_RANGE)
average = sum(RATING_RANGE) / float(count)
self.assertEqual(blog_post.rating_count, count)
self.assertEqual(blog_post.rating_average, average)
def queries_used_for_template(self, template, **context):
"""
Return the number of queries used when rendering a template
string.
"""
settings.DEBUG = True
connection.queries = []
t = Template(template)
t.render(Context(context))
settings.DEBUG = False
return len(connection.queries)
def create_recursive_objects(self, model, parent_field, **kwargs):
"""
Create multiple levels of recursive objects.
"""
per_level = range(3)
for _ in per_level:
kwargs[parent_field] = None
level1 = model.objects.create(**kwargs)
for _ in per_level:
kwargs[parent_field] = level1
level2 = model.objects.create(**kwargs)
for _ in per_level:
kwargs[parent_field] = level2
model.objects.create(**kwargs)
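        # With three items per level this builds 3 + 3*3 + 3*3*3 = 39 objects,
        # i.e. a three-level tree, which the query-count tests below rely on.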
def test_comment_queries(self):
"""
Test that rendering comments executes the same number of
queries, regardless of the number of nested replies.
"""
blog_post = BlogPost.objects.create(title="Post", user=self._user)
content_type = ContentType.objects.get_for_model(blog_post)
kwargs = {"content_type": content_type, "object_pk": blog_post.id,
"site_id": settings.SITE_ID}
template = "{% load comment_tags %}{% comment_thread blog_post %}"
context = {
"blog_post": blog_post,
"posted_comment_form": None,
"unposted_comment_form": None,
}
before = self.queries_used_for_template(template, **context)
self.create_recursive_objects(ThreadedComment, "replied_to", **kwargs)
after = self.queries_used_for_template(template, **context)
self.assertEquals(before, after)
def test_page_menu_queries(self):
"""
Test that rendering a page menu executes the same number of
queries regardless of the number of pages or levels of
children.
"""
template = ('{% load pages_tags %}'
'{% page_menu "pages/menus/tree.html" %}')
before = self.queries_used_for_template(template)
self.create_recursive_objects(RichTextPage, "parent", title="Page",
status=CONTENT_STATUS_PUBLISHED)
after = self.queries_used_for_template(template)
self.assertEquals(before, after)
def test_page_menu_flags(self):
"""
Test that pages only appear in the menu templates they've been
assigned to show in.
"""
menus = []
pages = []
template = "{% load pages_tags %}"
for i, label, path in settings.PAGE_MENU_TEMPLATES:
menus.append(i)
pages.append(RichTextPage.objects.create(in_menus=list(menus),
title="Page for %s" % label, status=CONTENT_STATUS_PUBLISHED))
template += "{%% page_menu '%s' %%}" % path
rendered = Template(template).render(Context({}))
for page in pages:
self.assertEquals(rendered.count(page.title), len(page.in_menus))
def test_keywords(self):
"""
Test that the keywords_string field is correctly populated.
"""
page = RichTextPage.objects.create(title="test keywords")
keywords = set(["how", "now", "brown", "cow"])
Keyword.objects.all().delete()
for keyword in keywords:
keyword_id = Keyword.objects.get_or_create(title=keyword)[0].id
page.keywords.add(AssignedKeyword(keyword_id=keyword_id))
page = RichTextPage.objects.get(id=page.id)
self.assertEquals(keywords, set(page.keywords_string.split()))
# Test removal.
first = Keyword.objects.all()[0]
keywords.remove(first.title)
first.delete()
page = RichTextPage.objects.get(id=page.id)
self.assertEquals(keywords, set(page.keywords_string.split()))
page.delete()
def test_search(self):
"""
Test search.
"""
RichTextPage.objects.all().delete()
published = {"status": CONTENT_STATUS_PUBLISHED}
first = RichTextPage.objects.create(title="test page",
status=CONTENT_STATUS_DRAFT).id
second = RichTextPage.objects.create(title="test another test page",
**published).id
# Draft shouldn't be a result.
results = RichTextPage.objects.search("test")
self.assertEqual(len(results), 1)
RichTextPage.objects.filter(id=first).update(**published)
results = RichTextPage.objects.search("test")
self.assertEqual(len(results), 2)
# Either word.
results = RichTextPage.objects.search("another test")
self.assertEqual(len(results), 2)
# Must include first word.
results = RichTextPage.objects.search("+another test")
self.assertEqual(len(results), 1)
# Mustn't include first word.
results = RichTextPage.objects.search("-another test")
self.assertEqual(len(results), 1)
if results:
self.assertEqual(results[0].id, first)
# Exact phrase.
results = RichTextPage.objects.search('"another test"')
self.assertEqual(len(results), 1)
if results:
self.assertEqual(results[0].id, second)
# Test ordering.
results = RichTextPage.objects.search("test")
self.assertEqual(len(results), 2)
if results:
self.assertEqual(results[0].id, second)
def test_forms(self):
"""
Simple 200 status check against rendering and posting to forms
with both optional and required fields.
"""
for required in (True, False):
form = Form.objects.create(title="Form",
status=CONTENT_STATUS_PUBLISHED)
for (i, (field, _)) in enumerate(fields.NAMES):
form.fields.create(label="Field %s" % i, field_type=field,
required=required, visible=True)
response = self.client.get(form.get_absolute_url())
self.assertEqual(response.status_code, 200)
visible_fields = form.fields.visible()
data = dict([("field_%s" % f.id, "test") for f in visible_fields])
response = self.client.post(form.get_absolute_url(), data=data)
self.assertEqual(response.status_code, 200)
def test_settings(self):
"""
Test that an editable setting can be overridden with a DB
value and that the data type is preserved when the value is
returned back out of the DB. Also checks to ensure no
unsupported types are defined for editable settings.
"""
# Find an editable setting for each supported type.
names_by_type = {}
for setting in registry.values():
if setting["editable"] and setting["type"] not in names_by_type:
names_by_type[setting["type"]] = setting["name"]
# Create a modified value for each setting and save it.
values_by_name = {}
for (setting_type, setting_name) in names_by_type.items():
setting_value = registry[setting_name]["default"]
if setting_type in (int, float):
setting_value += 1
elif setting_type is bool:
setting_value = not setting_value
elif setting_type in (str, unicode):
setting_value += "test"
else:
setting = "%s: %s" % (setting_name, setting_type)
self.fail("Unsupported setting type for %s" % setting)
values_by_name[setting_name] = setting_value
Setting.objects.create(name=setting_name, value=str(setting_value))
# Load the settings and make sure the DB values have persisted.
settings.use_editable()
for (name, value) in values_by_name.items():
self.assertEqual(getattr(settings, name), value)
def test_syntax(self):
"""
Run pyflakes/pep8 across the code base to check for potential errors.
"""
warnings = []
warnings.extend(run_pyflakes_for_package("mezzanine"))
warnings.extend(run_pep8_for_package("mezzanine"))
if warnings:
self.fail("Syntax warnings!\n\n%s" % "\n".join(warnings))
def test_utils(self):
"""
        Miscellaneous tests for the ``mezzanine.utils`` package.
"""
self.assertRaises(ImportError, import_dotted_path, "mezzanine")
self.assertRaises(ImportError, import_dotted_path, "mezzanine.NO")
self.assertRaises(ImportError, import_dotted_path, "mezzanine.core.NO")
try:
import_dotted_path("mezzanine.core")
except ImportError:
self.fail("mezzanine.utils.imports.import_dotted_path"
"could not import \"mezzanine.core\"")
def _create_page(self, title, status):
return RichTextPage.objects.create(title=title, status=status)
def _test_site_pages(self, title, status, count):
# test _default_manager
pages = RichTextPage._default_manager.all()
self.assertEqual(pages.count(), count)
self.assertTrue(title in [page.title for page in pages])
# test objects manager
pages = RichTextPage.objects.all()
self.assertEqual(pages.count(), count)
self.assertTrue(title in [page.title for page in pages])
# test response status code
code = 200 if status == CONTENT_STATUS_PUBLISHED else 404
pages = RichTextPage.objects.filter(status=status)
response = self.client.get(pages[0].get_absolute_url())
self.assertEqual(response.status_code, code)
    def test_multisite(self):
from django.conf import settings
# setup
try:
old_site_id = settings.SITE_ID
except:
old_site_id = None
site1 = Site.objects.create(domain="site1.com")
site2 = Site.objects.create(domain="site2.com")
# create pages under site1, which should be only accessible
# when SITE_ID is site1
settings.SITE_ID = site1.pk
site1_page = self._create_page("Site1", CONTENT_STATUS_PUBLISHED)
self._test_site_pages("Site1", CONTENT_STATUS_PUBLISHED, count=1)
# create pages under site2, which should only be accessible
# when SITE_ID is site2
settings.SITE_ID = site2.pk
self._create_page("Site2", CONTENT_STATUS_PUBLISHED)
self._test_site_pages("Site2", CONTENT_STATUS_PUBLISHED, count=1)
# original page should 404
response = self.client.get(site1_page.get_absolute_url())
self.assertEqual(response.status_code, 404)
# change back to site1, and only the site1 pages should be retrieved
settings.SITE_ID = site1.pk
self._test_site_pages("Site1", CONTENT_STATUS_PUBLISHED, count=1)
# insert a new record, see the count change
self._create_page("Site1 Draft", CONTENT_STATUS_DRAFT)
self._test_site_pages("Site1 Draft", CONTENT_STATUS_DRAFT, count=2)
self._test_site_pages("Site1 Draft", CONTENT_STATUS_PUBLISHED, count=2)
# change back to site2, and only the site2 pages should be retrieved
settings.SITE_ID = site2.pk
self._test_site_pages("Site2", CONTENT_STATUS_PUBLISHED, count=1)
# insert a new record, see the count change
self._create_page("Site2 Draft", CONTENT_STATUS_DRAFT)
self._test_site_pages("Site2 Draft", CONTENT_STATUS_DRAFT, count=2)
self._test_site_pages("Site2 Draft", CONTENT_STATUS_PUBLISHED, count=2)
# tear down
if old_site_id:
settings.SITE_ID = old_site_id
else:
del settings.SITE_ID
site1.delete()
site2.delete()
def test_gallery_import(self):
"""
Test that a gallery creates images when given a zip file to
import, and that descriptions are created.
"""
zip_name = "gallery.zip"
copy_test_to_media("mezzanine.core", zip_name)
title = str(uuid4())
gallery = Gallery.objects.create(title=title, zip_import=zip_name)
images = list(gallery.images.all())
self.assertTrue(images)
self.assertTrue(all([image.description for image in images]))
# Clean up.
rmtree(unicode(os.path.join(settings.MEDIA_ROOT,
GALLERIES_UPLOAD_DIR, title)))
def test_thumbnail_generation(self):
"""
Test that a thumbnail is created and resized.
"""
image_name = "image.jpg"
size = (24, 24)
copy_test_to_media("mezzanine.core", image_name)
thumb_name = os.path.join(settings.THUMBNAILS_DIR_NAME,
image_name.replace(".", "-%sx%s." % size))
thumb_path = os.path.join(settings.MEDIA_ROOT, thumb_name)
thumb_image = thumbnail(image_name, *size)
self.assertEqual(os.path.normpath(thumb_image.lstrip("/")), thumb_name)
self.assertNotEqual(os.path.getsize(thumb_path), 0)
thumb = Image.open(thumb_path)
self.assertEqual(thumb.size, size)
# Clean up.
del thumb
os.remove(os.path.join(settings.MEDIA_ROOT, image_name))
os.remove(os.path.join(thumb_path))
rmtree(os.path.join(os.path.dirname(thumb_path)))
| {
"content_hash": "73f0449681258c7c00a25c3ab13141fe",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 79,
"avg_line_length": 42.578947368421055,
"alnum_prop": 0.6130584495850256,
"repo_name": "westinedu/newertrends",
"id": "c8b798cbd49c387aeb0ed3855fd99ec7ce8e6540",
"size": "22653",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mezzanine/core/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "450683"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "5511333"
},
{
"name": "Ruby",
"bytes": "249"
},
{
"name": "Shell",
"bytes": "1355"
}
],
"symlink_target": ""
} |
from enum import Enum, unique
from django.db import models
from django.utils.translation import ugettext_lazy as _
@unique
class Fields(Enum):
ENERGY = 1
MARITIME_FISHERIES = 2
HEALTH = 3
DEFENSE = 4
ENVIRONMENT = 5
SOCIAL = 6
EDUCATION = 7
TECHNOLOGY = 8
CULTURE_AND_LANGUAGE = 9
ECONOMY = 10
AGRICULTURE = 11
ARCHITECTURE_AND_URBAN_PLANNING = 12
class Initiative(models.Model):
# Overview
name = models.CharField(_('Initiative name'), max_length=255)
description = models.TextField(_('Description'))
predicted_beneficiary_count = models.IntegerField(_('Predicted beneficiary count'),
null=True, blank=True)
location = models.TextField(_('Location'))
target = models.CharField(_('Target'), max_length=255,
help_text=_('Institution / Organization / Government / Specific society group'))
start = models.DateField(_('Start date'))
end = models.DateField(_('End date'))
contact_person_name = models.CharField(_('Contact person name'), max_length=255)
contact_person_number = models.CharField(_('Contact person mobile number'), max_length=255)
field = models.SmallIntegerField(
_("Initiative type"),
choices=(
(Fields.ENERGY.value, _('Energy')),
(Fields.MARITIME_FISHERIES.value, _('Maritime and Fisheries')),
(Fields.HEALTH.value, _('Health')),
(Fields.DEFENSE.value, _('Defense')),
(Fields.ENVIRONMENT.value, _('Environment')),
(Fields.SOCIAL.value, _('Social')),
(Fields.EDUCATION.value, _('Education')),
(Fields.TECHNOLOGY.value, _('Technology')),
(Fields.CULTURE_AND_LANGUAGE.value, _('Culture and language')),
(Fields.ECONOMY.value, _('Economy')),
(Fields.AGRICULTURE.value, _('Agriculture')),
(Fields.ARCHITECTURE_AND_URBAN_PLANNING.value, _('Architecture and urban planning')),
)
)
# Initiative details
# Meta
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
is_published = models.BooleanField(default=False)
| {
"content_hash": "435a4eeec90886868351e821ea34c2b2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 110,
"avg_line_length": 36.11290322580645,
"alnum_prop": 0.6158999553372041,
"repo_name": "SeiryuZ/magnet",
"id": "cbc3f20f02939c0f5cd8f3f415e559180ed7509a",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnet/apps/initiatives/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74"
},
{
"name": "HTML",
"bytes": "2217"
},
{
"name": "JavaScript",
"bytes": "471"
},
{
"name": "Python",
"bytes": "27830"
}
],
"symlink_target": ""
} |
from st2common import log as logging
from st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE
from st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE
from st2common.constants.keyvalue import ALLOWED_SCOPES
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR
from st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException
from st2common.models.system.keyvalue import UserKeyReference
from st2common.persistence.keyvalue import KeyValuePair
__all__ = [
'get_kvp_for_name',
'get_values_for_names',
'KeyValueLookup',
'UserKeyValueLookup'
]
LOG = logging.getLogger(__name__)
def get_kvp_for_name(name):
try:
kvp_db = KeyValuePair.get_by_name(name)
except ValueError:
kvp_db = None
return kvp_db
def get_values_for_names(names, default_value=None):
"""
Retrieve values for the provided key names (multi get).
    If a KeyValuePair object for a particular name doesn't exist, the dictionary will contain
default_value for that name.
:rtype: ``dict``
"""
result = {}
kvp_dbs = KeyValuePair.get_by_names(names=names)
name_to_kvp_db_map = {}
for kvp_db in kvp_dbs:
name_to_kvp_db_map[kvp_db.name] = kvp_db.value
for name in names:
result[name] = name_to_kvp_db_map.get(name, default_value)
return result
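# Example of the multi-get behaviour (key names here are hypothetical):
#   get_values_for_names(['cfg.host', 'cfg.missing'], default_value='n/a')
#   -> {'cfg.host': '<stored value>', 'cfg.missing': 'n/a'}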
class KeyValueLookup(object):
def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):
if not scope:
scope = FULL_SYSTEM_SCOPE
if scope == SYSTEM_SCOPE:
scope = FULL_SYSTEM_SCOPE
self._prefix = prefix
self._key_prefix = key_prefix or ''
self._value_cache = cache or {}
self._scope = scope
def __str__(self):
return self._value_cache[self._key_prefix]
def __getitem__(self, key):
return self._get(key)
def __getattr__(self, name):
return self._get(name)
def _get(self, name):
# get the value for this key and save in value_cache
if self._key_prefix:
key = '%s.%s' % (self._key_prefix, name)
else:
key = name
if self._prefix:
kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])
else:
kvp_key = key
value = self._get_kv(kvp_key)
self._value_cache[key] = value
# return a KeyValueLookup as response since the lookup may not be complete e.g. if
# the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,
# will expect to do a dictionary style lookup for key_base and key_value as subsequent
# calls. Saving the value in cache avoids extra DB calls.
return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,
scope=self._scope)
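    # Illustrative chained lookup (key names are hypothetical): accessing
    #   KeyValueLookup().server.hostname
    # triggers _get('server') and then _get('hostname'), resolving the key
    # 'server.hostname' against the datastore one segment at a time.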
def _get_kv(self, key):
scope = self._scope
LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)
kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)
if kvp:
LOG.debug('Got value %s from datastore.', kvp.value)
return kvp.value if kvp else ''
class UserKeyValueLookup(object):
def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):
if not scope:
scope = FULL_USER_SCOPE
if scope == USER_SCOPE:
scope = FULL_USER_SCOPE
self._prefix = prefix
self._key_prefix = key_prefix or ''
self._value_cache = cache or {}
self._user = user
self._scope = scope
def __str__(self):
return self._value_cache[self._key_prefix]
def __getitem__(self, key):
return self._get(key)
def __getattr__(self, name):
return self._get(name)
def _get(self, name):
# get the value for this key and save in value_cache
if self._key_prefix:
key = '%s.%s' % (self._key_prefix, name)
else:
key = UserKeyReference(name=name, user=self._user).ref
if self._prefix:
kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])
else:
kvp_key = key
value = self._get_kv(kvp_key)
self._value_cache[key] = value
# return a KeyValueLookup as response since the lookup may not be complete e.g. if
# the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,
# will expect to do a dictionary style lookup for key_base and key_value as subsequent
# calls. Saving the value in cache avoids extra DB calls.
return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,
cache=self._value_cache, scope=self._scope)
def _get_kv(self, key):
scope = self._scope
kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)
return kvp.value if kvp else ''
def get_key_reference(scope, name, user=None):
"""
Given a key name and user this method returns a new name (string ref)
to address the key value pair in the context of that user.
:param user: User to whom key belongs.
    :type user: ``str``
:param name: Original name of the key.
:type name: ``str``
:rtype: ``str``
"""
if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):
return name
elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):
if not user:
raise InvalidUserException('A valid user must be specified for user key ref.')
return UserKeyReference(name=name, user=user).ref
else:
raise InvalidScopeException('Scope "%s" is not valid. Allowed scopes are %s.' %
(scope, ALLOWED_SCOPES))
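# Illustrative behaviour (key name and user are hypothetical):
#   get_key_reference(FULL_SYSTEM_SCOPE, 'api_key')               -> 'api_key'
#   get_key_reference(FULL_USER_SCOPE, 'api_key', user='stanley') -> UserKeyReference(name='api_key', user='stanley').ref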
| {
"content_hash": "c6dbac8c1e528b04fbd76e45042dd770",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 96,
"avg_line_length": 32.747191011235955,
"alnum_prop": 0.6170869788986104,
"repo_name": "lakshmi-kannan/st2",
"id": "be443eda56812339a80ff15a2d26de7c004e79ba",
"size": "6609",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2common/st2common/services/keyvalues.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41834"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3895413"
},
{
"name": "Shell",
"bytes": "40304"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
"""This module provides functions to generate an ec2stack configuration file.
"""
import os
import argparse
from ConfigParser import SafeConfigParser
def main():
"""
Entry point into the configuration application.
"""
config_folder = _create_config_folder()
_create_config_file(config_folder)
def _create_config_folder():
"""
Creates a folder to hold the user's configuration files.
@return: Path of the configuration folder.
"""
config_folder = os.path.join(os.path.expanduser('~'), '.ec2stack')
if not os.path.exists(config_folder):
os.makedirs(config_folder)
os.chmod(config_folder, 0700)
return config_folder
def _create_config_file(config_folder):
"""
Reads in configuration items and writes them out to the configuration file.
@param config_folder: Path of the configuration folder.
"""
args = _generate_args()
profile = args.pop('profile')
advanced_network_enabled = args.pop('advanced')
config_file_path = config_folder + '/ec2stack.conf'
config = _modify_config_profile(
config_file_path, profile, advanced_network_enabled)
config_file = open(config_file_path, 'w+')
config.write(config_file)
def _generate_args():
"""
Generate command line arguments for ec2stack-configure.
@return: args.
"""
parser = argparse.ArgumentParser(
'Command line utility for configuring ec2stack'
)
parser.add_argument(
'-p',
'--profile',
required=False,
help='The profile to configure, default is initial',
default='initial'
)
parser.add_argument(
'-a',
'--advanced',
required=False,
help='Turn advanced network config on for application',
default=False
)
args = parser.parse_args()
return vars(args)
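# Illustrative invocations (the exact console-script name depends on how ec2stack is installed):
#   ec2stack-configure                    -> configure the default 'initial' profile
#   ec2stack-configure -p staging -a yes  -> configure a 'staging' profile with the
#                                            advanced networking prompt enabled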
def _modify_config_profile(config_file, profile, advanced_network_enabled):
"""
Modify configuration profile.
@param config_file: current config file configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
config = SafeConfigParser()
config.read(config_file)
if not config.has_section(profile):
config.add_section(profile)
config = _set_mandatory_attributes_of_profile(config, profile)
if advanced_network_enabled:
config = _set_advanced_network_attributes_of_profile(config, profile)
config = _set_optional_attributes_of_profile(config, profile)
return config
def _set_mandatory_attributes_of_profile(config, profile):
"""
Modify mandatory attributes of profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
config = _set_attribute_of_profile(
config, profile, 'ec2stack_bind_address', 'EC2Stack bind address', 'localhost'
)
config = _set_attribute_of_profile(
config, profile, 'ec2stack_port', 'EC2Stack bind port', '5000'
)
config = _set_attribute_of_profile(
config, profile, 'cloudstack_host', 'Cloudstack host', 'localhost'
)
config = _set_attribute_of_profile(
config, profile, 'cloudstack_port', 'Cloudstack port', '8080'
)
config = _set_attribute_of_profile(
config, profile, 'cloudstack_protocol', 'Cloudstack protocol', 'http'
)
config = _set_attribute_of_profile(
config, profile, 'cloudstack_path', 'Cloudstack path', '/client/api'
)
config = _set_attribute_of_profile(
config, profile, 'cloudstack_custom_disk_offering', 'Cloudstack custom disk offering name', 'Custom'
)
while True:
config = _set_attribute_of_profile(
config, profile, 'cloudstack_default_zone', 'Cloudstack default zone name', ''
)
        if config.get(profile, 'cloudstack_default_zone') != '':
break
return config
def _set_advanced_network_attributes_of_profile(config, profile):
"""
Modify advanced network attributes of profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
config = _set_attribute_of_profile(
config, profile, 'vpc_offering_id', 'VPC offering id', ''
)
return config
def _set_optional_attributes_of_profile(config, profile):
"""
Modify optional attributes of profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
    configure_instance_type_mappings = raw_input(
        'Do you wish to input instance type mappings? (Yes/No): '
    )
    if configure_instance_type_mappings.lower() in ['yes', 'y']:
        config = _read_user_instance_mappings(config, profile)
    configure_resource_type_mappings = raw_input(
        'Do you wish to input resource type to resource id mappings' +
        ' for tag support? (Yes/No): '
    )
    if configure_resource_type_mappings.lower() in ['yes', 'y']:
config = _read_user_resource_type_mappings(config, profile)
return config
def _read_user_instance_mappings(config, profile):
"""
Add instance type mappings to profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
instance_section = profile + "instancemap"
if not config.has_section(instance_section):
config.add_section(instance_section)
while True:
key = raw_input(
'Insert the AWS EC2 instance type you wish to map: '
)
value = raw_input(
'Insert the name of the instance type you wish to map this to: '
)
config.set(instance_section, key, value)
add_more = raw_input(
'Do you wish to add more mappings? (Yes/No): ')
if add_more.lower() in ['no', 'n']:
break
return config
def _read_user_resource_type_mappings(config, profile):
"""
Add resource type mappings to profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
resource_section = profile + "resourcemap"
if not config.has_section(resource_section):
config.add_section(resource_section)
while True:
key = raw_input(
'Insert the cloudstack resource id you wish to map: '
)
value = raw_input(
'Insert the cloudstack resource type you wish to map this to: '
)
config.set(resource_section, key, value)
add_more = raw_input(
'Do you wish to add more mappings? (Yes/No): ')
if add_more.lower() in ['no', 'n']:
break
return config
def _set_attribute_of_profile(config, profile, attribute, message, default):
"""
Set attribute of profile
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@param attribute: the attribute to set.
@param message: the message to prompt the user with.
@param default: the default value to use if none is entered.
@return: configparser configuration.
"""
if config.has_option(profile, attribute):
default = config.get(profile, attribute)
attribute_value = _read_in_config_attribute_or_use_default(
message, default)
config.set(profile, attribute, attribute_value)
return config
def _read_in_config_attribute_or_use_default(message, default):
"""
Add resource type mappings to profile.
@param message: the message to prompt the user with.
@param default: the default value to use if none is entered.
@return: configparser configuration.
"""
attribute = raw_input(message + ' [' + default + ']: ')
if attribute == '':
attribute = default
return attribute
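# Illustrative prompt (the value in brackets is the default):
#   Cloudstack port [8080]:
# Pressing enter returns '8080'; typing another value returns that value instead.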
| {
"content_hash": "33cf11aa9e8b52b39ba0fb17a1153cde",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 108,
"avg_line_length": 28.99283154121864,
"alnum_prop": 0.6515020398071455,
"repo_name": "terbolous/cloudstack-ec2stack",
"id": "0c7f9d40a9320ef5b8203ca999ab7afcef3942c3",
"size": "8933",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ec2stack/configure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "239301"
},
{
"name": "Shell",
"bytes": "5226"
}
],
"symlink_target": ""
} |
import json
import sys
def write_gender_cont_person():
try:
f_json = open('data_extracted/person_lookup.json','r',encoding="utf8")
except IOError:
print('Need to create person_lookup.json first. (Execute write_personlist_by_type.py)')
# Load person lookup created by write_personlist_by_type.py (Maps: *page of person* -> True, if it is a Person)
person_lookup = json.load(f_json)
print("Load complete: person_lookup.json")
f_json.close()
f_in = open('data_raw/genders_en.ttl','r', encoding="utf8")
f_out = open('data_extracted/filtered_gender.txt','w+', encoding="utf8")
gender_assignment=dict()
next(f_in) #First Line is comment with date
for line in f_in:
# Read gender assignments of DBpedia
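        # Each data line is an N-Triples style statement, roughly of the form
        # (URIs shortened for illustration):
        #   <http://dbpedia.org/resource/Some_Person> <.../gender> "female"@en .
        # so splits[0] is the subject URI and splits[2] is the gender literal.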
splits=line.split()
subject=splits[0]
value=splits[2]
if person_lookup.get(subject)==True:
# Only keep the gender assignment if it belongs to a person
f_out.write(line)
gender_assignment[subject[1:-1]]=value
f_in.close()
f_out.close()
with open('data_extracted/gender_assignment.json','w+', encoding='utf8') as f_json:
json.dump(gender_assignment, f_json, ensure_ascii=False)
print('write_gender_cont_person - DONE')
if __name__ == "__main__":
if len(sys.argv)>1:
raise IOError("Overspecified")
    write_gender_cont_person()
| {
"content_hash": "74d4694559a668dd01cae7ef1d8d3846",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 112,
"avg_line_length": 30.261904761904763,
"alnum_prop": 0.7025963808025177,
"repo_name": "kandy-koblenz/people-networks",
"id": "fe8ceb1dc831455893c4e8ccebee6734d467c0f7",
"size": "1708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbpedia-data/write_gender_cont_person.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "225247"
},
{
"name": "Python",
"bytes": "91214"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
class Alexa(object):
def __init__(self, port):
self.port = port
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.port, GPIO.OUT)
GPIO.output(self.port, GPIO.HIGH)
self.on_state = GPIO.HIGH
self.off_state = not self.on_state
def set_on(self):
GPIO.output(self.port, self.on_state)
def set_off(self):
GPIO.output(self.port, self.off_state)
def is_on(self):
return GPIO.input(self.port) == self.on_state
def is_off(self):
return GPIO.input(self.port) == self.off_state
def toggle(self):
if self.is_on():
self.set_off()
else:
self.set_on()
def blink(self, t=0.2):
self.set_off()
time.sleep(t)
self.set_on()
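# Minimal usage sketch (the BCM pin number below is an assumption; match it to your wiring):
# if __name__ == "__main__":
#     led = Alexa(18)
#     led.blink(0.5)   # drop the pin low for half a second, then drive it high again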
| {
"content_hash": "bc8c25e799a2bf83024322ec8c75f554",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 54,
"avg_line_length": 23.228571428571428,
"alnum_prop": 0.5547355473554736,
"repo_name": "eddieruano/SentinelGreen",
"id": "0ce8a84e463d0368cfe7e26e891a3ffde0665953",
"size": "813",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trigger/Alexa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "57786"
},
{
"name": "C++",
"bytes": "185093"
},
{
"name": "Go",
"bytes": "1268"
},
{
"name": "Java",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "2015"
},
{
"name": "Makefile",
"bytes": "21912"
},
{
"name": "Objective-C",
"bytes": "481651"
},
{
"name": "Objective-C++",
"bytes": "4181"
},
{
"name": "Perl",
"bytes": "13306"
},
{
"name": "Python",
"bytes": "1816166"
},
{
"name": "Ruby",
"bytes": "72"
},
{
"name": "Shell",
"bytes": "15286"
},
{
"name": "Swift",
"bytes": "11715"
},
{
"name": "TypeScript",
"bytes": "4859"
}
],
"symlink_target": ""
} |
from unittest import TestCase, SkipTest
try:
from unittest.mock import Mock
except:
from mock import Mock
import uuid
from holoviews import Tiles
try:
import plotly.graph_objs as go
except:
go = None
from holoviews.streams import (
BoundsXY, BoundsX, BoundsY, RangeXY, RangeX, RangeY, Selection1D
)
try:
from holoviews.plotting.plotly.callbacks import (
RangeXYCallback, RangeXCallback, RangeYCallback,
BoundsXYCallback, BoundsXCallback, BoundsYCallback,
Selection1DCallback
)
except:
pass
def mock_plot(trace_uid=None):
# Build a mock to stand in for a PlotlyPlot subclass
if trace_uid is None:
trace_uid = str(uuid.uuid4())
plot = Mock()
plot.trace_uid = trace_uid
return plot
def build_callback_set(callback_cls, trace_uids, stream_type, num_streams=2):
"""
Build a collection of plots, callbacks, and streams for a given callback class and
a list of trace_uids
"""
plots = []
streamss = []
callbacks = []
eventss = []
for trace_uid in trace_uids:
plot = mock_plot(trace_uid)
streams, event_list = [], []
for _ in range(num_streams):
events = []
stream = stream_type()
def cb(events=events, **kwargs):
events.append(kwargs)
stream.add_subscriber(cb)
streams.append(stream)
event_list.append(events)
callback = callback_cls(plot, streams, None)
plots.append(plot)
streamss.append(streams)
callbacks.append(callback)
eventss.append(event_list)
return plots, streamss, callbacks, eventss
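# For instance, the range tests below call something like (trace uids are illustrative):
#   plots, streamss, callbacks, eventss = build_callback_set(
#       RangeXYCallback, ['first', 'second'], RangeXY, 2)
# giving one callback per trace uid, each wired to two RangeXY streams whose
# triggered kwargs are recorded in the corresponding events lists.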
class TestCallbacks(TestCase):
def setUp(self):
if go is None:
raise SkipTest("Plotly required to test plotly callbacks")
self.fig_dict = go.Figure({
'data': [
{'type': 'scatter',
'y': [1, 2, 3],
'uid': 'first'},
{'type': 'bar',
'y': [1, 2, 3],
'uid': 'second',
'xaxis': 'x',
'yaxis': 'y'},
{'type': 'scatter',
'y': [1, 2, 3],
'uid': 'third',
'xaxis': 'x2',
'yaxis': 'y2'},
{'type': 'bar',
'y': [1, 2, 3],
'uid': 'forth',
'xaxis': 'x3',
'yaxis': 'y3'},
],
'layout': {
'title': {'text': 'Figure Title'}}
}).to_dict()
self.mapbox_fig_dict = go.Figure({
'data': [
{'type': 'scattermapbox', 'uid': 'first', 'subplot': 'mapbox'},
{'type': 'scattermapbox', 'uid': 'second', 'subplot': 'mapbox2'},
{'type': 'scattermapbox', 'uid': 'third', 'subplot': 'mapbox3'}
],
'layout': {
'title': {'text': 'Figure Title'},
}
}).to_dict()
# Precompute a pair of lat/lon, easting/northing, mapbox coord values
self.lon_range1, self.lat_range1 = (10, 30), (20, 40)
self.easting_range1, self.northing_range1 = Tiles.lon_lat_to_easting_northing(
self.lon_range1, self.lat_range1
)
self.easting_range1 = tuple(self.easting_range1)
self.northing_range1 = tuple(self.northing_range1)
self.mapbox_coords1 = [
[self.lon_range1[0], self.lat_range1[1]],
[self.lon_range1[1], self.lat_range1[1]],
[self.lon_range1[1], self.lat_range1[0]],
[self.lon_range1[0], self.lat_range1[0]]
]
self.lon_range2, self.lat_range2 = (-50, -30), (-70, -40)
self.easting_range2, self.northing_range2 = Tiles.lon_lat_to_easting_northing(
self.lon_range2, self.lat_range2
)
self.easting_range2 = tuple(self.easting_range2)
self.northing_range2 = tuple(self.northing_range2)
self.mapbox_coords2 = [
[self.lon_range2[0], self.lat_range2[1]],
[self.lon_range2[1], self.lat_range2[1]],
[self.lon_range2[1], self.lat_range2[0]],
[self.lon_range2[0], self.lat_range2[0]]
]
def testCallbackClassInstanceTracking(self):
# Each callback class should track all active instances of its own class in a
# weak value dictionary. Here we make sure that instances stay separated per
# class
plot1 = mock_plot()
plot2 = mock_plot()
plot3 = mock_plot()
# Check RangeXYCallback
rangexy_cb = RangeXYCallback(plot1, [], None)
self.assertIn(plot1.trace_uid, RangeXYCallback.instances)
self.assertIs(rangexy_cb, RangeXYCallback.instances[plot1.trace_uid])
# Check BoundsXYCallback
boundsxy_cb = BoundsXYCallback(plot2, [], None)
self.assertIn(plot2.trace_uid, BoundsXYCallback.instances)
self.assertIs(boundsxy_cb, BoundsXYCallback.instances[plot2.trace_uid])
# Check Selection1DCallback
selection1d_cb = Selection1DCallback(plot3, [], None)
self.assertIn(plot3.trace_uid, Selection1DCallback.instances)
self.assertIs(selection1d_cb, Selection1DCallback.instances[plot3.trace_uid])
# Check that objects don't show up as instances in the wrong class
self.assertNotIn(plot1.trace_uid, BoundsXYCallback.instances)
self.assertNotIn(plot1.trace_uid, Selection1DCallback.instances)
self.assertNotIn(plot2.trace_uid, RangeXYCallback.instances)
self.assertNotIn(plot2.trace_uid, Selection1DCallback.instances)
self.assertNotIn(plot3.trace_uid, RangeXYCallback.instances)
self.assertNotIn(plot3.trace_uid, BoundsXYCallback.instances)
def testRangeXYCallbackEventData(self):
for viewport in [
{'xaxis.range': [1, 4], 'yaxis.range': [-1, 5]},
{'xaxis.range[0]': 1, 'xaxis.range[1]': 4,
'yaxis.range[0]': -1, 'yaxis.range[1]': 5},
]:
event_data = RangeXYCallback.get_event_data_from_property_update(
"viewport", viewport, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'x_range': (1, 4), 'y_range': (-1, 5)},
'second': {'x_range': (1, 4), 'y_range': (-1, 5)},
})
def testRangeXCallbackEventData(self):
for viewport in [
{'xaxis.range': [1, 4], 'yaxis.range': [-1, 5]},
{'xaxis.range[0]': 1, 'xaxis.range[1]': 4,
'yaxis.range[0]': -1, 'yaxis.range[1]': 5},
]:
event_data = RangeXCallback.get_event_data_from_property_update(
"viewport", viewport, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'x_range': (1, 4)},
'second': {'x_range': (1, 4)},
})
def testRangeYCallbackEventData(self):
for viewport in [
{'xaxis.range': [1, 4], 'yaxis.range': [-1, 5]},
{'xaxis.range[0]': 1, 'xaxis.range[1]': 4,
'yaxis.range[0]': -1, 'yaxis.range[1]': 5},
]:
event_data = RangeYCallback.get_event_data_from_property_update(
"viewport", viewport, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'y_range': (-1, 5)},
'second': {'y_range': (-1, 5)},
})
def testMapboxRangeXYCallbackEventData(self):
relayout_data = {
'mapbox._derived': {"coordinates": self.mapbox_coords1},
'mapbox3._derived': {"coordinates": self.mapbox_coords2}
}
event_data = RangeXYCallback.get_event_data_from_property_update(
"relayout_data", relayout_data, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'x_range': self.easting_range1, 'y_range': self.northing_range1},
'third': {'x_range': self.easting_range2, 'y_range': self.northing_range2},
})
def testMapboxRangeXCallbackEventData(self):
relayout_data = {
'mapbox._derived': {"coordinates": self.mapbox_coords1},
'mapbox3._derived': {"coordinates": self.mapbox_coords2}
}
event_data = RangeXCallback.get_event_data_from_property_update(
"relayout_data", relayout_data, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'x_range': self.easting_range1},
'third': {'x_range': self.easting_range2},
})
def testMapboxRangeYCallbackEventData(self):
relayout_data = {
'mapbox._derived': {"coordinates": self.mapbox_coords1},
'mapbox3._derived': {"coordinates": self.mapbox_coords2}
}
event_data = RangeYCallback.get_event_data_from_property_update(
"relayout_data", relayout_data, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'y_range': self.northing_range1},
'third': {'y_range': self.northing_range2},
})
def testRangeCallbacks(self):
# Build callbacks
range_classes = [RangeXYCallback, RangeXCallback, RangeYCallback]
xyplots, xystreamss, xycallbacks, xyevents = build_callback_set(
RangeXYCallback, ['first', 'second', 'third', 'forth', 'other'],
RangeXY, 2
)
xplots, xstreamss, xcallbacks, xevents = build_callback_set(
RangeXCallback, ['first', 'second', 'third', 'forth', 'other'],
RangeX, 2
)
yplots, ystreamss, ycallbacks, yevents = build_callback_set(
RangeYCallback, ['first', 'second', 'third', 'forth', 'other'],
RangeY, 2
)
# Sanity check the length of the streams lists
for xystreams in xystreamss:
self.assertEqual(len(xystreams), 2)
# Change viewport on first set of axes
viewport1 = {'xaxis.range': [1, 4], 'yaxis.range': [-1, 5]}
for cb_cls in range_classes:
cb_cls.update_streams_from_property_update(
"viewport", viewport1, self.fig_dict
)
# Check that all streams attached to 'first' and 'second' plots were triggered
for xystream, xstream, ystream in zip(
xystreamss[0] + xystreamss[1],
xstreamss[0] + xstreamss[1],
ystreamss[0] + ystreamss[1],
):
assert xystream.x_range == (1, 4)
assert xystream.y_range == (-1, 5)
assert xstream.x_range == (1, 4)
assert ystream.y_range == (-1, 5)
# And that no other streams were triggered
for xystream, xstream, ystream in zip(
xystreamss[2] + xystreamss[3],
xstreamss[2] + xstreamss[3],
ystreamss[2] + ystreamss[3],
):
assert xystream.x_range is None
assert xystream.y_range is None
assert xstream.x_range is None
assert ystream.y_range is None
# Change viewport on second set of axes
viewport2 = {'xaxis2.range': [2, 5], 'yaxis2.range': [0, 6]}
for cb_cls in range_classes:
cb_cls.update_streams_from_property_update(
"viewport", viewport2, self.fig_dict
)
# Check that all streams attached to 'third' were triggered
for xystream, xstream, ystream in zip(
xystreamss[2], xstreamss[2], ystreamss[2]
):
assert xystream.x_range == (2, 5)
assert xystream.y_range == (0, 6)
assert xstream.x_range == (2, 5)
assert ystream.y_range == (0, 6)
# Change viewport on third set of axes
viewport3 = {'xaxis3.range': [3, 6], 'yaxis3.range': [1, 7]}
for cb_cls in range_classes:
cb_cls.update_streams_from_property_update(
"viewport", viewport3, self.fig_dict
)
# Check that all streams attached to 'forth' were triggered
for xystream, xstream, ystream in zip(
xystreamss[3], xstreamss[3], ystreamss[3]
):
assert xystream.x_range == (3, 6)
assert xystream.y_range == (1, 7)
assert xstream.x_range == (3, 6)
assert ystream.y_range == (1, 7)
# Check that streams attached to a trace not in this plot are not triggered
for xyevent, xevent, yevent in zip(
xyevents[4], xevents[4], yevents[4]
):
            assert len(xyevent) == 0
            assert len(xevent) == 0
            assert len(yevent) == 0
def testBoundsXYCallbackEventData(self):
selected_data1 = {'range': {'x': [1, 4], 'y': [-1, 5]}}
event_data = BoundsXYCallback.get_event_data_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'bounds': (1, -1, 4, 5)},
'second': {'bounds': (1, -1, 4, 5)},
'third': {'bounds': None},
'forth': {'bounds': None}
})
def testBoundsXCallbackEventData(self):
selected_data1 = {'range': {'x': [1, 4], 'y': [-1, 5]}}
event_data = BoundsXCallback.get_event_data_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'boundsx': (1, 4)},
'second': {'boundsx': (1, 4)},
'third': {'boundsx': None},
'forth': {'boundsx': None}
})
def testBoundsYCallbackEventData(self):
selected_data1 = {'range': {'x': [1, 4], 'y': [-1, 5]}}
event_data = BoundsYCallback.get_event_data_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'boundsy': (-1, 5)},
'second': {'boundsy': (-1, 5)},
'third': {'boundsy': None},
'forth': {'boundsy': None}
})
def testMapboxBoundsXYCallbackEventData(self):
selected_data = {"range": {'mapbox2': [
[self.lon_range1[0], self.lat_range1[0]],
[self.lon_range1[1], self.lat_range1[1]]
]}}
event_data = BoundsXYCallback.get_event_data_from_property_update(
"selected_data", selected_data, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'bounds': None},
'second': {'bounds': (
self.easting_range1[0], self.northing_range1[0],
self.easting_range1[1], self.northing_range1[1]
)},
'third': {'bounds': None}
})
def testMapboxBoundsXCallbackEventData(self):
selected_data = {"range": {'mapbox': [
[self.lon_range1[0], self.lat_range1[0]],
[self.lon_range1[1], self.lat_range1[1]]
]}}
event_data = BoundsXCallback.get_event_data_from_property_update(
"selected_data", selected_data, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'boundsx': (
self.easting_range1[0], self.easting_range1[1],
)},
'second': {'boundsx': None},
'third': {'boundsx': None}
})
def testMapboxBoundsYCallbackEventData(self):
selected_data = {"range": {'mapbox3': [
[self.lon_range1[0], self.lat_range1[0]],
[self.lon_range1[1], self.lat_range1[1]]
]}}
event_data = BoundsYCallback.get_event_data_from_property_update(
"selected_data", selected_data, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'boundsy': None},
'second': {'boundsy': None},
'third': {'boundsy': (
self.northing_range1[0], self.northing_range1[1]
)},
})
def testBoundsCallbacks(self):
# Build callbacks
bounds_classes = [BoundsXYCallback, BoundsXCallback, BoundsYCallback]
xyplots, xystreamss, xycallbacks, xyevents = build_callback_set(
BoundsXYCallback, ['first', 'second', 'third', 'forth', 'other'],
BoundsXY, 2
)
xplots, xstreamss, xcallbacks, xevents = build_callback_set(
BoundsXCallback, ['first', 'second', 'third', 'forth', 'other'],
BoundsX, 2
)
yplots, ystreamss, ycallbacks, yevents = build_callback_set(
BoundsYCallback, ['first', 'second', 'third', 'forth', 'other'],
BoundsY, 2
)
# box selection on first set of axes
selected_data1 = {'range': {'x': [1, 4], 'y': [-1, 5]}}
for cb_cls in bounds_classes:
cb_cls.update_streams_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
# Check that all streams attached to 'first' and 'second' plots were triggered
for xystream, xstream, ystream in zip(
xystreamss[0] + xystreamss[1],
xstreamss[0] + xstreamss[1],
ystreamss[0] + ystreamss[1],
):
assert xystream.bounds == (1, -1, 4, 5)
assert xstream.boundsx == (1, 4)
assert ystream.boundsy == (-1, 5)
# Check that streams attached to plots in other subplots are called with None
# to clear their bounds
for xystream, xstream, ystream in zip(
xystreamss[2] + xystreamss[3],
xstreamss[2] + xstreamss[3],
ystreamss[2] + ystreamss[3],
):
assert xystream.bounds is None
assert xstream.boundsx is None
assert ystream.boundsy is None
# box select on second set of axes
selected_data2 = {'range': {'x2': [2, 5], 'y2': [0, 6]}}
for cb_cls in bounds_classes:
cb_cls.update_streams_from_property_update(
"selected_data", selected_data2, self.fig_dict
)
        # Check that all streams attached to 'third' were triggered
for xystream, xstream, ystream in zip(
xystreamss[2], xstreamss[2], ystreamss[2],
):
assert xystream.bounds == (2, 0, 5, 6)
assert xstream.boundsx == (2, 5)
assert ystream.boundsy == (0, 6)
# box select on third set of axes
selected_data3 = {'range': {'x3': [3, 6], 'y3': [1, 7]}}
for cb_cls in bounds_classes:
cb_cls.update_streams_from_property_update(
"selected_data", selected_data3, self.fig_dict
)
        # Check that all streams attached to 'forth' were triggered
for xystream, xstream, ystream in zip(
xystreamss[3], xstreamss[3], ystreamss[3],
):
assert xystream.bounds == (3, 1, 6, 7)
assert xstream.boundsx == (3, 6)
assert ystream.boundsy == (1, 7)
# lasso select on first set of axes should clear all bounds
selected_data_lasso = {'lassoPoints': {'x': [1, 4, 2], 'y': [-1, 5, 2]}}
for cb_cls in bounds_classes:
cb_cls.update_streams_from_property_update(
"selected_data", selected_data_lasso, self.fig_dict
)
# Check that all streams attached to this figure are called with None
# to clear their bounds
for xystream, xstream, ystream in zip(
xystreamss[0] + xystreamss[1] + xystreamss[2] + xystreamss[3],
xstreamss[0] + xstreamss[1] + xstreamss[2] + xstreamss[3],
ystreamss[0] + ystreamss[1] + ystreamss[2] + ystreamss[3],
):
assert xystream.bounds is None
assert xstream.boundsx is None
assert ystream.boundsy is None
# Check that streams attached to plots not in this figure are not called
for xyevent, xevent, yevent in zip(
xyevents[4], xevents[4], yevents[4]
):
assert xyevent == []
assert xevent == []
assert yevent == []
def testSelection1DCallbackEventData(self):
selected_data1 = {'points': [
{"pointNumber": 0, "curveNumber": 0},
{"pointNumber": 2, "curveNumber": 0},
]}
event_data = Selection1DCallback.get_event_data_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
self.assertEqual(event_data, {
'first': {'index': [0, 2]},
'second': {'index': []},
'third': {'index': []},
'forth': {'index': []}
})
def testMapboxSelection1DCallbackEventData(self):
selected_data1 = {'points': [
{"pointNumber": 0, "curveNumber": 1},
{"pointNumber": 2, "curveNumber": 1},
]}
event_data = Selection1DCallback.get_event_data_from_property_update(
"selected_data", selected_data1, self.mapbox_fig_dict
)
self.assertEqual(event_data, {
'first': {'index': []},
'second': {'index': [0, 2]},
'third': {'index': []},
})
def testSelection1DCallback(self):
plots, streamss, callbacks, sel_events = build_callback_set(
Selection1DCallback, ['first', 'second', 'third', 'forth', 'other'],
Selection1D, 2
)
# Select points from the 'first' plot (first set of axes)
selected_data1 = {'points': [
{"pointNumber": 0, "curveNumber": 0},
{"pointNumber": 2, "curveNumber": 0},
]}
Selection1DCallback.update_streams_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
# Check that all streams attached to the 'first' plots were triggered
for stream, events in zip(streamss[0], sel_events[0]):
assert stream.index == [0, 2]
assert len(events) == 1
# Check that all streams attached to other plots in this figure were triggered
# with empty selection
for stream in streamss[1] + streamss[2] + streamss[3]:
assert stream.index == []
# Select points from the 'first' and 'second' plot (first set of axes)
selected_data1 = {'points': [
{"pointNumber": 0, "curveNumber": 0},
{"pointNumber": 1, "curveNumber": 0},
{"pointNumber": 1, "curveNumber": 1},
{"pointNumber": 2, "curveNumber": 1},
]}
Selection1DCallback.update_streams_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
# Check that all streams attached to the 'first' plot were triggered
for stream in streamss[0]:
assert stream.index == [0, 1]
# Check that all streams attached to the 'second' plot were triggered
for stream in streamss[1]:
assert stream.index == [1, 2]
# Check that all streams attached to other plots in this figure were triggered
# with empty selection
for stream in streamss[2] + streamss[3]:
assert stream.index == []
# Select points from the 'forth' plot (third set of axes)
selected_data1 = {'points': [
{"pointNumber": 0, "curveNumber": 3},
{"pointNumber": 2, "curveNumber": 3},
]}
Selection1DCallback.update_streams_from_property_update(
"selected_data", selected_data1, self.fig_dict
)
# Check that all streams attached to the 'forth' plot were triggered
for stream, events in zip(streamss[3], sel_events[3]):
assert stream.index == [0, 2]
# Check that streams attached to plots not in this figure are not called
for stream, events in zip(streamss[4], sel_events[4]):
assert len(events) == 0
| {
"content_hash": "6e3ae9dff5281975d3dea9544d563c11",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 87,
"avg_line_length": 37.08179012345679,
"alnum_prop": 0.5467143867826376,
"repo_name": "ioam/holoviews",
"id": "df614f4e8ee55d07f375864a218628c41ce721f5",
"size": "24029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holoviews/tests/plotting/plotly/test_callbacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1546"
},
{
"name": "HTML",
"bytes": "18997"
},
{
"name": "JavaScript",
"bytes": "20747"
},
{
"name": "Jupyter Notebook",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "3241652"
}
],
"symlink_target": ""
} |
"""
usage:
python nbmerge.py A.ipynb B.ipynb C.ipynb > merged.ipynb
"""
import io
import os
import sys
from IPython.nbformat import current
def merge_notebooks(filenames):
merged = None
for fname in filenames:
with io.open(fname, 'r', encoding='utf-8') as f:
nb = current.read(f, 'json')
if merged is None:
merged = nb
else:
merged.worksheets[0].cells.extend(nb.worksheets[0].cells)
merged.metadata.name += "_merged"
print current.writes(merged, 'json')
if __name__ == '__main__':
merge_notebooks(sys.argv[1:])
| {
"content_hash": "f902f2144fe3fc5fd4f2c1add43137fc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 69,
"avg_line_length": 22.923076923076923,
"alnum_prop": 0.6140939597315436,
"repo_name": "zaqwes8811/micro-apps",
"id": "ba01784e6208fa140ab72eda9efe7cd6fd654820",
"size": "596",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "self_driving/deps/Kalman_and_Bayesian_Filters_in_Python_master/pdf/nbmerge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "309556"
},
{
"name": "Assembly",
"bytes": "570069"
},
{
"name": "Batchfile",
"bytes": "56007"
},
{
"name": "C",
"bytes": "53062"
},
{
"name": "C#",
"bytes": "32208"
},
{
"name": "C++",
"bytes": "1108629"
},
{
"name": "CMake",
"bytes": "23718"
},
{
"name": "CSS",
"bytes": "186903"
},
{
"name": "Cuda",
"bytes": "9680"
},
{
"name": "Dart",
"bytes": "1158"
},
{
"name": "Dockerfile",
"bytes": "20181"
},
{
"name": "Go",
"bytes": "6640"
},
{
"name": "HTML",
"bytes": "2215958"
},
{
"name": "Haskell",
"bytes": "383"
},
{
"name": "Java",
"bytes": "140401"
},
{
"name": "JavaScript",
"bytes": "714877"
},
{
"name": "Jupyter Notebook",
"bytes": "25399728"
},
{
"name": "Kotlin",
"bytes": "713"
},
{
"name": "Lua",
"bytes": "2253"
},
{
"name": "MATLAB",
"bytes": "103"
},
{
"name": "Makefile",
"bytes": "33566"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "NSIS",
"bytes": "7481"
},
{
"name": "PHP",
"bytes": "59915"
},
{
"name": "Pascal",
"bytes": "2492"
},
{
"name": "Pawn",
"bytes": "3337"
},
{
"name": "Python",
"bytes": "1836093"
},
{
"name": "QML",
"bytes": "58517"
},
{
"name": "QMake",
"bytes": "4042"
},
{
"name": "R",
"bytes": "13753"
},
{
"name": "Ruby",
"bytes": "522"
},
{
"name": "Rust",
"bytes": "210"
},
{
"name": "Scheme",
"bytes": "113588"
},
{
"name": "Scilab",
"bytes": "1348"
},
{
"name": "Shell",
"bytes": "16112"
},
{
"name": "SourcePawn",
"bytes": "3316"
},
{
"name": "VBScript",
"bytes": "9376"
},
{
"name": "XSLT",
"bytes": "24926"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from utils import get_all_files
import codecs
import re
TAG_DELIMITER = ';'
def filter_tags(tags, regex):
tags = set(tags)
filtered_tags = []
for tag in tags:
if not regex.search(tag):
filtered_tags.append(tag)
return filtered_tags
def find_and_write_wiki_tags(directory, excluded_tag_file, output_filename='wiki-sense-tag-mapping.txt'):
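    # Output format: one sense per line, tab-separated as
    #   <sense> \t <wiki link> \t <tag1>;<tag2>;...   (tags joined by TAG_DELIMITER)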
files = get_all_files(directory, "*.clean.txt")
excluded_tag_set = open(excluded_tag_file).read().splitlines()
regex = re.compile("|".join(excluded_tag_set), re.I)
d = dict()
problematic_files = []
for i, fn in enumerate(files, 1):
if i % 10 == 0:
print("\r%d files read" % i, end='')
with codecs.open(fn, encoding='utf8') as f:
for line in f:
line = line.strip().split('\t')
is_original_wiki_article_for_sense = line[3] == 'True'
sense = line[2]
if is_original_wiki_article_for_sense and sense not in d:
tags = line[5:]
if len(tags) != 0:
filtered_tags = filter_tags(tags, regex)
d[sense] = (filtered_tags, line[4])
else:
problematic_files.append(fn)
    print()  # newline to terminate the "\r" progress output above.
    assert len(problematic_files) == 0, "Senses without tags found in: %s" % problematic_files
with codecs.open(output_filename, 'w', encoding='utf8') as f:
for sense, (tags, link) in d.items():
f.write(u"{}\t{}\t{}\n".format(sense, link, TAG_DELIMITER.join(tags)))
def get_wiki_tag_and_link_maps(tag_file):
tag_dict, link_dict = {}, {}
with codecs.open(tag_file, encoding='utf8') as f:
for line in f:
sense, link, tags = line.strip().split('\t')
tags = tags.split(TAG_DELIMITER)
tag_dict[sense] = tags
link_dict[sense] = link
return tag_dict, link_dict
if __name__ == '__main__':
import sys
find_and_write_wiki_tags(*sys.argv[1:])
| {
"content_hash": "836e857f474787cd782427efb1b066f6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 105,
"avg_line_length": 33.515625,
"alnum_prop": 0.5603729603729604,
"repo_name": "osmanbaskaya/coarse-wsd",
"id": "5f0054ae9c7191a6cce95615c1463d905fa1c1cf",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coarse-wsd/wiki/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "8026"
},
{
"name": "Makefile",
"bytes": "5421"
},
{
"name": "Python",
"bytes": "103973"
}
],
"symlink_target": ""
} |
import sys
from oslo_config import cfg
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck
from stevedore import driver as stevedore_driver
# Need to import to load config
from octavia.common import config # noqa: F401 pylint: disable=unused-import
from octavia.common import constants
from octavia.common import policy
from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver
from octavia.i18n import _
CONF = cfg.CONF
class Checks(upgradecheck.UpgradeCommands):
"""Contains upgrade checks
Various upgrade checks should be added as separate methods in this class
    and added to the _upgrade_checks tuple.
"""
def _check_persistence(self):
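        # Note: on success this returns the persistence driver itself (it is
        # reused by _check_jobboard); on failure it returns an
        # upgradecheck.Result, which _check_amphorav2 detects via isinstance().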
try:
pers_driver = tsk_driver.MysqlPersistenceDriver()
with pers_driver.get_persistence() as pers:
if pers.engine.dialect.name == 'sqlite':
return upgradecheck.Result(
upgradecheck.Code.WARNING,
                        _('Persistence database is using sqlite backend. '
                          'Verify that the persistence_connection URL has '
                          'been set properly.'))
return pers
except Exception:
return upgradecheck.Result(upgradecheck.Code.FAILURE,
_('Failed to connect to persistence '
'backend for AmphoraV2 provider.'))
def _check_jobboard(self, persistence):
try:
jobboard_driver = stevedore_driver.DriverManager(
namespace='octavia.worker.jobboard_driver',
name=CONF.task_flow.jobboard_backend_driver,
invoke_args=(persistence,),
invoke_on_load=True).driver
with jobboard_driver.job_board(persistence) as jb:
if jb.connected:
return upgradecheck.Result(
upgradecheck.Code.SUCCESS,
_('Persistence database and Jobboard backend for '
'AmphoraV2 provider configured.'))
except Exception:
# Return FAILURE later
pass
return upgradecheck.Result(
upgradecheck.Code.FAILURE,
_('Failed to connect to jobboard backend for AmphoraV2 provider. '
'Check jobboard configuration options in task_flow config '
'section.'))
def _check_amphorav2(self):
default_provider_driver = CONF.api_settings.default_provider_driver
enabled_provider_drivers = CONF.api_settings.enabled_provider_drivers
if (default_provider_driver == constants.AMPHORAV2 or
constants.AMPHORAV2 in enabled_provider_drivers):
persistence = self._check_persistence()
if isinstance(persistence, upgradecheck.Result):
return persistence
return self._check_jobboard(persistence)
return upgradecheck.Result(upgradecheck.Code.SUCCESS,
_('AmphoraV2 provider is not enabled.'))
def _check_yaml_policy(self):
if CONF.oslo_policy.policy_file.lower().endswith('yaml'):
return upgradecheck.Result(upgradecheck.Code.SUCCESS,
_('The [oslo_policy] policy_file '
'setting is configured for YAML '
'policy file format.'))
if CONF.oslo_policy.policy_file.lower().endswith('json'):
return upgradecheck.Result(
upgradecheck.Code.WARNING,
_('The [oslo_policy] policy_file setting is configured for '
'JSON policy file format. JSON format policy files have '
'been deprecated by oslo policy. Please use the oslo policy '
'tool to convert your policy file to YAML format. See this '
'patch for more information: '
'https://review.opendev.org/733650'))
return upgradecheck.Result(upgradecheck.Code.FAILURE,
_('Unable to determine the [oslo_policy] '
'policy_file setting file format. '
'Please make sure your policy file is '
'in YAML format and has the suffix of '
'.yaml for the filename. Oslo policy '
'has deprecated the JSON file format.'))
_upgrade_checks = (
(_('AmphoraV2 Check'), _check_amphorav2),
(_('YAML Policy File'), _check_yaml_policy),
(_('Policy File JSON to YAML Migration'),
(common_checks.check_policy_json, {'conf': CONF})),
)
def main():
policy.Policy()
return upgradecheck.main(
CONF, project='octavia', upgrade_command=Checks())
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "f6aa150e5c04b18c2223554306e17061",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 43.68695652173913,
"alnum_prop": 0.5732484076433121,
"repo_name": "openstack/octavia",
"id": "f98d51dad4a7867933bee961d0ee4903572f95d8",
"size": "5631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/cmd/status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "60600"
},
{
"name": "Mako",
"bytes": "922"
},
{
"name": "Python",
"bytes": "6651664"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Shell",
"bytes": "117966"
}
],
"symlink_target": ""
} |
"""Checkpointing utilities."""
import os
import pickle
from absl import logging
import tensorflow as tf
class _PyWrapper(tf.train.experimental.PythonState):
"""Wraps a Python object for storage in an object-based checkpoint."""
def __init__(self, obj):
"""Specify an object to wrap.
Args:
obj: The object to save and restore (may be overwritten).
"""
self._obj = obj
@property
def object(self):
return self._obj
def serialize(self):
"""Callback to serialize the object."""
return pickle.dumps(self._obj)
def deserialize(self, string_value):
"""Callback to deserialize the array."""
self._obj = pickle.loads(string_value)
class _CheckpointState:
"""tf.Train.Checkpoint wrapper ensuring all fields are checkpointable."""
def __init__(self):
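    # Use the base-class __setattr__ directly: our own __setattr__ override
    # below would otherwise try to store '_checkpoint' inside the (not yet
    # existing) checkpoint's python_state dict and recurse.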
super().__setattr__('_checkpoint',
tf.train.Checkpoint(python_state=_PyWrapper({})))
@property
def checkpoint(self):
return self._checkpoint
def __setattr__(self, name, value):
self._checkpoint.python_state.object[name] = value
def __getattr__(self, name):
return self._checkpoint.python_state.object[name]
def keys(self):
return self._checkpoint.python_state.object.keys()
def items(self):
return self._checkpoint.python_state.object.items()
class Checkpointer:
"""Checkpoints python state using tf.train.Checkpoint."""
def __init__(self, directory, max_to_keep, restore_path=None):
self._directory = directory
self._max_to_keep = max_to_keep
self._first_restore_path = restore_path
self._experiment_states = {}
self._checkpoints = {}
logging.info('Storing checkpoint at: %s', directory)
def _internal_restore_path(self, checkpoint_name):
"""Returns a path to the checkpoint used for restore, or None."""
# If we have a checkpoint we own, return that.
restore_path = self.restore_path(checkpoint_name)
# Otherwise, check the read-only restore path.
if restore_path is None and self._first_restore_path is not None:
# We use the checkpoint metadata (state) to check whether the
# checkpoint we want actually exists.
# First restore path can be a directory or a specific checkpoint.
chk_state = tf.train.get_checkpoint_state(self._first_restore_path)
if chk_state is not None:
# The restore path is a directory, get the latest checkpoint from there.
restore_path = chk_state.model_checkpoint_path
else:
        # Try with the parent directory.
chk_state = tf.train.get_checkpoint_state(
os.path.dirname(self._first_restore_path))
if chk_state is not None and (
self._first_restore_path in chk_state.all_model_checkpoint_paths):
restore_path = self._first_restore_path
else:
restore_path = None
return restore_path
def get_experiment_state(self, checkpoint_name):
"""Returns the experiment state."""
if checkpoint_name not in self._experiment_states:
assert checkpoint_name not in self._checkpoints
state = _CheckpointState()
self._experiment_states[checkpoint_name] = state
self._checkpoints[checkpoint_name] = tf.train.CheckpointManager(
state.checkpoint,
os.path.join(self._directory, checkpoint_name),
self._max_to_keep,
checkpoint_name=checkpoint_name)
return self._experiment_states[checkpoint_name]
def can_be_restored(self, checkpoint_name):
"""Returns True if the checkpoint with the given name can be restored."""
return self._internal_restore_path(checkpoint_name) is not None
def restore(self, checkpoint_name):
"""Restores checkpoint state."""
save_path = self._internal_restore_path(checkpoint_name)
assert save_path is not None
checkpoint_manager = self._checkpoints[checkpoint_name]
checkpoint_manager.checkpoint.restore(save_path).assert_consumed()
logging.info('Restored checkpoint from: %s', save_path)
def restore_or_save(self, checkpoint_name):
if self.can_be_restored(checkpoint_name):
self.restore(checkpoint_name)
else:
self.save(checkpoint_name)
def save(self, checkpoint_name):
"""Saves the state to file."""
self._checkpoints[checkpoint_name].save()
self._first_restore_path = None
logging.info('Saved checkpoint at: %s', self.restore_path(checkpoint_name))
def restore_path(self, checkpoint_name):
"""Returns the restore path for this checkpoint."""
# Returns None if we didn't create any checkpoint yet.
chk_state = tf.train.get_checkpoint_state(
self._checkpoints[checkpoint_name].directory)
return None if chk_state is None else chk_state.model_checkpoint_path
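# Illustrative usage sketch (not part of the original module); the directory,
# checkpoint name and state attribute below are hypothetical:
#
#   ckpt = Checkpointer(directory='/tmp/experiment', max_to_keep=3)
#   state = ckpt.get_experiment_state('latest')   # a _CheckpointState
#   state.global_step = 0                         # stored via __setattr__
#   ckpt.restore_or_save('latest')                # restore if possible, else save
#   state.global_step += 1
#   ckpt.save('latest')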
| {
"content_hash": "95b5e1942b9e302d3045c8e904a41787",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 33.14685314685315,
"alnum_prop": 0.6822784810126582,
"repo_name": "deepmind/dm_c19_modelling",
"id": "036b3f3c08d8de9ebb435692f64e85d947c57f0b",
"size": "5470",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "modelling/training/checkpointing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9471"
},
{
"name": "Python",
"bytes": "287886"
},
{
"name": "Shell",
"bytes": "2922"
}
],
"symlink_target": ""
} |
from mssqlrelayclient import MSSQLRelayClient
from smbrelayclient import SMBRelayClient
from ldaprelayclient import LDAPRelayClient
from httprelayclient import HTTPRelayClient
from imaprelayclient import IMAPRelayClient | {
"content_hash": "976cbaa16eb4a3a5cde2b5f41002f7fd",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 43.8,
"alnum_prop": 0.91324200913242,
"repo_name": "tholum/PiBunny",
"id": "b55d6edbf8736dc67821e59a403e0917e7ba7a32",
"size": "219",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "system.d/library/tools_installer/tools_to_install/impacket/impacket/examples/ntlmrelayx/clients/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3527"
},
{
"name": "HTML",
"bytes": "195334"
},
{
"name": "JavaScript",
"bytes": "1156309"
},
{
"name": "PowerShell",
"bytes": "5359"
},
{
"name": "Python",
"bytes": "6368546"
},
{
"name": "Shell",
"bytes": "40720"
},
{
"name": "Visual Basic",
"bytes": "5660"
}
],
"symlink_target": ""
} |
import uuid
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import timeutils
import requests
import six
from six.moves.urllib import parse as urlparse
from conveyor.conveyorheat.common import crypt
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.engine import api
from conveyor.conveyorheat.engine import scheduler
from conveyor.i18n import _
from conveyor.i18n import _LI
from conveyor.conveyorheat.objects import resource as resource_objects
from conveyor.conveyorheat.objects import software_config as \
software_config_object
from conveyor.conveyorheat.objects import software_deployment as \
software_deployment_object
from conveyor.conveyorheat.rpc import api as rpc_api
from conveyor.db import api as db_api
LOG = logging.getLogger(__name__)
class SoftwareConfigService(service.Service):
def show_software_config(self, cnxt, config_id):
sc = software_config_object.SoftwareConfig.get_by_id(cnxt, config_id)
return api.format_software_config(sc)
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
scs = software_config_object.SoftwareConfig.get_all(
cnxt,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
result = [api.format_software_config(sc, detail=False) for sc in scs]
return result
def create_software_config(self, cnxt, group, name, config,
inputs, outputs, options):
sc = software_config_object.SoftwareConfig.create(cnxt, {
'group': group,
'name': name,
'config': {
'inputs': inputs,
'outputs': outputs,
'options': options,
'config': config
},
'tenant': cnxt.tenant_id})
return api.format_software_config(sc)
def delete_software_config(self, cnxt, config_id):
software_config_object.SoftwareConfig.delete(cnxt, config_id)
def list_software_deployments(self, cnxt, server_id):
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
result = [api.format_software_deployment(sd) for sd in all_sd]
return result
def metadata_software_deployments(self, cnxt, server_id):
if not server_id:
raise ValueError(_('server_id must be specified'))
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
# filter out the sds with None config
flt_sd = six.moves.filterfalse(lambda sd: sd.config is None,
all_sd)
# sort the configs by config name, to give the list of metadata a
# deterministic and controllable order.
flt_sd_s = sorted(flt_sd, key=lambda sd: sd.config.name)
result = [api.format_software_config(sd.config) for sd in flt_sd_s]
return result
@resource_objects.retry_on_conflict
def _push_metadata_software_deployments(
self, cnxt, server_id, stack_user_project_id):
rs = db_api.resource_get_by_physical_resource_id(cnxt, server_id)
if not rs:
return
deployments = self.metadata_software_deployments(cnxt, server_id)
md = rs.rsrc_metadata or {}
md['deployments'] = deployments
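        # Optimistic locking: the update below passes the row's atomic_key so
        # it only succeeds if no concurrent change happened; otherwise
        # ConcurrentTransaction is raised, which the @retry_on_conflict
        # decorator on this method is meant to retry.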
rows_updated = db_api.resource_update(
cnxt, rs.id, {'rsrc_metadata': md}, rs.atomic_key)
if not rows_updated:
action = _('deployments of server %s') % server_id
raise exception.ConcurrentTransaction(action=action)
metadata_put_url = None
metadata_queue_id = None
for rd in rs.data:
if rd.key == 'metadata_put_url':
metadata_put_url = rd.value
if rd.key == 'metadata_queue_id':
metadata_queue_id = rd.value
if metadata_put_url:
json_md = jsonutils.dumps(md)
requests.put(metadata_put_url, json_md)
if metadata_queue_id:
project = stack_user_project_id
token = self._get_user_token(cnxt, rs, project)
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(project, token)
queue = zaqar.queue(metadata_queue_id)
queue.post({'body': md, 'ttl': zaqar_plugin.DEFAULT_TTL})
def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
container, object_name = urlparse.urlparse(
deploy_signal_id).path.split('/')[-2:]
swift_plugin = cnxt.clients.client_plugin('swift')
swift = swift_plugin.client()
try:
headers = swift.head_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise
lm = headers.get('last-modified')
last_modified = swift_plugin.parse_last_modified(lm)
prev_last_modified = sd.updated_at
if prev_last_modified:
# assume stored as utc, convert to offset-naive datetime
prev_last_modified = prev_last_modified.replace(tzinfo=None)
if prev_last_modified and (last_modified <= prev_last_modified):
return sd
try:
(headers, obj) = swift.get_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI(
'Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise
if obj:
self.signal_software_deployment(
cnxt, sd.id, jsonutils.loads(obj),
last_modified.isoformat())
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def _get_user_token(self, cnxt, rs, project):
user = password = None
for rd in rs.data:
if rd.key == 'password':
password = crypt.decrypt(rd.decrypt_method, rd.value)
if rd.key == 'user_id':
user = rd.value
keystone = cnxt.clients.client('keystone')
return keystone.stack_domain_user_token(
user_id=user, project_id=project, password=password)
def _refresh_zaqar_software_deployment(self, cnxt, sd, deploy_queue_id):
rs = db_api.resource_get_by_physical_resource_id(cnxt, sd.server_id)
project = sd.stack_user_project_id
token = self._get_user_token(cnxt, rs, project)
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(project, token)
queue = zaqar.queue(deploy_queue_id)
messages = list(queue.pop())
if messages:
self.signal_software_deployment(
cnxt, sd.id, messages[0].body, None)
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def check_software_deployment(self, cnxt, deployment_id, timeout):
def _check():
while True:
sd = self._show_software_deployment(cnxt, deployment_id)
if sd.status != rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
return
yield
scheduler.TaskRunner(_check)(timeout=timeout)
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
return api.format_software_deployment(sd)
def _show_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
if sd.status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
c = sd.config.config
input_values = dict((i['name'], i['value']) for i in c['inputs'])
transport = input_values.get('deploy_signal_transport')
if transport == 'TEMP_URL_SIGNAL':
sd = self._refresh_swift_software_deployment(
cnxt, sd, input_values.get('deploy_signal_id'))
elif transport == 'ZAQAR_SIGNAL':
sd = self._refresh_zaqar_software_deployment(
cnxt, sd, input_values.get('deploy_queue_id'))
return sd
def show_software_deployment(self, cnxt, deployment_id):
sd = self._show_software_deployment(cnxt, deployment_id)
return api.format_software_deployment(sd)
def create_software_deployment(self, cnxt, server_id, config_id,
input_values, action, status,
status_reason, stack_user_project_id,
deployment_id=None):
if server_id and not isinstance(server_id, six.string_types):
            LOG.error(_LI('server_id %s must be a string.') % server_id)
            raise ValueError(_('server_id must be a string.'))
if deployment_id is None:
deployment_id = str(uuid.uuid4())
sd = software_deployment_object.SoftwareDeployment.create(cnxt, {
'id': deployment_id,
'config_id': config_id,
'server_id': server_id,
'input_values': input_values,
'tenant': cnxt.tenant_id,
'stack_user_project_id': stack_user_project_id,
'action': action,
'status': status,
'status_reason': status_reason})
self._push_metadata_software_deployments(
cnxt, server_id, stack_user_project_id)
return api.format_software_deployment(sd)
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at):
if not deployment_id:
raise ValueError(_('deployment_id must be specified'))
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
status = sd.status
if not status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
# output values are only expected when in an IN_PROGRESS state
return
details = details or {}
output_status_code = rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
ov = sd.output_values or {}
status = None
status_reasons = {}
status_code = details.get(output_status_code)
if status_code and str(status_code) != '0':
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[output_status_code] = _(
'Deployment exited with non-zero status code: %s'
) % details.get(output_status_code)
event_reason = 'deployment failed (%s)' % status_code
else:
event_reason = 'deployment succeeded'
for output in sd.config.config['outputs'] or []:
out_key = output['name']
if out_key in details:
ov[out_key] = details[out_key]
if output.get('error_output', False):
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[out_key] = details[out_key]
event_reason = 'deployment failed'
for out_key in rpc_api.SOFTWARE_DEPLOYMENT_OUTPUTS:
ov[out_key] = details.get(out_key)
if status == rpc_api.SOFTWARE_DEPLOYMENT_FAILED:
# build a status reason out of all of the values of outputs
# flagged as error_output
status_reasons = [' : '.join((k, six.text_type(status_reasons[k])))
for k in status_reasons]
status_reason = ', '.join(status_reasons)
else:
status = rpc_api.SOFTWARE_DEPLOYMENT_COMPLETE
status_reason = _('Outputs received')
self.update_software_deployment(
cnxt, deployment_id=deployment_id,
output_values=ov, status=status, status_reason=status_reason,
config_id=None, input_values=None, action=None,
updated_at=updated_at)
# Return a string describing the outcome of handling the signal data
return event_reason
def update_software_deployment(self, cnxt, deployment_id, config_id,
input_values, output_values, action,
status, status_reason, updated_at):
update_data = {}
if config_id:
update_data['config_id'] = config_id
if input_values:
update_data['input_values'] = input_values
if output_values:
update_data['output_values'] = output_values
if action:
update_data['action'] = action
if status:
update_data['status'] = status
if status_reason:
update_data['status_reason'] = status_reason
if updated_at:
update_data['updated_at'] = timeutils.normalize_time(
timeutils.parse_isotime(updated_at))
else:
update_data['updated_at'] = timeutils.utcnow()
sd = software_deployment_object.SoftwareDeployment.update_by_id(
cnxt, deployment_id, update_data)
# only push metadata if this update resulted in the config_id
# changing, since metadata is just a list of configs
if config_id:
self._push_metadata_software_deployments(
cnxt, sd.server_id, sd.stack_user_project_id)
return api.format_software_deployment(sd)
def delete_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
software_deployment_object.SoftwareDeployment.delete(
cnxt, deployment_id)
self._push_metadata_software_deployments(
cnxt, sd.server_id, sd.stack_user_project_id)
| {
"content_hash": "65bb6f8c53dc3e88b4636e6150296f44",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 79,
"avg_line_length": 41.43604651162791,
"alnum_prop": 0.5952013469903185,
"repo_name": "Hybrid-Cloud/conveyor",
"id": "5edfbb1d9f8de024e5851845ab29551c80c2eb31",
"size": "14829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conveyor/conveyorheat/engine/service_software_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3789174"
},
{
"name": "Shell",
"bytes": "16567"
}
],
"symlink_target": ""
} |
import redis
from ast import literal_eval
from . import RoundRobinDb, range_func
class RedisRoundRobinDb(RoundRobinDb):
def __init__(self, redis_db):
server,port=redis_db.split(":")
port = int(port)
self.db = redis.StrictRedis(host=server, port=port, db=0)
self._init_db()
self._mins_cache = None
self._hours_cache = None
def _init_db(self):
if self.db.get("initialized"):
return # already initialized
self.db.set("initialized", True)
def _clear_db(self):
for ix in range_func(24):
self.db.delete('min%d' % ix)
self.db.delete('hour%d' % ix)
for ix in range_func(24,60):
self.db.delete('min%d' % ix)
self.db.delete('last_timestamp')
self.db.delete('initialized')
def _get_key_as_tuple(self, key):
data = self.db.get(key)
if data is None:
return None
return literal_eval(data)
def _get_filtered_cache(self, base, length):
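        # Builds {timestamp: (slot_index, value)} from the redis keys
        # '<base>0' .. '<base><length-1>', skipping empty slots.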
cache = {ts:(ix, val) for ix,(ts, val) in filter(
lambda i: i[1] is not None,
[(d, self._get_key_as_tuple(base+str(d))) for d in range_func(length)])}
return cache
def _get_minutes(self):
if self._mins_cache is None:
self._mins_cache = self._get_filtered_cache('min', 60)
print("Minutes cache is %r" % self._mins_cache)
return self._mins_cache
def _get_hours(self):
if self._hours_cache is None:
self._hours_cache = self._get_filtered_cache('hour', 24)
print("Hours cache is %r" % self._hours_cache)
return self._hours_cache
def read_all(self, table):
if table.lower() == 'minutes':
data = self._get_minutes()
elif table.lower() == 'hours':
data = self._get_hours()
else:
raise ValueError("Table must be one of 'hours' or 'minutes'")
return [(val[0],val[1][1]) for val in
sorted(data.items(), key=lambda i: i[1][0])]
@property
def last_timestamp(self):
ts = self.db.get("last_timestamp")
return None if ts is None else int(ts)
    def get_timestamp_data(self, timestamp, table):
        if table.lower() == 'minutes':
            ts_data = self._get_minutes().get(timestamp)
        elif table.lower() == 'hours':
            ts_data = self._get_hours().get(timestamp)
        else:
            raise ValueError("Table must be one of 'hours' or 'minutes'")
        return ts_data
def get_timestamp_index(self, timestamp, table, default=None):
super(self.__class__, self).get_timestamp_index(timestamp, table)
ts_data = self.get_timestamp_data(timestamp, table)
if ts_data is None:
return default
else:
return ts_data[0]
def _update(self, table, index, timestamp, value):
keybase = 'min' if table.lower() == 'minutes' else 'hour'
self.db.set(keybase+str(index), (timestamp, value))
def get_timestamp_value(self, table, timestamp):
super(self.__class__, self).get_timestamp_value(table, timestamp)
ts_data = self.get_timestamp_data(timestamp, table)
return None if ts_data is None else ts_data[1]
def update_timestamp(self, table, timestamp, value):
super(self.__class__, self).update_timestamp(table, timestamp, value)
ts_index = self.get_timestamp_index(timestamp, table)
if ts_index is None:
raise ValueError("Timestamp does not exist in the database.")
else:
self._update(table, ts_index, timestamp, value)
def save_timestamps(self, data):
start_index = self.get_timestamp_index(self.last_timestamp, 'Minutes', -1) + 1
for ix in range_func(len(data['minutes'])):
ts, value = data['minutes'][ix]
self._update('minutes', (ix + start_index) % 60, ts, value)
start_index = self.get_timestamp_index(self.last_hour_timestamp, 'hours', -1) + 1
for ix in range_func(len(data['hours'])):
ts, value = data['hours'][ix]
self._update('hours', (ix + start_index) % 24, ts, value)
self.db.set("last_timestamp", data['minutes'][-1][0])
self._mins_cache = None
self._hours_cache = None
| {
"content_hash": "82b2460936770871b25aa0c8c1c73343",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 89,
"avg_line_length": 37.017391304347825,
"alnum_prop": 0.5759924829692271,
"repo_name": "adeadman/rrd-tool",
"id": "65d67b022f49d48324b45a771d03859ed682b051",
"size": "4257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "round_robin/redisdb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28960"
}
],
"symlink_target": ""
} |
from . import AWSObject
from . import AWSProperty
class Target(AWSProperty):
props = {
'TargetAddress': (basestring, False),
'TargetType': (basestring, False),
}
class NotificationRule(AWSObject):
resource_type = "AWS::CodeStarNotifications::NotificationRule"
props = {
'DetailType': (basestring, True),
'EventTypeIds': ([basestring], True),
'Name': (basestring, True),
'Resource': (basestring, True),
'Status': (basestring, False),
'Tags': (dict, False),
'Targets': ([Target], True),
}
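# Illustrative usage sketch (not part of the original module); the resource
# name, ARNs and event type ID below are placeholders:
#
#   from troposphere import Template
#
#   rule = NotificationRule(
#       'ExampleRule',
#       Name='example-rule',
#       DetailType='FULL',
#       EventTypeIds=['codecommit-repository-branches-and-tags-created'],
#       Resource='arn:aws:codecommit:us-east-1:123456789012:example-repo',
#       Targets=[Target(TargetAddress='arn:aws:sns:us-east-1:123456789012:example-topic',
#                       TargetType='SNS')],
#   )
#   template = Template()
#   template.add_resource(rule)
#   print(template.to_json())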
| {
"content_hash": "38e7c553d4e3c9103c8336fccb54d1e7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 66,
"avg_line_length": 25.391304347826086,
"alnum_prop": 0.5958904109589042,
"repo_name": "ikben/troposphere",
"id": "a1e27ca4d59e223d01a146ae0ef7b69e88739db5",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troposphere/codestarnotifications.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1555"
},
{
"name": "Python",
"bytes": "790849"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
'''
Created on May 17, 2013
Example for creating LCIO events and filling them with MCParticles and SimTrackerHits.
@author: <a href="mailto:[email protected]">Christian Grefe</a>
'''
from __future__ import absolute_import, unicode_literals
from __future__ import print_function, division
from pyLCIO import EVENT, IMPL, IOIMPL, UTIL
from ROOT import TVector3, TLorentzVector, TRandom3, TMath
from time import time
import sys, math
from sixlcio.moves import range
def generateEvents( outputFileName, nEvents ):
random = TRandom3( 12345 )
# define a particle source
sourcePosition = TVector3( 0., 0., 0. )
sourceSpreadXY = 10.
pdgid = 13
charge = -1.
mass = 0.105658
momentum = TVector3( 0.3, 0.1, 10. )
runNumber = 321
# define a detector with positions for the tracker planes
detectorName = 'ToyTracker'
trackerPlanePositions = []
hitResolution = 0.01
planeNormal = TVector3( 0., 0., 1. )
for planeZ in [ 100., 250., 480., 510., 640. ]:
trackerPlanePositions.append( TVector3( 0., 0., planeZ ) )
# create a writer and open the output file
writer = IOIMPL.LCFactory.getInstance().createLCWriter()
writer.open( outputFileName, EVENT.LCIO.WRITE_NEW )
# create a run header and add it to the file (optional)
run = IMPL.LCRunHeaderImpl()
run.setRunNumber( runNumber )
run.setDetectorName( detectorName )
run.setDescription( 'This is a test run' )
writer.writeRunHeader( run )
for iEvent in range(nEvents):
# create an event and set its parameters
event = IMPL.LCEventImpl()
event.setEventNumber( iEvent )
event.setDetectorName( detectorName )
event.setRunNumber( runNumber )
event.setTimeStamp( int( time() * 1000000000. ) )
# create the mc particle collection
mcParticles = IMPL.LCCollectionVec( EVENT.LCIO.MCPARTICLE )
# calculate the origin of the particle
x = random.Gaus( sourcePosition.x(), sourceSpreadXY )
y = random.Gaus( sourcePosition.y(), sourceSpreadXY )
z = sourcePosition.z()
origin = TVector3( x, y, z )
# create a particle
mcParticle = IMPL.MCParticleImpl()
mcParticle.setPDG( pdgid )
mcParticle.setMass( mass )
mcParticle.setMomentumVec( momentum )
mcParticle.setGeneratorStatus( 1 )
mcParticle.setVertexVec( origin )
mcParticle.setTime( 0. )
mcParticles.addElement( mcParticle )
# create a tracker hit collection
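        # the THBIT_MOMENTUM collection flag set below lets each SimTrackerHit
        # store the particle momentum at the hit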
trackerHits = IMPL.LCCollectionVec( EVENT.LCIO.SIMTRACKERHIT )
trackerHits.setFlag( UTIL.set_bit( trackerHits.getFlag(), EVENT.LCIO.THBIT_MOMENTUM ) )
# create an IDEncoder to store hit IDs
# defines the tags and the number of bits for the different bit fields
encodingString = 'system:3,layer:6'
idEncoder = UTIL.CellIDEncoder( IMPL.SimTrackerHitImpl )( encodingString, trackerHits )
# add a hit for each layer
for planePosition in trackerPlanePositions:
# calculate the intersection with the plane
distance = ( planePosition - origin ).Dot( planeNormal ) / momentum.Dot( planeNormal )
intersect = TVector3( momentum )
intersect.SetMag( distance )
# smear the hit position with the resolution
hitX = random.Gaus( intersect.x(), hitResolution )
            hitY = random.Gaus( intersect.y(), hitResolution )
hitPosition = TVector3( hitX, hitY, intersect.z() )
# build the tracker hit
trackerHit = IMPL.SimTrackerHitImpl()
trackerHit.setPositionVec( hitPosition )
trackerHit.setMomentumVec( momentum )
trackerHit.setMCParticle( mcParticle )
trackerHit.setTime( distance / TMath.C() )
trackerHit.setEDep( 0.1 )
# set the cell ID
idEncoder.reset()
idEncoder['layer'] = trackerPlanePositions.index( planePosition )
idEncoder['system'] = 1
idEncoder.setCellID( trackerHit )
trackerHits.addElement( trackerHit )
event.addCollection( mcParticles, EVENT.LCIO.MCPARTICLE )
event.addCollection( trackerHits, 'SimTrackerHits' )
writer.writeEvent( event )
writer.flush()
writer.close()
def usage():
print('Generates an MCParticle with associated SimTrackerHits for each event')
print('Usage:\n python %s <outputFile> <nEvents>' % (sys.argv[0]))
if __name__ == '__main__':
if len( sys.argv ) < 3:
usage()
sys.exit( 1 )
generateEvents( sys.argv[1], int( sys.argv[2] ) )
| {
"content_hash": "118e6e73f7a41a4e09f2b895ce03f2bf",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 98,
"avg_line_length": 36.621212121212125,
"alnum_prop": 0.6263963591228796,
"repo_name": "petricm/LCIO",
"id": "70b0fd3b2ec09801df05873d63feb9f7944b922b",
"size": "4834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/python/EventBuilder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "2118"
},
{
"name": "Batchfile",
"bytes": "1132"
},
{
"name": "C",
"bytes": "61259"
},
{
"name": "C++",
"bytes": "1562823"
},
{
"name": "CMake",
"bytes": "108579"
},
{
"name": "Fortran",
"bytes": "63741"
},
{
"name": "HTML",
"bytes": "556"
},
{
"name": "Java",
"bytes": "442394"
},
{
"name": "Pascal",
"bytes": "14383"
},
{
"name": "Python",
"bytes": "62661"
},
{
"name": "Shell",
"bytes": "8080"
},
{
"name": "TeX",
"bytes": "39624"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class PedidosConfig(AppConfig):
name = 'pedidos'
| {
"content_hash": "4da4c0178e2af6b5cc8237702a92291c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 18.571428571428573,
"alnum_prop": 0.7538461538461538,
"repo_name": "vallemrv/tpvB3",
"id": "acc2041c333d009c6e2880319956dcf323279ab4",
"size": "130",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "localhost/pedidos/apps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28419"
},
{
"name": "HTML",
"bytes": "76010"
},
{
"name": "JavaScript",
"bytes": "640565"
},
{
"name": "Python",
"bytes": "467690"
}
],
"symlink_target": ""
} |
import sys
import gi
import time
gi.require_version("Tcam", "1.0")
gi.require_version("Gst", "1.0")
gi.require_version("GLib", "2.0")
from gi.repository import Tcam, Gst, GLib
def main():
Gst.init(sys.argv) # init gstreamer
# this line sets the gstreamer default logging level
# it can be removed in normal applications
# gstreamer logging can contain verry useful information
# when debugging your application
# see https://gstreamer.freedesktop.org/documentation/tutorials/basic/debugging-tools.html
# for further details
Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
serial = None
pipeline = Gst.parse_launch("tcambin name=source ! videoconvert ! ximagesink")
source = pipeline.get_by_name("source")
# serial is defined, thus make the source open that device
if serial is not None:
source.set_property("serial", serial)
pipeline.set_state(Gst.State.PLAYING)
# stream for 2 seconds before switching to trigger mode
# this is simply to show that the device is running
time.sleep(2)
try:
source.set_tcam_enumeration("TriggerMode", "On")
wait = True
while wait:
input_text = input("Press 'Enter' to trigger an image.\n q + enter to stop the stream.")
if input_text == "q":
break
else:
ret = source.set_tcam_command("TriggerSoftware")
if ret:
print("=== Triggered image. ===\n")
else:
print("!!! Could not trigger. !!!\n")
# deactivate trigger mode
        # this is simply to prevent confusion when the camera is started again without the intention to trigger
source.set_tcam_enumeration("TriggerMode", "Off")
except GLib.Error as e:
print(e.message)
# this stops the pipeline and frees all resources
pipeline.set_state(Gst.State.NULL)
if __name__ == "__main__":
main()
| {
"content_hash": "5c8020274f9f5561e1193021f5aab36f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 100,
"avg_line_length": 28.608695652173914,
"alnum_prop": 0.6312056737588653,
"repo_name": "TheImagingSource/tiscamera",
"id": "187b67a41409888872ce14c76b3e5150493d6f20",
"size": "2705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python/06-softwaretrigger.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "80638"
},
{
"name": "C++",
"bytes": "2287603"
},
{
"name": "CMake",
"bytes": "170765"
},
{
"name": "Python",
"bytes": "41359"
},
{
"name": "Shell",
"bytes": "15386"
}
],
"symlink_target": ""
} |
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.utils.safestring import mark_safe
from .settings import WIDGET_STORAGE_ID
class OrderedFilteredSelectMultiple(FilteredSelectMultiple):
@property
def media(self):
media = super(OrderedFilteredSelectMultiple, self).media
media.add_css({
'all': [
static('ordered_m2m/widget.css'),
]
})
media.add_js([
static('ordered_m2m/widget.js'),
])
return media
def render(self, *args, **kwargs):
output = super(OrderedFilteredSelectMultiple, self).render(*args, **kwargs)
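        # Append a script that registers this widget instance (element id and
        # field name) in a page-global object; the bundled widget.js reads it
        # to apply the ordering behaviour to the selected options.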
output += '''
<script>
orderedFilteredSelectMultiple = window.orderedFilteredSelectMultiple || {};
(function(obj) {
obj.storageID = obj.storageID || '%s';
obj.targets = obj.targets || [];
obj.targets.push({
id: '%s',
field: '%s'
});
})(orderedFilteredSelectMultiple);
</script>
''' % (
# JSON dump element's ID
WIDGET_STORAGE_ID,
# Element ID
kwargs['attrs']['id'] + '_filter',
            # Field name (str.strip('id_') would strip any leading/trailing
            # 'i', 'd' or '_' characters, so remove the 'id_' prefix explicitly)
            kwargs['attrs']['id'][len('id_'):] if kwargs['attrs']['id'].startswith('id_') else kwargs['attrs']['id'],
)
return mark_safe(output) | {
"content_hash": "cac1f263a8cc3142c5c27e597f22b960",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 87,
"avg_line_length": 31.955555555555556,
"alnum_prop": 0.5403337969401947,
"repo_name": "jnovinger/django-ordered-m2m",
"id": "6dea7be73d2e0090fb4f1483818aca47fddb1daf",
"size": "1438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ordered_m2m/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "845"
},
{
"name": "JavaScript",
"bytes": "4798"
},
{
"name": "Python",
"bytes": "5468"
}
],
"symlink_target": ""
} |
from django.conf import settings
class InvalidRevision(Exception):
pass
class VCS(object):
'''
Defines the public API for all subclasses
'''
repo_clone_dir = settings.REPO_CLONE_DIR
def __init__(self, project):
self.project = project
self.checkout_dir = project.get_checkout_directory()
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def checkout(self, ref):
raise NotImplementedError
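# Illustrative subclass sketch (not part of the original module); a real
# implementation would shell out to git or use a library, and the repo_url
# attribute on project is assumed here:
#
#   import subprocess
#
#   class GitVCS(VCS):
#       def clone(self):
#           subprocess.check_call(['git', 'clone', self.project.repo_url,
#                                  self.checkout_dir])
#       def update(self):
#           subprocess.check_call(['git', 'fetch', '--all'], cwd=self.checkout_dir)
#       def checkout(self, ref):
#           subprocess.check_call(['git', 'checkout', ref], cwd=self.checkout_dir)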
| {
"content_hash": "2f704ef48a9bc4df5723c832ecd1c96a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 60,
"avg_line_length": 19.615384615384617,
"alnum_prop": 0.6568627450980392,
"repo_name": "joealcorn/berth.cc",
"id": "f3aef6c3ca3a19fa17e96acd950cceb901d81588",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "berth/vcs/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1007"
},
{
"name": "Python",
"bytes": "42875"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('layout_page', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='layoutpage',
name='publishing_is_draft',
field=models.BooleanField(default=True, db_index=True, editable=False),
preserve_default=True,
),
migrations.AddField(
model_name='layoutpage',
name='publishing_linked',
field=models.OneToOneField(related_name='publishing_draft', null=True, on_delete=django.db.models.deletion.SET_NULL, editable=False, to='layout_page.LayoutPage'),
preserve_default=True,
),
migrations.AddField(
model_name='layoutpage',
name='publishing_modified_at',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
preserve_default=True,
),
migrations.AddField(
model_name='layoutpage',
name='publishing_published_at',
field=models.DateTimeField(null=True, editable=False),
preserve_default=True,
),
]
| {
"content_hash": "06efa213a6f88294c0554a8b0864d337",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 174,
"avg_line_length": 33.58974358974359,
"alnum_prop": 0.6160305343511451,
"repo_name": "ic-labs/django-icekit",
"id": "7a7cdc7a2d93ee858655b7f4f9dfeacb2df5c681",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "icekit/page_types/layout_page/migrations/0002_auto_20160419_2209.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
from ethereum import blocks
from ethereum.db import DB
from ethereum.config import Env
from pyethapp.utils import merge_dict
from pyethapp.utils import update_config_from_genesis_json
import pyethapp.config as konfig
from pyethapp.profiles import PROFILES
def test_genesis():
for profile in ['frontier']: # fixme olympics
config = dict(eth=dict())
# Set config values based on profile selection
merge_dict(config, PROFILES[profile])
# Load genesis config
update_config_from_genesis_json(config, config['eth']['genesis'])
konfig.update_config_with_defaults(config, {'eth': {'block': blocks.default_config}})
print config['eth'].keys()
bc = config['eth']['block']
print bc.keys()
env = Env(DB(), bc)
genesis = blocks.genesis(env)
print 'genesis.hash', genesis.hash.encode('hex')
print 'expected', config['eth']['genesis_hash']
assert genesis.hash == config['eth']['genesis_hash'].decode('hex')
if __name__ == '__main__':
test_genesis()
| {
"content_hash": "e9af9af9793518872658c33bba0dc8de",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 93,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.6497652582159624,
"repo_name": "vaporry/pyethapp",
"id": "da7791a6ed38f7c4f335f54f2016e46a849dca46",
"size": "1065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyethapp/tests/test_genesis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "232955"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class NextVenue(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the NextVenue Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(NextVenue, self).__init__(temboo_session, '/Library/Foursquare/Venues/NextVenue')
def new_input_set(self):
return NextVenueInputSet()
def _make_result_set(self, result, path):
return NextVenueResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return NextVenueChoreographyExecution(session, exec_id, path)
class NextVenueInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the NextVenue
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) Your Foursquare client ID, obtained after registering at Foursquare. Required unless using the OauthToken input.)
"""
super(NextVenueInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) Your Foursquare client secret, obtained after registering at Foursquare. Required unless using the OauthToken input.)
"""
super(NextVenueInputSet, self)._set_input('ClientSecret', value)
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((conditional, string) The Foursquare API OAuth token string. Required unless specifying the ClientID and ClientSecret.)
"""
super(NextVenueInputSet, self)._set_input('OauthToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
super(NextVenueInputSet, self)._set_input('ResponseFormat', value)
def set_VenueID(self, value):
"""
Set the value of the VenueID input for this Choreo. ((required, string) The ID of the venue you want to see next venue information about.)
"""
super(NextVenueInputSet, self)._set_input('VenueID', value)
class NextVenueResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the NextVenue Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class NextVenueChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return NextVenueResultSet(response, path)
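# Illustrative usage sketch (not part of the original module); the Temboo
# account name, app key name/value, OAuth token and venue ID are placeholders:
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = NextVenue(session)
#   inputs = choreo.new_input_set()
#   inputs.set_OauthToken('FOURSQUARE_OAUTH_TOKEN')
#   inputs.set_VenueID('VENUE_ID')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())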
| {
"content_hash": "d0e781c0952feb8d07deaf36cc319940",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 205,
"avg_line_length": 44.38157894736842,
"alnum_prop": 0.6999703528016602,
"repo_name": "jordanemedlock/psychtruths",
"id": "4c3885bf48297e13e4659d7f9188d0346d83c8a0",
"size": "4259",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Foursquare/Venues/NextVenue.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from canari.maltego.utils import debug
from canari.framework import configure
from canari.maltego.entities import Phrase
from canari.maltego.message import Label, UIMessage
from common.entities import Comment, Actor, Case, CoursesOfAction, Incident, Indicator, TTP
from common.client import search, encode_to_utf8, lower, ThreatCentralError
__author__ = 'Bart Otten'
__copyright__ = '(c) Copyright [2016] Hewlett Packard Enterprise Development LP'
__credits__ = []
__license__ = 'Apache 2.0'
__version__ = '1'
__maintainer__ = 'Bart Otten'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform'
]
@configure(
label='Search Comment in Threat Central',
description='Searches Comment in Threat Central',
uuids=['threatcentral.v2.CommentToThreatCentral'],
inputs=[('Threat Central', Comment)],
debug=False,
remote=False
)
# TODO : This transform works the same as Phrase to Threat Central
def dotransform(request, response, config):
try:
results = search(request.value, size=10, pages=1)
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
else:
try:
for result in results:
rtype = lower(result.get('type'))
if result.get('tcScore'):
weight = int(result.get('tcScore'))
else:
weight = 1
# Title ID Description
if rtype == 'actor':
# Check Title, if no title get resource > name
# Actor entity can have an empty title field
if result.get('title'):
e = Actor(encode_to_utf8(result.get('title')), weight=weight)
else:
e = Actor(encode_to_utf8(result.get('resource', dict()).get('name')), weight=weight)
e.name = encode_to_utf8(result.get('resource', dict()).get('name'))
e.actor = encode_to_utf8(result.get('resource', dict()).get('name'))
elif rtype == 'case':
e = Case(encode_to_utf8(result.get('title')), weight=weight)
elif rtype == 'coursesofactions':
e = CoursesOfAction(encode_to_utf8(result.get('title')), weight=weight)
elif rtype == 'indicator':
e = Indicator(encode_to_utf8(result.get('title')), weight=weight)
elif rtype == 'incident':
e = Incident(encode_to_utf8(result.get('title')), weight=weight)
# elif rtype == 'tacticstechniquesandprocedures':
elif rtype == 'ttp':
e = TTP(encode_to_utf8(result.get('title')), weight=weight)
else:
# To be safe
e = Phrase(encode_to_utf8(result.get('title')), weight=weight)
debug(rtype)
e.title = encode_to_utf8(result.get('title'))
e.resourceId = result.get('id')
if result.get('description'):
e += Label('Description', '<br/>'.join(encode_to_utf8(result.get('description',
'')).split('\n')))
response += e
except AttributeError as err:
response += UIMessage('Error: {}'.format(err), type='PartialError')
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
except TypeError:
return response
return response
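# A hedged refactoring sketch (assumption, not part of the transform above): the rtype
# if/elif chain maps a Threat Central result type onto one of the Maltego entity classes
# imported at the top of this file, so the dispatch could also be written as a lookup
# table. Note the Actor branch above additionally falls back to resource > name when the
# title is empty, which a plain table lookup would not cover.
#
# ENTITY_BY_TYPE = {
#     'actor': Actor,
#     'case': Case,
#     'coursesofactions': CoursesOfAction,
#     'indicator': Indicator,
#     'incident': Incident,
#     'ttp': TTP,
# }
# entity_class = ENTITY_BY_TYPE.get(rtype, Phrase)  # fall back to Phrase, as above
# e = entity_class(encode_to_utf8(result.get('title')), weight=weight)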
| {
"content_hash": "3a8e02b883a1d23ff8b8a89cb67b020b",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 108,
"avg_line_length": 40.95505617977528,
"alnum_prop": 0.5486968449931413,
"repo_name": "ThreatCentral/blackberries",
"id": "49d94660ccad9ff99f14ab07f9e89f62a85c7380",
"size": "4224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ThreatCentral/transforms/CommentToThreatCentral.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "339767"
}
],
"symlink_target": ""
} |
"""
Name: 'Nexus Buddy 2 (.br2)...'
Blender: 249
Group: 'Export'
Tooltip: 'Export to Nexus Buddy 2 format (.br2).'
"""
__author__ = ["Deliverator"]
__url__ = ("")
__version__ = "0.1"
__bpydoc__ = """\
Nexus Buddy 2 Import
"""
######################################################
# Importing modules
######################################################
import Blender
import bpy
import BPyMesh
import BPyObject
import BPyMessages
from Blender.Mathutils import Matrix, Vector, RotationMatrix
rotMatrix_z90_4x4 = RotationMatrix(90, 4, 'z')
def getTranslationOrientation(ob, file):
if isinstance(ob, Blender.Types.BoneType):
#file.write('\n CBone Name:\n%s\n' % ob.name)
#file.write('\n CBone Matrix:\n%s\n' % ob.matrix['ARMATURESPACE'])
matrix = rotMatrix_z90_4x4 * ob.matrix['ARMATURESPACE']
#file.write('\nmtx4_z90 * CBone Matrix:\n%s\n' % matrix)
parent = ob.parent
if parent:
par_matrix = rotMatrix_z90_4x4 * parent.matrix['ARMATURESPACE']
matrix = matrix * par_matrix.copy().invert()
#file.write('\nparent adj CBone Matrix:\n%s\n' % matrix)
matrix_rot = matrix.rotationPart()
loc = tuple(matrix.translationPart())
rot = matrix_rot.toQuat()
else:
matrix = ob.matrixWorld
if matrix:
loc = tuple(matrix.translationPart())
matrix_rot = matrix.rotationPart()
rot = tuple(matrix_rot.toQuat())
else:
raise "error: this should never happen!"
return loc, rot
def getBoneTreeDepth(bone, currentCount):
if (bone.hasParent()):
currentCount = currentCount + 1
return getBoneTreeDepth(bone.parent, currentCount)
else:
return currentCount
def meshNormalizedWeights(mesh):
try:
groupNames, vWeightList = BPyMesh.meshWeight2List(mesh)
except:
return [],[]
if not groupNames:
return [],[]
for i, vWeights in enumerate(vWeightList):
tot = 0.0
for w in vWeights:
tot+=w
#print 'i:%d tot:%f' % (i, tot)
if tot:
for j, w in enumerate(vWeights):
vWeights[j] = w/tot
#if w/tot > 0:
#print 'i:%d j:%d w:%f w/tot:%f' % (i, j, w, vWeights[j])
return groupNames, vWeightList
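# Worked example for the normalization above (illustrative numbers): a vertex weighted
# 0.2 to bone A and 0.6 to bone B has tot == 0.8, so its weight list becomes [0.25, 0.75];
# a vertex with tot == 0.0 is left untouched and will later hit the unweighted-vertex
# error raised in saveBR2().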
def getBoneWeights(boneName, weights):
if boneName in weights[0]:
group_index = weights[0].index(boneName)
vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]]
else:
vgroup_data = []
return vgroup_data
def saveBR2(filename):
if not filename.lower().endswith('.br2'):
filename += '.br2'
if not BPyMessages.Warning_SaveOver(filename):
return
print "Start BR2 Export..."
Blender.Window.WaitCursor(1)
file = open( filename, 'wb')
scene = Blender.Scene.GetCurrent()
allObjects = scene.objects
filedata = "// Nexus Buddy BR2 - Exported from Blender for import to Nexus Buddy 2\n"
modelObs = {}
modelMeshes = {}
# will need list of these for multi-skeleton
boneIds = {}
for object in allObjects:
if object.type == 'Armature':
modelObs[object.name] = object
if object.type == 'Mesh':
parentArmOb = BPyObject.getObjectArmature(object)
if not parentArmOb.name in modelMeshes:
modelMeshes[parentArmOb.name] = []
modelMeshes[parentArmOb.name].append(object)
for modelObName in modelObs.keys():
# Write Skeleton
filedata += "skeleton\n"
armOb = modelObs[modelObName]
armature = armOb.data
# Calc bone depths and sort
boneDepths = []
for bone in armature.bones.values():
boneDepth = getBoneTreeDepth(bone, 0)
boneDepths.append((bone, boneDepth))
boneDepths = sorted(boneDepths, key=lambda k: k[0].name)
boneDepths = sorted(boneDepths, key=lambda k: k[1])
sortedBones = boneDepths
for boneid, boneTuple in enumerate(sortedBones):
boneIds[boneTuple[0].name] = boneid
#print 'boneId: %d: %s (depth:%s)' % (boneid, boneTuple[0].name, sortedBones[boneid][1])
# Write World Bone
filedata += '%d "%s" %d ' % (0, armOb.name, -1)
filedata += '%.8f %.8f %.8f ' % (0.0, 0.0, 0.0)
filedata += '%.8f %.8f %.8f %.8f ' % (0.0, 0.0, 0.0, 1.0)
filedata += '%.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0)
for boneid, boneTuple in enumerate(sortedBones):
bone = boneTuple[0]
boneDepth = boneTuple[1]
#boneid = boneid + 1 # World bone is zero
position, orientationQuat = getTranslationOrientation(bone, file)
# Get Inverse World Matrix for bone
t = bone.matrix['ARMATURESPACE'].copy().invert()
invWorldMatrix = Matrix([t[0][1], -t[0][0], t[0][2], t[0][3]],
[t[1][1], -t[1][0], t[1][2], t[1][3]],
[t[2][1], -t[2][0], t[2][2], t[2][3]],
[t[3][1], -t[3][0], t[3][2], t[3][3]])
outputBoneName = bone.name
if len(outputBoneName) == 29:
for item in armOb.getAllProperties():
if (("B_" + outputBoneName) == item.getName()):
outputBoneName = item.getData()
print 'Decode Bone Name: "%s" >' % item.getName()
print ' "%s"' % item.getData()
break
filedata += '%d "%s" ' % (boneid + 1, outputBoneName) # Adjust bone ids + 1 as zero is the World Bone
parentBoneId = 0
if bone.hasParent():
parentBoneId = boneIds[bone.parent.name] + 1 # Adjust bone ids + 1 as zero is the World Bone
filedata += '%d ' % parentBoneId
filedata +='%.8f %.8f %.8f ' % (position[0], position[1], position[2])
filedata +='%.8f %.8f %.8f %.8f ' % (orientationQuat[1], orientationQuat[2], orientationQuat[3], orientationQuat[0]) # GR2 uses x,y,z,w for Quaternions rather than w,x,y,z
filedata += '%.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f' % (invWorldMatrix[0][0], invWorldMatrix[0][1], invWorldMatrix[0][2], invWorldMatrix[0][3],
invWorldMatrix[1][0], invWorldMatrix[1][1], invWorldMatrix[1][2], invWorldMatrix[1][3],
invWorldMatrix[2][0], invWorldMatrix[2][1], invWorldMatrix[2][2], invWorldMatrix[2][3],
invWorldMatrix[3][0], invWorldMatrix[3][1], invWorldMatrix[3][2], invWorldMatrix[3][3])
#End of bone line
filedata += "\n"
filedata += 'meshes:%d\n' % len(modelMeshes[modelObName])
for meshObject in modelMeshes[modelObName]:
mesh = meshObject.data
meshName = meshObject.name
filedata += 'mesh:"%s"\n' % meshName
parentArmOb = BPyObject.getObjectArmature(meshObject)
# Fetch long mesh names from Armature properties
if len(meshName) == 19:
for item in parentArmOb.getAllProperties():
if ("M_" + meshName == item.getName()):
meshName = item.getData()
print 'Decode Mesh Name: %s > %s' % (item.getName(), item.getData())
break
#file.write('meshname:%s\n' % meshName)
#file.write('parent Arm:%s\n' % parentArmOb)
weights = meshNormalizedWeights(mesh)
vertexBoneWeights = {}
for boneName in parentArmOb.data.bones.keys():
vgroupDataForBone = getBoneWeights(boneName, weights)
#file.write('bone:%s vg:%s\n' % (boneName, vgroupDataForBone))
for vgData in vgroupDataForBone:
vertexId = vgData[0]
weight = vgData[1]
if not vertexId in vertexBoneWeights:
vertexBoneWeights[vertexId] = []
vertexBoneWeights[vertexId].append((boneName, weight))
#file.write('vert:%d bone:%s \n' % (vertexId, (boneName, weight)))
grannyVertexBoneWeights = {}
for vertId in vertexBoneWeights.keys():
#file.write('vert:%d ' % vertId)
boneIdsList = []
boneWeightsList = []
firstBoneId = 0
for i in range(4):
if i < len(vertexBoneWeights[vertId]):
vertexBoneWeightTuple = vertexBoneWeights[vertId][i]
boneName = vertexBoneWeightTuple[0]
boneIdsList.append(boneIds[boneName] + 1)
boneWeightsList.append(round(vertexBoneWeightTuple[1] * 255))
if i == 0:
firstBoneId = boneIds[boneName] + 1
else:
boneIdsList.append(firstBoneId)
boneWeightsList.append(0)
runningTotal = 0
for i, weight in enumerate(boneWeightsList):
runningTotal = runningTotal + weight
if runningTotal > 255:
boneWeightsList[i] = weight - 1
break
if not vertId in grannyVertexBoneWeights:
grannyVertexBoneWeights[vertId] = []
grannyVertexBoneWeights[vertId] = (boneIdsList, boneWeightsList)
#file.write('%s %s ' % (boneIdsList, boneWeightsList))
#file.write("\n")
position, orientationQuat = getTranslationOrientation(meshObject, file)
#file.write('position:%.8f %.8f %.8f\n' % (position[0], position[1], position[2]))
#file.write('orientationQuat:%.8f %.8f %.8f %.8f\n' % (orientationQuat[1], orientationQuat[2], orientationQuat[3], orientationQuat[0]))
#file.write(meshName+"\n")
filedata += "vertices\n"
# Determine unique vertex/UVs for output
uniqueVertSet = set()
uniqueVertUVIndexes = {}
uniqueVertUVs = []
currentVertUVIndex = 0
currentTriangleId = 0
triangleVertUVIndexes = []
for triangle in mesh.faces:
vertIds = [v.index for v in triangle]
vertIds = tuple(vertIds)
triangleVertUVIndexes.append([])
for i, uv in enumerate(triangle.uv):
vertexId = vertIds[i]
uvt = tuple(uv)
vertSig = '%i|%.8f|%.8f' % (vertexId, uvt[0], uvt[1])
if vertSig in uniqueVertSet:
triangleVertUVIndex = uniqueVertUVIndexes[vertSig]
else:
uniqueVertSet.add(vertSig)
uniqueVertUVIndexes[vertSig] = currentVertUVIndex
uniqueVertUVs.append((vertexId, uvt[0], uvt[1]))
triangleVertUVIndex = currentVertUVIndex
currentVertUVIndex = currentVertUVIndex + 1
triangleVertUVIndexes[currentTriangleId].append(triangleVertUVIndex)
currentTriangleId = currentTriangleId + 1
meshVerts = {}
for i,vert in enumerate(mesh.verts):
meshVerts[i] = vert
# Write Vertices
for uniqueVertUV in uniqueVertUVs:
index = uniqueVertUV[0]
vert = meshVerts[index]
vertCoord = tuple(vert.co)
vertNormal = tuple(vert.no)
filedata +='%.8f %.8f %.8f ' % (vertCoord[0] + position[0], vertCoord[1] + position[1], vertCoord[2] + position[2])
filedata +='%.8f %.8f %.8f ' % vertNormal
filedata +='%.8f %.8f ' % (uniqueVertUV[1], 1 - uniqueVertUV[2])
if index in grannyVertexBoneWeights:
vBoneWeightTuple = grannyVertexBoneWeights[index]
else:
raise "Error: Mesh has unweighted vertices!"
#vBoneWeightTuple = ([-1,-1,-1,-1],[-1,-1,-1,-1]) # Unweighted vertex - raise error
filedata +='%d %d %d %d ' % (vBoneWeightTuple[0][0], vBoneWeightTuple[0][1],vBoneWeightTuple[0][2],vBoneWeightTuple[0][3]) # Bone Ids
filedata +='%d %d %d %d\n' % (vBoneWeightTuple[1][0], vBoneWeightTuple[1][1],vBoneWeightTuple[1][2],vBoneWeightTuple[1][3]) # Bone Weights
# Write Triangles
filedata += "triangles\n"
for triangle in triangleVertUVIndexes:
#filedata += '%i %i %i\n' % tuple(triangle)
filedata += '%i %i %i\n' % (triangle[0],triangle[1],triangle[2])
filedata += "end"
file.write(filedata)
file.close()
Blender.Window.WaitCursor(0)
print "End BR2 Export."
if __name__=='__main__':
Blender.Window.FileSelector(saveBR2, "Export BR2", Blender.sys.makename(ext='.br2'))
| {
"content_hash": "e95ab24b80ca084717ab3e45ac4a130a",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 190,
"avg_line_length": 34.95,
"alnum_prop": 0.598838677101742,
"repo_name": "venetianthief/civ-dbs",
"id": "d6e7fd076e6554f003273a54409276153f75fc04",
"size": "11905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "older/OLD_export_br2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196581"
}
],
"symlink_target": ""
} |
from candidate.models import CandidateCampaign
from config.base import get_environment_variable
from datetime import date, datetime
from django.db import models
from django.db.models import F, Q, Count
from election.models import ElectionManager
from exception.models import handle_exception, handle_record_found_more_than_one_exception
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from measure.models import ContestMeasureManager
from office.models import ContestOfficeManager
from polling_location.models import PollingLocationManager
import wevote_functions.admin
from wevote_functions.functions import convert_date_to_date_as_integer, convert_to_int, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_ballot_returned_integer, fetch_site_unique_id_prefix
OFFICE = 'OFFICE'
CANDIDATE = 'CANDIDATE'
POLITICIAN = 'POLITICIAN'
MEASURE = 'MEASURE'
KIND_OF_BALLOT_ITEM_CHOICES = (
(OFFICE, 'Office'),
(CANDIDATE, 'Candidate'),
(POLITICIAN, 'Politician'),
(MEASURE, 'Measure'),
)
GOOGLE_MAPS_API_KEY = get_environment_variable("GOOGLE_MAPS_API_KEY")
GEOCODE_TIMEOUT = 10
logger = wevote_functions.admin.get_logger(__name__)
class BallotItem(models.Model):
"""
This is a generated table with ballot item data from a variety of sources, including Google Civic
One ballot item is either 1) a measure/referendum or 2) an office that is being competed for
"""
# The unique id of the voter for which this ballot was retrieved
voter_id = models.IntegerField(verbose_name="the voter unique id",
default=0, null=False, blank=False, db_index=True)
# The polling location for which this ballot was retrieved
polling_location_we_vote_id = models.CharField(
verbose_name="we vote permanent id of the polling location", max_length=255, default=None, null=True,
blank=True, unique=False, db_index=True)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=20, null=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
google_ballot_placement = models.BigIntegerField(
verbose_name="the order this item should appear on the ballot", null=True, blank=True, unique=False)
local_ballot_order = models.IntegerField(
verbose_name="locally calculated order this item should appear on the ballot", null=True, blank=True)
# The id for this contest office specific to this server.
# TODO contest_office_id should be positive integer as opposed to CharField
contest_office_id = models.CharField(verbose_name="local id for this contest office", max_length=255, null=True,
blank=True)
    # The internal We Vote id for the ContestOffice this ballot item is linked to
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this office", max_length=255, default=None, null=True,
blank=True, unique=False)
# The local database id for this measure, specific to this server.
# TODO contest_measure_id should be positive integer as opposed to CharField
contest_measure_id = models.CharField(
verbose_name="contest_measure unique id", max_length=255, null=True, blank=True)
    # The internal We Vote id for the ContestMeasure this ballot item is linked to
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this measure", max_length=255, default=None, null=True,
blank=True, unique=False)
# This is a sortable name, either the candidate name or the measure name
ballot_item_display_name = models.CharField(verbose_name="a label we can sort by", max_length=255, null=True,
blank=True)
measure_subtitle = models.TextField(verbose_name="google civic referendum subtitle",
null=True, blank=True, default="")
measure_text = models.TextField(verbose_name="measure text", null=True, blank=True, default="")
measure_url = models.URLField(verbose_name='url of measure', blank=True, null=True)
yes_vote_description = models.TextField(verbose_name="what a yes vote means", null=True, blank=True, default=None)
no_vote_description = models.TextField(verbose_name="what a no vote means", null=True, blank=True, default=None)
def is_contest_office(self):
if positive_value_exists(self.contest_office_id) or positive_value_exists(self.contest_office_we_vote_id):
return True
return False
def is_contest_measure(self):
if positive_value_exists(self.contest_measure_id) or positive_value_exists(self.contest_measure_we_vote_id):
return True
return False
def display_ballot_item(self):
return self.ballot_item_display_name
def fetch_ballot_order(self):
return 3
def candidates_list(self):
candidates_list_temp = CandidateCampaign.objects.all()
candidates_list_temp = candidates_list_temp.filter(google_civic_election_id=self.google_civic_election_id)
candidates_list_temp = candidates_list_temp.filter(contest_office_id=self.contest_office_id)
return candidates_list_temp
class BallotItemManager(models.Model):
def remove_duplicate_ballot_item_entries(
self, google_civic_election_id, contest_measure_id, contest_office_id,
voter_id=0, polling_location_we_vote_id=""):
status = ""
success = ""
ballot_item_found = False
ballot_item = None
ballot_item_list_manager = BallotItemListManager()
# retrieve_possible_duplicate_ballot_items
retrieve_results = ballot_item_list_manager.retrieve_ballot_item_duplicate_list(
google_civic_election_id, contest_measure_id, contest_office_id,
voter_id=voter_id, polling_location_we_vote_id=polling_location_we_vote_id)
if retrieve_results['ballot_item_list_count'] == 1:
# Only one found
ballot_item_list = retrieve_results['ballot_item_list']
ballot_item = ballot_item_list[0]
ballot_item_found = True
elif retrieve_results['ballot_item_list_count'] > 1:
# If here, we found a duplicate
first_one_kept = False
ballot_item_list = retrieve_results['ballot_item_list']
for one_ballot_item in ballot_item_list:
if first_one_kept:
one_ballot_item.delete()
else:
ballot_item = one_ballot_item
ballot_item_found = True
first_one_kept = True
results = {
"status": status,
"success": success,
"ballot_item_found": ballot_item_found,
"ballot_item": ballot_item,
}
return results
def refresh_cached_ballot_item_measure_info(self, ballot_item, contest_measure=None):
"""
The BallotItem tables cache information from other tables. This function reaches out to the source tables
and copies over the latest information to the BallotItem table.
:param ballot_item:
:param contest_measure: No need to retrieve again if passed in
:return:
"""
values_changed = False
measure_found = False
contest_measure_manager = ContestMeasureManager()
results = {}
if contest_measure and hasattr(contest_measure, 'measure_title'):
measure_found = True
elif positive_value_exists(ballot_item.contest_measure_id):
results = contest_measure_manager.retrieve_contest_measure_from_id(ballot_item.contest_measure_id)
measure_found = results['contest_measure_found']
contest_measure = results['contest_measure']
elif positive_value_exists(ballot_item.contest_measure_we_vote_id):
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(
ballot_item.contest_measure_we_vote_id)
measure_found = results['contest_measure_found']
contest_measure = results['contest_measure']
if measure_found:
ballot_item.contest_measure_id = contest_measure.id
ballot_item.contest_measure_we_vote_id = contest_measure.we_vote_id
ballot_item.ballot_item_display_name = contest_measure.measure_title
ballot_item.google_ballot_placement = contest_measure.google_ballot_placement
ballot_item.measure_subtitle = contest_measure.measure_subtitle
ballot_item.measure_text = contest_measure.measure_text
ballot_item.measure_url = contest_measure.measure_url
ballot_item.no_vote_description = contest_measure.ballotpedia_no_vote_description
ballot_item.yes_vote_description = contest_measure.ballotpedia_yes_vote_description
values_changed = True
if values_changed:
ballot_item.save()
return ballot_item
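    # Hedged usage sketch (assumed caller, built only from methods defined in this class;
    # the same pattern applies to refresh_cached_ballot_item_office_info below):
    #   ballot_item_manager = BallotItemManager()
    #   results = ballot_item_manager.retrieve_ballot_item(ballot_item_id)
    #   if results['ballot_item_found']:
    #       refreshed = ballot_item_manager.refresh_cached_ballot_item_measure_info(
    #           results['ballot_item'])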
def refresh_cached_ballot_item_office_info(self, ballot_item, contest_office=None):
"""
The BallotItem tables cache information from other tables. This function reaches out to the source tables
and copies over the latest information to the BallotItem table.
:param ballot_item:
:param contest_office: No need to retrieve again if passed in
:return:
"""
values_changed = False
office_found = False
contest_office_manager = ContestOfficeManager()
if contest_office and hasattr(contest_office, 'office_name'):
office_found = True
elif positive_value_exists(ballot_item.contest_office_id):
results = contest_office_manager.retrieve_contest_office_from_id(ballot_item.contest_office_id)
office_found = results['contest_office_found']
contest_office = results['contest_office']
elif positive_value_exists(ballot_item.contest_office_we_vote_id):
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(
ballot_item.contest_office_we_vote_id)
office_found = results['contest_office_found']
contest_office = results['contest_office']
if office_found:
ballot_item.contest_office_id = contest_office.id
ballot_item.contest_office_we_vote_id = contest_office.we_vote_id
ballot_item.ballot_item_display_name = contest_office.office_name
ballot_item.google_ballot_placement = contest_office.google_ballot_placement
values_changed = True
if values_changed:
ballot_item.save()
return ballot_item
def retrieve_ballot_item(self, ballot_item_id=0):
status = ""
ballot_item = BallotItem()
try:
if positive_value_exists(ballot_item_id):
ballot_item = BallotItem.objects.get(id=ballot_item_id)
if ballot_item.id:
ballot_item_found = True
status = "BALLOT_ITEM_FOUND_WITH_BALLOT_ITEM_ID "
else:
ballot_item_found = False
status = "ELECTION_NOT_FOUND_WITH_BALLOT_ITEM_ID "
success = True
else:
ballot_item_found = False
status = "Insufficient variables included to retrieve one ballot_item."
success = False
except BallotItem.MultipleObjectsReturned as e:
status += "ERROR_MORE_THAN_ONE_BALLOT_ITEM_FOUND-BY_BALLOT_ITEM "
handle_record_found_more_than_one_exception(e, logger, exception_message_optional=status)
ballot_item_found = False
success = False
except BallotItem.DoesNotExist:
ballot_item_found = False
status += "BALLOT_ITEM_NOT_FOUND "
success = True
results = {
'success': success,
'status': status,
'ballot_item_found': ballot_item_found,
'ballot_item': ballot_item,
}
return results
def update_or_create_ballot_item_for_voter(
self, voter_id, google_civic_election_id, google_ballot_placement,
ballot_item_display_name, measure_subtitle, measure_text, local_ballot_order,
contest_office_id=0, contest_office_we_vote_id='',
contest_measure_id=0, contest_measure_we_vote_id='', state_code='', defaults={}):
ballot_item_found = False # At the end, does a ballot_item exist?
ballot_item_on_stage = None
delete_extra_ballot_item_entries = False
exception_multiple_object_returned = False
new_ballot_item_created = False
status = ""
success = True
# We require both contest_office_id and contest_office_we_vote_id
# OR both contest_measure_id and contest_measure_we_vote_id
required_office_ids_found = positive_value_exists(contest_office_id) \
and positive_value_exists(contest_office_we_vote_id)
required_measure_ids_found = positive_value_exists(contest_measure_id) \
and positive_value_exists(contest_measure_we_vote_id)
contest_or_measure_identifier_found = required_office_ids_found or required_measure_ids_found
if not contest_or_measure_identifier_found:
success = False
status += 'MISSING_SUFFICIENT_OFFICE_OR_MEASURE_IDS '
# If here, then we know that there are sufficient office or measure ids
elif not google_civic_election_id:
success = False
status += 'MISSING_GOOGLE_CIVIC_ELECTION_ID '
elif not voter_id:
success = False
status += 'MISSING_VOTER_ID '
else:
try:
# Retrieve list of ballot_items that match
# Use get_or_create to see if a ballot item exists
create_values = {
# Values we search against
'google_civic_election_id': google_civic_election_id,
'voter_id': voter_id,
# The rest of the values
'contest_office_id': contest_office_id,
'contest_office_we_vote_id': contest_office_we_vote_id,
'contest_measure_id': contest_measure_id,
'contest_measure_we_vote_id': contest_measure_we_vote_id,
'google_ballot_placement': google_ballot_placement,
'local_ballot_order': local_ballot_order,
'ballot_item_display_name': ballot_item_display_name,
'measure_subtitle': measure_subtitle,
'measure_text': measure_text,
'state_code': state_code,
}
if 'measure_url' in defaults:
create_values['measure_url'] = defaults['measure_url']
if 'yes_vote_description' in defaults:
create_values['yes_vote_description'] = defaults['yes_vote_description']
if 'no_vote_description' in defaults:
create_values['no_vote_description'] = defaults['no_vote_description']
# We search with contest_measure_id and contest_office_id because they are (will be) integers,
# which will be a faster search
ballot_item_on_stage, new_ballot_item_created = BallotItem.objects.get_or_create(
contest_measure_id__exact=contest_measure_id,
contest_office_id__exact=contest_office_id,
google_civic_election_id__exact=google_civic_election_id,
voter_id__exact=voter_id,
defaults=create_values)
ballot_item_found = True
except BallotItem.MultipleObjectsReturned as e:
status += "UPDATE_OR_CREATE_BALLOT_ITEM-MORE_THAN_ONE_FOUND-ABOUT_TO_DELETE_DUPLICATE "
handle_record_found_more_than_one_exception(e, logger, exception_message_optional=status)
success = False
delete_extra_ballot_item_entries = True
exception_multiple_object_returned = True
if positive_value_exists(delete_extra_ballot_item_entries):
success = False
ballot_item_manager = BallotItemManager()
results = ballot_item_manager.remove_duplicate_ballot_item_entries(
google_civic_election_id, contest_measure_id, contest_office_id, voter_id=voter_id)
if results['ballot_item_found']:
ballot_item_found = True
ballot_item_on_stage = results['ballot_item']
success = True
if positive_value_exists(ballot_item_found):
try:
# if a ballot_item is found (instead of just created), *then* update it
# Note, we never update google_civic_election_id or voter_id
if new_ballot_item_created:
success = True
status += 'BALLOT_ITEM_CREATED '
else:
ballot_item_on_stage.contest_office_id = contest_office_id
ballot_item_on_stage.contest_office_we_vote_id = contest_office_we_vote_id
ballot_item_on_stage.contest_measure_id = contest_measure_id
ballot_item_on_stage.contest_measure_we_vote_id = contest_measure_we_vote_id
ballot_item_on_stage.google_ballot_placement = google_ballot_placement
ballot_item_on_stage.local_ballot_order = local_ballot_order
ballot_item_on_stage.ballot_item_display_name = ballot_item_display_name
ballot_item_on_stage.measure_subtitle = measure_subtitle
ballot_item_on_stage.measure_text = measure_text
if 'measure_url' in defaults:
measure_url = defaults['measure_url']
ballot_item_on_stage.measure_url = measure_url
if 'yes_vote_description' in defaults:
yes_vote_description = defaults['yes_vote_description']
ballot_item_on_stage.yes_vote_description = yes_vote_description
if 'no_vote_description' in defaults:
no_vote_description = defaults['no_vote_description']
ballot_item_on_stage.no_vote_description = no_vote_description
ballot_item_on_stage.save()
success = True
status += 'BALLOT_ITEM_UPDATED '
except Exception as e:
status += "UPDATE_OR_CREATE_BALLOT_ITEM-MORE_THAN_ONE_FOUND "
handle_record_found_more_than_one_exception(e, logger, exception_message_optional=status)
success = False
exception_multiple_object_returned = True
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'ballot_item_found': ballot_item_found,
'new_ballot_item_created': new_ballot_item_created,
}
return results
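    # Hedged usage sketch for the method above (assumed caller; every value shown is a
    # placeholder): only the ids for either the office or the measure need to be paired.
    #   defaults = {'measure_url': measure_url,
    #               'yes_vote_description': yes_vote_description,
    #               'no_vote_description': no_vote_description}
    #   results = BallotItemManager().update_or_create_ballot_item_for_voter(
    #       voter_id, google_civic_election_id, google_ballot_placement,
    #       ballot_item_display_name, measure_subtitle, measure_text, local_ballot_order,
    #       contest_measure_id=contest_measure_id,
    #       contest_measure_we_vote_id=contest_measure_we_vote_id,
    #       state_code=state_code, defaults=defaults)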
def update_or_create_ballot_item_for_polling_location(
self, polling_location_we_vote_id, google_civic_election_id, google_ballot_placement,
ballot_item_display_name, measure_subtitle, measure_text, local_ballot_order,
contest_office_id=0, contest_office_we_vote_id='',
contest_measure_id=0, contest_measure_we_vote_id='', state_code='', defaults={}):
ballot_item_found = False # At the end, does a ballot_item exist?
ballot_item_on_stage = None
delete_extra_ballot_item_entries = False
exception_multiple_object_returned = False
new_ballot_item_created = False
status = ""
# Make sure we have this polling_location
polling_location_manager = PollingLocationManager()
results = polling_location_manager.retrieve_polling_location_by_id(0, polling_location_we_vote_id)
if results['polling_location_found']:
polling_location_found = True
else:
polling_location_found = False
if positive_value_exists(contest_office_we_vote_id) and not positive_value_exists(contest_office_id):
# Look up contest_office_id
contest_office_manager = ContestOfficeManager()
contest_office_id = contest_office_manager.fetch_contest_office_id_from_we_vote_id(
contest_office_we_vote_id)
elif positive_value_exists(contest_office_id) and not positive_value_exists(contest_office_we_vote_id):
# Look up contest_office_we_vote_id
contest_office_manager = ContestOfficeManager()
contest_office_we_vote_id = contest_office_manager.fetch_contest_office_we_vote_id_from_id(
contest_office_id)
if positive_value_exists(contest_measure_we_vote_id) and not positive_value_exists(contest_measure_id):
# Look up contest_measure_id
contest_measure_manager = ContestMeasureManager()
contest_measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(
contest_measure_we_vote_id)
elif positive_value_exists(contest_measure_id) and not positive_value_exists(contest_measure_we_vote_id):
            # Look up contest_measure_we_vote_id
contest_measure_manager = ContestMeasureManager()
contest_measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(
contest_measure_id)
# We require both contest_office_id and contest_office_we_vote_id
# OR both contest_measure_id and contest_measure_we_vote_id
required_office_ids_found = positive_value_exists(contest_office_id) \
and positive_value_exists(contest_office_we_vote_id)
required_measure_ids_found = positive_value_exists(contest_measure_id) \
and positive_value_exists(contest_measure_we_vote_id)
contest_or_measure_identifier_found = required_office_ids_found or required_measure_ids_found
if not contest_or_measure_identifier_found:
success = False
status += 'MISSING_SUFFICIENT_OFFICE_OR_MEASURE_IDS-POLLING_LOCATION '
# If here, then we know that there are sufficient office or measure ids
elif not google_civic_election_id:
success = False
status += 'MISSING_GOOGLE_CIVIC_ELECTION_ID-POLLING_LOCATION '
elif not polling_location_we_vote_id:
success = False
status += 'MISSING_POLLING_LOCATION_WE_VOTE_ID '
# We Vote Server doesn't have a matching polling location yet.
# elif not polling_location_found:
# success = False
# status = 'MISSING_POLLING_LOCATION_LOCALLY'
else:
try:
# Use get_or_create to see if a ballot item exists
create_values = {
# Values we search against
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id': polling_location_we_vote_id,
# The rest of the values
'contest_office_id': contest_office_id,
'contest_office_we_vote_id': contest_office_we_vote_id,
'contest_measure_id': contest_measure_id,
'contest_measure_we_vote_id': contest_measure_we_vote_id,
'google_ballot_placement': google_ballot_placement,
'local_ballot_order': local_ballot_order,
'ballot_item_display_name': ballot_item_display_name,
'measure_subtitle': measure_subtitle,
'measure_text': measure_text,
'state_code': state_code,
}
if 'measure_url' in defaults:
create_values['measure_url'] = defaults['measure_url']
if 'yes_vote_description' in defaults:
create_values['yes_vote_description'] = defaults['yes_vote_description']
if 'no_vote_description' in defaults:
create_values['no_vote_description'] = defaults['no_vote_description']
# We search with contest_measure_id and contest_office_id because they are (will be) integers,
# which will be a faster search
ballot_item_on_stage, new_ballot_item_created = BallotItem.objects.get_or_create(
contest_measure_id__exact=contest_measure_id,
contest_office_id__exact=contest_office_id,
google_civic_election_id__exact=google_civic_election_id,
polling_location_we_vote_id__iexact=polling_location_we_vote_id,
defaults=create_values)
ballot_item_found = True
except BallotItem.MultipleObjectsReturned as e:
status += 'MULTIPLE_MATCHING_BALLOT_ITEMS_FOUND-POLLING_LOCATION '
handle_record_found_more_than_one_exception(e, logger=logger, exception_message_optional=status)
delete_extra_ballot_item_entries = True
exception_multiple_object_returned = True
if positive_value_exists(delete_extra_ballot_item_entries):
ballot_item_manager = BallotItemManager()
results = ballot_item_manager.remove_duplicate_ballot_item_entries(
google_civic_election_id, contest_measure_id, contest_office_id,
polling_location_we_vote_id=polling_location_we_vote_id)
if results['ballot_item_found']:
ballot_item_found = True
ballot_item_on_stage = results['ballot_item']
# if a ballot_item is found (instead of just created), *then* update it
# Note, we never update google_civic_election_id or voter_id
if ballot_item_found:
try:
ballot_item_on_stage.contest_office_id = contest_office_id
ballot_item_on_stage.contest_office_we_vote_id = contest_office_we_vote_id
ballot_item_on_stage.contest_measure_id = contest_measure_id
ballot_item_on_stage.contest_measure_we_vote_id = contest_measure_we_vote_id
ballot_item_on_stage.google_ballot_placement = google_ballot_placement
ballot_item_on_stage.local_ballot_order = local_ballot_order
ballot_item_on_stage.ballot_item_display_name = ballot_item_display_name
ballot_item_on_stage.measure_subtitle = measure_subtitle
ballot_item_on_stage.measure_text = measure_text
ballot_item_on_stage.state_code = state_code
if 'measure_url' in defaults:
measure_url = defaults['measure_url']
ballot_item_on_stage.measure_url = measure_url
if 'yes_vote_description' in defaults:
yes_vote_description = defaults['yes_vote_description']
ballot_item_on_stage.yes_vote_description = yes_vote_description
if 'no_vote_description' in defaults:
no_vote_description = defaults['no_vote_description']
ballot_item_on_stage.no_vote_description = no_vote_description
ballot_item_on_stage.save()
success = True
status = 'BALLOT_ITEM_UPDATED-POLLING_LOCATION'
            except BallotItem.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
success = False
status = 'MULTIPLE_MATCHING_BALLOT_ITEMS_FOUND-POLLING_LOCATION '
exception_multiple_object_returned = True
else:
success = True
status = 'BALLOT_ITEM_CREATED-POLLING_LOCATION'
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'ballot_item': ballot_item_on_stage,
'ballot_item_found': ballot_item_found,
'new_ballot_item_created': new_ballot_item_created,
}
return results
def create_ballot_item_row_entry(self, ballot_item_display_name, local_ballot_order, state_code,
google_civic_election_id, defaults):
"""
Create BallotItem table entry with BallotItem details
:param ballot_item_display_name:
:param local_ballot_order:
:param state_code:
:param google_civic_election_id:
:param defaults:
:return:
"""
new_ballot_item_created = False
new_ballot_item = ''
try:
if positive_value_exists(state_code):
state_code = state_code.lower()
new_ballot_item = BallotItem.objects.create(
ballot_item_display_name=ballot_item_display_name,
local_ballot_order=local_ballot_order,
state_code=state_code,
google_civic_election_id=google_civic_election_id)
if new_ballot_item:
success = True
status = "CONTEST_OFFICE_CREATED"
new_ballot_item_created = True
new_ballot_item.contest_office_id = defaults['contest_office_id']
new_ballot_item.contest_office_we_vote_id = defaults['contest_office_we_vote_id']
new_ballot_item.contest_measure_id = defaults['contest_measure_id']
new_ballot_item.contest_measure_we_vote_id = defaults['contest_measure_we_vote_id']
new_ballot_item.measure_subtitle = defaults['measure_subtitle']
new_ballot_item.polling_location_we_vote_id = defaults['polling_location_we_vote_id']
if 'measure_url' in defaults:
measure_url = defaults['measure_url']
new_ballot_item.measure_url = measure_url
if 'yes_vote_description' in defaults:
yes_vote_description = defaults['yes_vote_description']
new_ballot_item.yes_vote_description = yes_vote_description
if 'no_vote_description' in defaults:
no_vote_description = defaults['no_vote_description']
new_ballot_item.no_vote_description = no_vote_description
if 'state_code' in defaults and positive_value_exists(defaults['state_code']):
state_code_from_defaults = defaults['state_code']
state_code_from_defaults = state_code_from_defaults.lower()
new_ballot_item.state_code = state_code_from_defaults
new_ballot_item.save()
status = "NEW_BALLOT_ITEM_CREATED "
else:
success = False
status = "BALLOT_ITEM_CREATE_FAILED"
except Exception as e:
success = False
new_ballot_item_created = False
status = "BALLOT_ITEM_RETRIEVE_ERROR"
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'new_ballot_item_created': new_ballot_item_created,
'ballot_item': new_ballot_item,
}
return results
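    # Hedged note on the defaults dict consumed above (keys taken from this method and
    # from update_ballot_item_row_entry below; the values shown are placeholders):
    #   defaults = {'contest_office_id': 0, 'contest_office_we_vote_id': '',
    #               'contest_measure_id': contest_measure_id,
    #               'contest_measure_we_vote_id': contest_measure_we_vote_id,
    #               'measure_subtitle': measure_subtitle,
    #               'polling_location_we_vote_id': polling_location_we_vote_id}
    #   Optional keys: 'measure_url', 'yes_vote_description', 'no_vote_description',
    #   'state_code'.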
def update_ballot_item_row_entry(self, ballot_item_display_name, local_ballot_order, state_code,
google_civic_election_id, defaults):
"""
Update BallotItem table entry with matching we_vote_id
:param ballot_item_display_name:
:param local_ballot_order:
:param state_code:
:param google_civic_election_id:
:param defaults:
:return:
"""
success = False
status = ""
ballot_item_updated = False
existing_ballot_item_entry = ''
ballot_item_found = False
try:
if positive_value_exists(defaults['polling_location_we_vote_id']) and \
positive_value_exists(google_civic_election_id):
if positive_value_exists(defaults['contest_office_we_vote_id']):
existing_ballot_item_entry = BallotItem.objects.get(
contest_office_we_vote_id__iexact=defaults['contest_office_we_vote_id'],
polling_location_we_vote_id__iexact=defaults['polling_location_we_vote_id'],
google_civic_election_id=google_civic_election_id)
ballot_item_found = True
elif positive_value_exists(defaults['contest_measure_we_vote_id']):
existing_ballot_item_entry = BallotItem.objects.get(
contest_measure_we_vote_id__iexact=defaults['contest_measure_we_vote_id'],
polling_location_we_vote_id__iexact=defaults['polling_location_we_vote_id'],
google_civic_election_id=google_civic_election_id)
ballot_item_found = True
if ballot_item_found:
# found the existing entry, update the values
existing_ballot_item_entry.ballot_item_display_name = ballot_item_display_name
existing_ballot_item_entry.local_ballot_order = local_ballot_order
existing_ballot_item_entry.state_code = state_code
existing_ballot_item_entry.contest_office_id = defaults['contest_office_id']
existing_ballot_item_entry.contest_office_we_vote_id = defaults['contest_office_we_vote_id']
existing_ballot_item_entry.contest_measure_id = defaults['contest_measure_id']
existing_ballot_item_entry.contest_measure_we_vote_id = defaults['contest_measure_we_vote_id']
existing_ballot_item_entry.measure_subtitle = defaults['measure_subtitle']
if 'measure_url' in defaults:
measure_url = defaults['measure_url']
existing_ballot_item_entry.measure_url = measure_url
if 'yes_vote_description' in defaults:
yes_vote_description = defaults['yes_vote_description']
existing_ballot_item_entry.yes_vote_description = yes_vote_description
if 'no_vote_description' in defaults:
no_vote_description = defaults['no_vote_description']
existing_ballot_item_entry.no_vote_description = no_vote_description
if 'state_code' in defaults and positive_value_exists(defaults['state_code']):
state_code = defaults['state_code']
existing_ballot_item_entry.state_code = state_code.lower()
# now go ahead and save this entry (update)
existing_ballot_item_entry.save()
ballot_item_updated = True
success = True
status = "BALLOT_ITEM_UPDATED"
except Exception as e:
success = False
ballot_item_updated = False
status = "BALLOT_ITEM_RETRIEVE_ERROR"
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'ballot_item_updated': ballot_item_updated,
'ballot_item': existing_ballot_item_entry,
}
return results
class BallotItemListManager(models.Model):
"""
A way to work with a list of ballot_items
"""
def delete_all_ballot_items_for_voter(self, voter_id, google_civic_election_id):
ballot_item_list_deleted = False
ballot_items_deleted_count = 0
try:
ballot_item_queryset = BallotItem.objects.filter(voter_id=voter_id)
if positive_value_exists(google_civic_election_id):
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_items_deleted_count = ballot_item_queryset.count()
ballot_item_queryset.delete()
ballot_item_list_deleted = True
status = 'BALLOT_ITEMS_DELETED'
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_DELETED_DoesNotExist'
ballot_items_deleted_count = 0
except Exception as e:
handle_exception(e, logger=logger)
ballot_items_deleted_count = 0
status = 'FAILED delete_all_ballot_items_for_voter ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_item_list_deleted else False,
'status': status,
'google_civic_election_id': google_civic_election_id,
'voter_id': voter_id,
'ballot_item_list_deleted': ballot_item_list_deleted,
'ballot_items_deleted_count': ballot_items_deleted_count,
}
return results
def retrieve_ballot_items_for_election(self, google_civic_election_id):
ballot_item_list = []
ballot_item_list_found = False
try:
# We cannot use 'readonly' because the result set sometimes gets modified with .save()
ballot_item_queryset = BallotItem.objects.all()
ballot_item_queryset = ballot_item_queryset.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_queryset = ballot_item_queryset.filter(
google_civic_election_id=google_civic_election_id)
ballot_item_list = list(ballot_item_queryset)
success = True
if positive_value_exists(ballot_item_list):
ballot_item_list_found = True
status = 'BALLOT_ITEMS_FOUND '
else:
status = 'NO_BALLOT_ITEMS_FOUND, not positive_value_exists '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
success = True
status = 'NO_BALLOT_ITEMS_FOUND '
ballot_item_list = []
except Exception as e:
success = False
handle_exception(e, logger=logger)
status = 'FAILED retrieve_ballot_items_for_election ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': success,
'status': status,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
def retrieve_ballot_items_for_election_lacking_state(self, google_civic_election_id, number_to_retrieve=5000):
"""
:param google_civic_election_id:
:param number_to_retrieve: Repairing 1000 ballot items takes about 9 seconds.
:return:
"""
ballot_item_list = []
ballot_item_list_found = False
try:
ballot_item_queryset = BallotItem.objects.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_item_queryset = ballot_item_queryset.filter(Q(state_code=None) | Q(state_code=""))
ballot_item_list = list(ballot_item_queryset[:number_to_retrieve])
if positive_value_exists(ballot_item_list):
ballot_item_list_found = True
status = 'BALLOT_ITEMS_FOUND_WITHOUT_STATE '
else:
status = 'NO_BALLOT_ITEMS_WITHOUT_STATE_FOUND, not positive_value_exists '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_WITHOUT_STATE_FOUND '
ballot_item_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_ballot_items_for_election_lacking_state ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
def count_ballot_items_for_election_lacking_state(self, google_civic_election_id):
ballot_item_list_count = 0
success = False
try:
ballot_item_queryset = BallotItem.objects.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_item_queryset = ballot_item_queryset.filter(Q(state_code=None) | Q(state_code=""))
ballot_item_list_count = ballot_item_queryset.count()
status = 'BALLOT_ITEMS_WITHOUT_STATE_FOUND '
success = True
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_WITHOUT_STATE_FOUND '
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_ballot_items_for_election_lacking_state ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': success,
'status': status,
'ballot_item_list_count': ballot_item_list_count,
}
return results
def retrieve_all_ballot_items_for_contest_measure(self, measure_id, measure_we_vote_id):
ballot_item_list = []
ballot_item_list_found = False
if not positive_value_exists(measure_id) and not positive_value_exists(measure_we_vote_id):
status = 'VALID_MEASURE_ID_AND_MEASURE_WE_VOTE_ID_MISSING'
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'measure_id': measure_id,
'measure_we_vote_id': measure_we_vote_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
try:
ballot_item_queryset = BallotItem.objects.all()
if positive_value_exists(measure_id):
ballot_item_queryset = ballot_item_queryset.filter(contest_measure_id=measure_id)
elif positive_value_exists(measure_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(contest_measure_we_vote_id=measure_we_vote_id)
ballot_item_queryset = ballot_item_queryset.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_list = ballot_item_queryset
if len(ballot_item_list):
ballot_item_list_found = True
status = 'BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_contest_measure '
else:
status = 'NO_BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_contest_measure '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_FOUND_DoesNotExist, retrieve_all_ballot_items_for_contest_measure '
ballot_item_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_ballot_items_for_contest_measure ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'measure_id': measure_id,
'measure_we_vote_id': measure_we_vote_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
def retrieve_all_ballot_items_for_contest_office(self, office_id, office_we_vote_id):
ballot_item_list = []
ballot_item_list_found = False
if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):
status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
try:
ballot_item_queryset = BallotItem.objects.all()
if positive_value_exists(office_id):
ballot_item_queryset = ballot_item_queryset.filter(contest_office_id=office_id)
elif positive_value_exists(office_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(contest_office_we_vote_id=office_we_vote_id)
ballot_item_queryset = ballot_item_queryset.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_list = ballot_item_queryset
if len(ballot_item_list):
ballot_item_list_found = True
status = 'BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_contest_office '
else:
status = 'NO_BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_contest_office '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_FOUND_DoesNotExist, retrieve_all_ballot_items_for_contest_office '
ballot_item_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_ballot_items_for_contest_office ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
def retrieve_ballot_item_duplicate_list(
self, google_civic_election_id, contest_measure_id, contest_office_id,
voter_id=0, polling_location_we_vote_id=""):
ballot_item_list = []
ballot_item_list_count = 0
ballot_item_list_found = False
status = ""
if not positive_value_exists(voter_id) and not positive_value_exists(polling_location_we_vote_id):
status += "RETRIEVE_BALLOT_ITEM_DUPLICATE_LIST-MISSING_REQUIRED_VARIABLE "
success = False
results = {
'success': success,
'status': status,
'ballot_item_list': ballot_item_list,
'ballot_item_list_count': ballot_item_list_count,
'ballot_item_list_found': ballot_item_list_found,
}
return results
try:
# We cannot use 'readonly' because the result set sometimes gets modified with .save()
ballot_item_queryset = BallotItem.objects.all()
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_item_queryset = ballot_item_queryset.filter(contest_measure_id=contest_measure_id)
ballot_item_queryset = ballot_item_queryset.filter(contest_office_id=contest_office_id)
if positive_value_exists(voter_id):
ballot_item_queryset = ballot_item_queryset.filter(voter_id=voter_id)
if positive_value_exists(polling_location_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(
polling_location_we_vote_id__iexact=polling_location_we_vote_id)
ballot_item_list = list(ballot_item_queryset)
ballot_item_list_count = len(ballot_item_list)
success = True
if positive_value_exists(ballot_item_list):
ballot_item_list_found = True
status += 'BALLOT_ITEM_DUPLICATE_LIST_FOUND '
else:
status += 'NO_BALLOT_ITEM_DUPLICATE_LIST_FOUND-EMPTY_LIST '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
success = True
status += 'NO_BALLOT_ITEM_DUPLICATE_LIST_FOUND '
ballot_item_list = []
except Exception as e:
success = False
status += 'FAILED retrieve_ballot_item_duplicate_list ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'ballot_item_list': ballot_item_list,
'ballot_item_list_count': ballot_item_list_count,
'ballot_item_list_found': ballot_item_list_found,
}
return results
def delete_all_ballot_items_for_contest_office(self, office_id, office_we_vote_id):
ballot_items_deleted_count = 0
if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):
status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'
success = False
results = {
'success': success,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'ballot_items_deleted_count': ballot_items_deleted_count,
}
return results
try:
ballot_item_queryset = BallotItem.objects.all()
if positive_value_exists(office_id):
ballot_item_queryset = ballot_item_queryset.filter(contest_office_id=office_id)
elif positive_value_exists(office_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(contest_office_we_vote_id=office_we_vote_id)
ballot_items_deleted_count = ballot_item_queryset.count()
ballot_item_queryset.delete()
            status = 'BALLOT_ITEMS_DELETED, delete_all_ballot_items_for_contest_office '
success = True
except Exception as e:
success = False
ballot_items_deleted_count = 0
handle_exception(e, logger=logger)
status = 'FAILED delete_all_ballot_items_for_contest_office ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': success,
'status': status,
'office_id': office_id,
'office_we_vote_id': office_we_vote_id,
'ballot_items_deleted_count': ballot_items_deleted_count,
}
return results
def retrieve_all_ballot_items_for_voter(self, voter_id, google_civic_election_id, read_only=False):
polling_location_we_vote_id = ''
ballot_item_list = []
ballot_item_list_found = False
try:
if positive_value_exists(voter_id):
# Intentionally not using 'readonly' here as the default
if read_only:
ballot_item_queryset = BallotItem.objects.using('readonly').all()
else:
ballot_item_queryset = BallotItem.objects.all()
ballot_item_queryset = ballot_item_queryset.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_queryset = ballot_item_queryset.filter(voter_id=voter_id)
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_item_list = list(ballot_item_queryset)
if len(ballot_item_list):
ballot_item_list_found = True
status = 'BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_voter '
else:
status = 'NO_BALLOT_ITEMS_FOUND_0 '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_FOUND_DoesNotExist '
ballot_item_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_ballot_items_for_voter ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'google_civic_election_id': google_civic_election_id,
'voter_id': voter_id,
'polling_location_we_vote_id': polling_location_we_vote_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
def retrieve_all_ballot_items_for_polling_location(self, polling_location_we_vote_id, google_civic_election_id,
for_editing=False):
voter_id = 0
ballot_item_list = []
ballot_item_list_found = False
try:
if positive_value_exists(for_editing):
ballot_item_queryset = BallotItem.objects.all()
else:
ballot_item_queryset = BallotItem.objects.using('readonly').all()
ballot_item_queryset = ballot_item_queryset.order_by('local_ballot_order', 'google_ballot_placement')
ballot_item_queryset = ballot_item_queryset.filter(polling_location_we_vote_id=polling_location_we_vote_id)
if positive_value_exists(google_civic_election_id):
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_item_list = ballot_item_queryset
if len(ballot_item_list):
ballot_item_list_found = True
status = 'BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_polling_location '
else:
status = 'NO_BALLOT_ITEMS_FOUND, retrieve_all_ballot_items_for_polling_location '
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_ITEMS_FOUND_DoesNotExist, retrieve_all_ballot_items_for_polling_location '
ballot_item_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_ballot_items_for_polling_location ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_item_list_found else False,
'status': status,
'google_civic_election_id': google_civic_election_id,
'voter_id': voter_id,
'polling_location_we_vote_id': polling_location_we_vote_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list,
}
return results
def fetch_most_recent_google_civic_election_id(self):
election_manager = ElectionManager()
results = election_manager.retrieve_elections_by_date()
if results['success']:
election_list = results['election_list']
for one_election in election_list:
ballot_item_queryset = BallotItem.objects.all()
ballot_item_queryset = ballot_item_queryset.filter(
google_civic_election_id=one_election.google_civic_election_id)
number_found = ballot_item_queryset.count()
if positive_value_exists(number_found):
# Since we are starting with the most recent election, as soon as we find
# any election with ballot items, we can exit.
return one_election.google_civic_election_id
return 0
def fetch_ballot_item_list_count_for_ballot_returned(self, voter_id, polling_location_we_vote_id,
google_civic_election_id):
voter_id = convert_to_int(voter_id)
google_civic_election_id = convert_to_int(google_civic_election_id)
try:
ballot_item_queryset = BallotItem.objects.all()
if positive_value_exists(voter_id):
ballot_item_queryset = ballot_item_queryset.filter(
voter_id=voter_id)
elif positive_value_exists(polling_location_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(
polling_location_we_vote_id__iexact=polling_location_we_vote_id)
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
return ballot_item_queryset.count()
except BallotItem.DoesNotExist:
# No ballot items found. Not a problem.
pass
except Exception as e:
pass
return 0
def copy_ballot_items(self, ballot_returned, to_voter_id):
status = ""
ballot_item_list = []
ballot_item_list_found = False
# Get all ballot items from the reference ballot_returned
if positive_value_exists(ballot_returned.polling_location_we_vote_id):
retrieve_results = self.retrieve_all_ballot_items_for_polling_location(
ballot_returned.polling_location_we_vote_id, ballot_returned.google_civic_election_id)
status += retrieve_results['status']
ballot_item_list_found = retrieve_results['ballot_item_list_found']
ballot_item_list = retrieve_results['ballot_item_list']
elif positive_value_exists(ballot_returned.voter_id):
retrieve_results = self.retrieve_all_ballot_items_for_voter(ballot_returned.voter_id,
ballot_returned.google_civic_election_id)
status += retrieve_results['status']
ballot_item_list_found = retrieve_results['ballot_item_list_found']
ballot_item_list = retrieve_results['ballot_item_list']
if not ballot_item_list_found:
error_results = {
'ballot_returned_copied': False,
'success': False,
'status': status,
}
return error_results
ballot_item_manager = BallotItemManager()
# This is a list of ballot items, usually from a polling location, that we are copying over to a voter
for one_ballot_item in ballot_item_list:
defaults = {}
defaults['measure_url'] = one_ballot_item.measure_url
defaults['yes_vote_description'] = one_ballot_item.yes_vote_description
defaults['no_vote_description'] = one_ballot_item.no_vote_description
create_results = ballot_item_manager.update_or_create_ballot_item_for_voter(
to_voter_id, ballot_returned.google_civic_election_id, one_ballot_item.google_ballot_placement,
one_ballot_item.ballot_item_display_name, one_ballot_item.measure_subtitle,
one_ballot_item.measure_text,
one_ballot_item.local_ballot_order,
one_ballot_item.contest_office_id, one_ballot_item.contest_office_we_vote_id,
one_ballot_item.contest_measure_id, one_ballot_item.contest_measure_we_vote_id,
one_ballot_item.state_code, defaults)
if not create_results['success']:
status += create_results['status']
results = {
'ballot_returned_copied': True,
'success': True,
'status': status,
}
return results
def refresh_ballot_items_from_master_tables(self, voter_id, google_civic_election_id,
offices_dict={}, measures_dict={}):
"""
:param voter_id:
:param google_civic_election_id:
:param offices_dict: # key is office_we_vote_id, value is the office object
:param measures_dict: # key is measure_we_vote_id, value is the measure object
:return:
"""
status = ""
if not positive_value_exists(voter_id) or not positive_value_exists(google_civic_election_id):
status += "REFRESH_BALLOT_ITEMS_FROM_MASTER_TABLES-MISSING_VOTER_OR_ELECTION "
error_results = {
'success': False,
'status': status,
'offices_dict': offices_dict,
'measures_dict': measures_dict,
}
return error_results
# Get all ballot items for this voter
retrieve_results = self.retrieve_all_ballot_items_for_voter(voter_id, google_civic_election_id)
status += retrieve_results['status']
ballot_item_list_found = retrieve_results['ballot_item_list_found']
ballot_item_list = retrieve_results['ballot_item_list']
if not ballot_item_list_found:
error_results = {
'success': False,
'status': status,
'offices_dict': offices_dict,
'measures_dict': measures_dict,
}
return error_results
ballot_item_manager = BallotItemManager()
measure_manager = ContestMeasureManager()
office_manager = ContestOfficeManager()
measures_not_found = []
offices_not_found = []
for one_ballot_item in ballot_item_list:
defaults = {}
google_ballot_placement = one_ballot_item.google_ballot_placement
ballot_item_display_name = one_ballot_item.ballot_item_display_name
measure_subtitle = one_ballot_item.measure_subtitle
measure_text = one_ballot_item.measure_text
if positive_value_exists(one_ballot_item.contest_measure_we_vote_id):
measure_found = False
if one_ballot_item.contest_measure_we_vote_id in measures_dict:
measure = measures_dict[one_ballot_item.contest_measure_we_vote_id]
measure_found = True
else:
if one_ballot_item.contest_measure_we_vote_id not in measures_not_found:
results = measure_manager.retrieve_contest_measure_from_we_vote_id(
one_ballot_item.contest_measure_we_vote_id)
if results['contest_measure_found']:
measure = results['contest_measure']
measures_dict[measure.we_vote_id] = measure
measure_found = True
else:
measures_not_found.append(one_ballot_item.contest_measure_we_vote_id)
if measure_found:
defaults['measure_url'] = measure.get_measure_url()
defaults['yes_vote_description'] = measure.ballotpedia_yes_vote_description
defaults['no_vote_description'] = measure.ballotpedia_no_vote_description
google_ballot_placement = measure.google_ballot_placement
ballot_item_display_name = measure.measure_title
measure_subtitle = measure.measure_subtitle
measure_text = measure.measure_text
elif positive_value_exists(one_ballot_item.contest_office_we_vote_id):
office_found = False
if one_ballot_item.contest_office_we_vote_id in offices_dict:
office = offices_dict[one_ballot_item.contest_office_we_vote_id]
office_found = True
else:
if one_ballot_item.contest_office_we_vote_id not in offices_not_found:
results = office_manager.retrieve_contest_office_from_we_vote_id(
one_ballot_item.contest_office_we_vote_id)
if results['contest_office_found']:
office = results['contest_office']
offices_dict[office.we_vote_id] = office
office_found = True
else:
offices_not_found.append(one_ballot_item.contest_office_we_vote_id)
if office_found:
google_ballot_placement = office.google_ballot_placement
ballot_item_display_name = office.office_name
create_results = ballot_item_manager.update_or_create_ballot_item_for_voter(
voter_id, google_civic_election_id, google_ballot_placement,
ballot_item_display_name, measure_subtitle,
measure_text,
one_ballot_item.local_ballot_order,
one_ballot_item.contest_office_id, one_ballot_item.contest_office_we_vote_id,
one_ballot_item.contest_measure_id, one_ballot_item.contest_measure_we_vote_id,
one_ballot_item.state_code, defaults)
if not create_results['success']:
status += create_results['status']
results = {
'success': True,
'status': status,
'offices_dict': offices_dict,
'measures_dict': measures_dict,
}
return results
def retrieve_possible_duplicate_ballot_items(self, ballot_item_display_name, google_civic_election_id,
polling_location_we_vote_id, voter_id,
contest_office_we_vote_id, contest_measure_we_vote_id,
state_code):
ballot_item_list_objects = []
ballot_item_list_found = False
ballot_item_list_count = 0
if not positive_value_exists(google_civic_election_id):
# We must have a google_civic_election_id
results = {
'success': False,
'status': "MISSING_GOOGLE_CIVIC_ELECTION_ID ",
'google_civic_election_id': google_civic_election_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list_objects,
}
return results
elif not positive_value_exists(polling_location_we_vote_id) \
and not positive_value_exists(voter_id):
# We must have a polling_location_we_vote_id to look up
results = {
'success': False,
'status': "MISSING_POLLING_LOCATION_WE_VOTE_ID_AND_VOTER_ID ",
'google_civic_election_id': google_civic_election_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list_objects,
}
return results
elif not positive_value_exists(ballot_item_display_name) \
and not positive_value_exists(contest_office_we_vote_id) \
and not positive_value_exists(contest_measure_we_vote_id):
# We must have at least one of these
results = {
'success': False,
'status': "MISSING_MEASURE_AND_OFFICE_WE_VOTE_ID",
'google_civic_election_id': google_civic_election_id,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list_objects,
}
return results
try:
ballot_item_queryset = BallotItem.objects.all()
ballot_item_queryset = ballot_item_queryset.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(polling_location_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(
polling_location_we_vote_id__iexact=polling_location_we_vote_id)
else:
ballot_item_queryset = ballot_item_queryset.filter(
voter_id=voter_id)
if positive_value_exists(state_code):
ballot_item_queryset = ballot_item_queryset.filter(state_code__iexact=state_code)
# We want to find candidates with *any* of these values
if positive_value_exists(ballot_item_display_name):
ballot_item_queryset = ballot_item_queryset.filter(
ballot_item_display_name__iexact=ballot_item_display_name)
if positive_value_exists(contest_office_we_vote_id):
# Ignore entries with contest_office_we_vote_id coming in from master server
ballot_item_queryset = ballot_item_queryset.filter(~Q(
contest_office_we_vote_id__iexact=contest_office_we_vote_id))
elif positive_value_exists(contest_measure_we_vote_id):
# Ignore entries with contest_measure_we_vote_id coming in from master server
ballot_item_queryset = ballot_item_queryset.filter(~Q(
contest_measure_we_vote_id__iexact=contest_measure_we_vote_id))
elif positive_value_exists(contest_office_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(
contest_office_we_vote_id__iexact=contest_office_we_vote_id)
elif positive_value_exists(contest_measure_we_vote_id):
ballot_item_queryset = ballot_item_queryset.filter(
contest_measure_we_vote_id__iexact=contest_measure_we_vote_id)
ballot_item_list_objects = list(ballot_item_queryset)
ballot_item_list_count = len(ballot_item_list_objects)
if ballot_item_list_count:
ballot_item_list_found = True
status = 'DUPLICATE_BALLOT_ITEMS_RETRIEVED '
success = True
else:
status = 'NO_DUPLICATE_BALLOT_ITEMS_RETRIEVED '
success = True
except BallotItem.DoesNotExist:
# No ballot_items found. Not a problem.
status = 'NO_DUPLICATE_BALLOT_ITEMS_FOUND_DoesNotExist '
ballot_item_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_possible_duplicate_ballot_items ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'ballot_item_list_count': ballot_item_list_count,
'ballot_item_list_found': ballot_item_list_found,
'ballot_item_list': ballot_item_list_objects,
}
return results
class BallotReturned(models.Model):
"""
    This is a generated table summarizing which address + election combinations have returned ballot data
"""
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True,
blank=True, unique=True)
# Either voter_id or polling_location_we_vote_id will be set, but not both.
# The unique id of the voter for which this ballot was retrieved.
voter_id = models.IntegerField(verbose_name="the voter unique id", null=True, blank=True)
# The polling location for which this ballot was retrieved
polling_location_we_vote_id = models.CharField(
verbose_name="we vote permanent id of the polling location", max_length=255, default=None, null=True,
blank=True, unique=False)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, db_index=True)
# state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
election_description_text = models.CharField(max_length=255, blank=False, null=False,
verbose_name='text label for this election')
election_date = models.DateField(verbose_name='election start date', null=True, auto_now=False)
# Should we show this ballot as an option for this election?
ballot_location_display_option_on = models.BooleanField(default=False)
ballot_location_display_name = models.CharField(verbose_name='name that shows in button',
max_length=255, blank=True, null=True, db_index=True)
ballot_location_shortcut = models.CharField(verbose_name='the url string to find this location',
max_length=255, blank=True, null=True)
ballot_location_order = models.PositiveIntegerField(
verbose_name="order of these ballot locations in display", default=0, null=False)
text_for_map_search = models.CharField(max_length=255, blank=False, null=False, verbose_name='address as entered')
latitude = models.FloatField(null=True, verbose_name='latitude returned from Google')
longitude = models.FloatField(null=True, verbose_name='longitude returned from Google')
normalized_line1 = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized address line 1 returned from Google')
normalized_line2 = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized address line 2 returned from Google')
normalized_city = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized city returned from Google')
normalized_state = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized state returned from Google')
normalized_zip = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized zip returned from Google')
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this voter_guide came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
self.generate_new_we_vote_id()
super(BallotReturned, self).save(*args, **kwargs)
def generate_new_we_vote_id(self):
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_ballot_returned_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "ballot" = tells us this is a unique id for a ballot_returned entry
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
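        # e.g. a generated id might look like "wv02ballot1234" (the "02" prefix and the integer are
        # hypothetical example values)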
self.we_vote_id = "wv{site_unique_id_prefix}ballot{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
# TODO we need to deal with the situation where we_vote_id is NOT unique on save
return
def election_day_text(self):
if isinstance(self.election_date, date):
            # Only format election_date when it is an actual date object (it can also be unset)
return self.election_date.strftime('%Y-%m-%d')
else:
return ""
class BallotReturnedManager(models.Model):
"""
Scenario where we get an incomplete address and Google Civic can't find it:
1. A voter enters an address into text_for_map_search.
2. Try to get ballot from Google Civic, if no response found...
3. We search for the closest address for this election in the ballot_returned table.
4. We find the closest address.
5. We then assemble the ballot from the ballot items table so we can offer that to the new voter.
6. Copy these ballot items over to this new voter
New ballot comes in and we want to cache it:
    1. Search by voter_id (or polling_location_we_vote_id) + google_civic_election_id to see if we have an entry
2. If so, update it. If not...
3. Save a new entry and attach it to either voter_id or polling_location_we_vote_id.
NOTE: I think it will be faster to just always save an entry at an address on the chance there are some
duplicates, instead of burning the db cycles to search for an existing entry
"""
def __unicode__(self):
return "BallotReturnedManager"
def remove_duplicate_ballot_returned_entries(self, google_civic_election_id, polling_location_we_vote_id, voter_id):
status = ""
success = ""
ballot_returned_found = False
ballot_returned = None
ballot_returned_list_manager = BallotReturnedListManager()
retrieve_results = ballot_returned_list_manager.retrieve_ballot_returned_duplicate_list(
google_civic_election_id, polling_location_we_vote_id, voter_id)
if retrieve_results['ballot_returned_list_count'] == 1:
# Only one found
ballot_returned_list = retrieve_results['ballot_returned_list']
ballot_returned = ballot_returned_list[0]
ballot_returned_found = True
elif retrieve_results['ballot_returned_list_count'] > 1:
# If here, we found a duplicate
first_one_kept = False
ballot_returned_list = retrieve_results['ballot_returned_list']
for one_ballot_returned in ballot_returned_list:
if first_one_kept:
one_ballot_returned.delete()
else:
ballot_returned = one_ballot_returned
ballot_returned_found = True
first_one_kept = True
results = {
"status": status,
"success": success,
"ballot_returned_found": ballot_returned_found,
"ballot_returned": ballot_returned,
}
return results
def retrieve_ballot_returned_from_google_civic_election_id(self, google_civic_election_id):
ballot_returned_id = 0
ballot_returned_manager = BallotReturnedManager()
return ballot_returned_manager.retrieve_existing_ballot_returned_by_identifier(ballot_returned_id,
google_civic_election_id)
def retrieve_ballot_returned_from_voter_id(self, voter_id, google_civic_election_id):
ballot_returned_id = 0
ballot_returned_manager = BallotReturnedManager()
return ballot_returned_manager.retrieve_existing_ballot_returned_by_identifier(ballot_returned_id,
google_civic_election_id,
voter_id)
def retrieve_ballot_returned_from_polling_location_we_vote_id(self, polling_location_we_vote_id,
google_civic_election_id):
ballot_returned_id = 0
voter_id = 0
ballot_returned_manager = BallotReturnedManager()
return ballot_returned_manager.retrieve_existing_ballot_returned_by_identifier(ballot_returned_id,
google_civic_election_id,
voter_id,
polling_location_we_vote_id)
def retrieve_ballot_returned_from_ballot_returned_we_vote_id(self, ballot_returned_we_vote_id):
ballot_returned_id = 0
google_civic_election_id = 0
voter_id = 0
polling_location_we_vote_id = ''
ballot_returned_manager = BallotReturnedManager()
return ballot_returned_manager.retrieve_existing_ballot_returned_by_identifier(
ballot_returned_id, google_civic_election_id, voter_id, polling_location_we_vote_id,
ballot_returned_we_vote_id)
def retrieve_ballot_returned_from_ballot_location_shortcut(self, ballot_location_shortcut):
ballot_returned_id = 0
google_civic_election_id = 0
voter_id = 0
polling_location_we_vote_id = ''
ballot_returned_we_vote_id = ''
ballot_returned_manager = BallotReturnedManager()
return ballot_returned_manager.retrieve_existing_ballot_returned_by_identifier(
ballot_returned_id, google_civic_election_id, voter_id, polling_location_we_vote_id,
ballot_returned_we_vote_id, ballot_location_shortcut)
def retrieve_existing_ballot_returned_by_identifier(
self, ballot_returned_id, google_civic_election_id=0, voter_id=0, polling_location_we_vote_id='',
ballot_returned_we_vote_id='', ballot_location_shortcut=''):
"""
        Search by voter_id (or polling_location_we_vote_id) + google_civic_election_id to see if we have an entry
:param ballot_returned_id:
:param google_civic_election_id:
:param voter_id:
:param polling_location_we_vote_id:
:param ballot_returned_we_vote_id:
:param ballot_location_shortcut:
:return:
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
ballot_returned_found = False
ballot_returned = BallotReturned()
try:
if positive_value_exists(ballot_returned_id):
ballot_returned = BallotReturned.objects.get(id=ballot_returned_id)
# If still here, we found an existing ballot_returned
ballot_returned_id = ballot_returned.id
ballot_returned_found = True if positive_value_exists(ballot_returned_id) else False
success = True
status = "BALLOT_RETURNED_FOUND_FROM_VOTER_ID "
elif positive_value_exists(ballot_returned_we_vote_id):
ballot_returned = BallotReturned.objects.get(we_vote_id__iexact=ballot_returned_we_vote_id)
# If still here, we found an existing ballot_returned
ballot_returned_id = ballot_returned.id
ballot_returned_found = True if positive_value_exists(ballot_returned_id) else False
success = True
status = "BALLOT_RETURNED_FOUND_FROM_BALLOT_RETURNED_WE_VOTE_ID "
elif positive_value_exists(ballot_location_shortcut):
ballot_returned = BallotReturned.objects.get(ballot_location_shortcut=ballot_location_shortcut)
# If still here, we found an existing ballot_returned
ballot_returned_id = ballot_returned.id
ballot_returned_found = True if positive_value_exists(ballot_returned_id) else False
success = True
status = "BALLOT_RETURNED_FOUND_FROM_BALLOT_RETURNED_LOCATION_SHORTCUT "
elif positive_value_exists(voter_id) and positive_value_exists(google_civic_election_id):
ballot_returned = BallotReturned.objects.get(voter_id=voter_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing ballot_returned
ballot_returned_id = ballot_returned.id
ballot_returned_found = True if positive_value_exists(ballot_returned_id) else False
success = True
status = "BALLOT_RETURNED_FOUND_FROM_VOTER_ID "
elif positive_value_exists(polling_location_we_vote_id) and positive_value_exists(google_civic_election_id):
ballot_returned = BallotReturned.objects.get(polling_location_we_vote_id=polling_location_we_vote_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing ballot_returned
ballot_returned_id = ballot_returned.id
ballot_returned_found = True if positive_value_exists(ballot_returned_id) else False
success = True
status = "BALLOT_RETURNED_FOUND_FROM_POLLING_LOCATION_WE_VOTE_ID "
elif positive_value_exists(google_civic_election_id):
ballot_returned_query = BallotReturned.objects.filter(google_civic_election_id=google_civic_election_id)
ballot_returned_query = ballot_returned_query.order_by("-ballot_location_shortcut")
ballot_returned = ballot_returned_query.first()
if ballot_returned and hasattr(ballot_returned, "id"):
# If still here, we found an existing ballot_returned
ballot_returned_id = ballot_returned.id
ballot_returned_found = True if positive_value_exists(ballot_returned_id) else False
success = True
status = "BALLOT_RETURNED_FOUND_FROM_GOOGLE_CIVIC_ELECTION_ID "
else:
ballot_returned_found = False
success = True
status = "BALLOT_RETURNED_NOT_FOUND_FROM_GOOGLE_CIVIC_ELECTION_ID "
else:
ballot_returned_found = False
success = False
status = "COULD_NOT_RETRIEVE_BALLOT_RETURNED-MISSING_VARIABLES "
except BallotReturned.MultipleObjectsReturned as e:
exception_multiple_object_returned = True
success = False
status = "MULTIPLE_BALLOT_RETURNED-MUST_DELETE_ALL "
except BallotReturned.DoesNotExist:
exception_does_not_exist = True
success = True
status = "BALLOT_RETURNED_NOT_FOUND "
except Exception as e:
success = False
status = "COULD_NOT_RETRIEVE_BALLOT_RETURNED-EXCEPTION "
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'ballot_returned_found': ballot_returned_found,
'ballot_returned': ballot_returned,
}
return results
def create_ballot_returned_with_normalized_values(self, google_civic_address_dict,
election_day_text, election_description_text,
google_civic_election_id,
voter_id=0, polling_location_we_vote_id='',
latitude='', longitude=''):
# Protect against ever saving test elections in the BallotReturned table
if positive_value_exists(google_civic_election_id) and convert_to_int(google_civic_election_id) == 2000:
results = {
'status': "CHOSE_TO_NOT_SAVE_BALLOT_RETURNED_FOR_TEST_ELECTION",
'success': True,
'ballot_returned': None,
'ballot_returned_found': False,
}
return results
# We assume that we tried to find an entry for this voter or polling_location
try:
ballot_returned_id = 0
if positive_value_exists(voter_id) and positive_value_exists(google_civic_election_id):
ballot_returned = BallotReturned.objects.create(google_civic_election_id=google_civic_election_id,
voter_id=voter_id,
election_date=election_day_text,
election_description_text=election_description_text)
ballot_returned_id = ballot_returned.id
elif positive_value_exists(polling_location_we_vote_id) and positive_value_exists(google_civic_election_id):
ballot_returned = BallotReturned.objects.create(google_civic_election_id=google_civic_election_id,
polling_location_we_vote_id=polling_location_we_vote_id,
election_date=election_day_text,
election_description_text=election_description_text)
ballot_returned_id = ballot_returned.id
else:
ballot_returned = None
if positive_value_exists(ballot_returned_id):
text_for_map_search = ''
if 'line1' in google_civic_address_dict:
ballot_returned.normalized_line1 = google_civic_address_dict['line1']
text_for_map_search += ballot_returned.normalized_line1 + ", "
if 'line2' in google_civic_address_dict:
ballot_returned.normalized_line2 = google_civic_address_dict['line2']
text_for_map_search += ballot_returned.normalized_line2 + ", "
ballot_returned.normalized_city = google_civic_address_dict['city']
text_for_map_search += ballot_returned.normalized_city + ", "
ballot_returned.normalized_state = google_civic_address_dict['state']
text_for_map_search += ballot_returned.normalized_state + " "
if 'zip' in google_civic_address_dict:
ballot_returned.normalized_zip = google_civic_address_dict['zip']
text_for_map_search += ballot_returned.normalized_zip
if latitude or longitude:
ballot_returned.latitude = latitude
ballot_returned.longitude = longitude
ballot_returned.text_for_map_search = text_for_map_search
ballot_returned.save()
status = "SAVED_BALLOT_RETURNED_WITH_NORMALIZED_VALUES"
success = True
ballot_returned_found = True
else:
status = "UNABLE_TO_CREATE_BALLOT_RETURNED_WITH_NORMALIZED_VALUES"
success = False
ballot_returned_found = False
except Exception as e:
status = "UNABLE_TO_CREATE_BALLOT_RETURNED_WITH_NORMALIZED_VALUES_EXCEPTION"
success = False
ballot_returned = None
ballot_returned_found = False
results = {
'status': status,
'success': success,
'ballot_returned': ballot_returned,
'ballot_returned_found': ballot_returned_found,
}
return results
def is_ballot_returned_different(self, google_civic_address_dict, ballot_returned):
if 'line1' in google_civic_address_dict:
if not ballot_returned.normalized_line1 == google_civic_address_dict['line1']:
return True
elif positive_value_exists(ballot_returned.normalized_line1):
return True
if 'line2' in google_civic_address_dict:
if not ballot_returned.normalized_line2 == google_civic_address_dict['line2']:
return True
elif positive_value_exists(ballot_returned.normalized_line2):
return True
if not ballot_returned.normalized_city == google_civic_address_dict['city']:
return True
if not ballot_returned.normalized_state == google_civic_address_dict['state']:
return True
if 'zip' in google_civic_address_dict:
if not ballot_returned.normalized_zip == google_civic_address_dict['zip']:
return True
elif positive_value_exists(ballot_returned.normalized_zip):
return True
if not positive_value_exists(ballot_returned.text_for_map_search):
return True
return False
def update_ballot_returned_with_normalized_values(self, google_civic_address_dict, ballot_returned,
latitude='', longitude=''):
try:
text_for_map_search = ''
if self.is_ballot_returned_different(google_civic_address_dict, ballot_returned):
if 'line1' in google_civic_address_dict:
ballot_returned.normalized_line1 = google_civic_address_dict['line1']
text_for_map_search += ballot_returned.normalized_line1 + ", "
if 'line2' in google_civic_address_dict:
ballot_returned.normalized_line2 = google_civic_address_dict['line2']
text_for_map_search += ballot_returned.normalized_line2 + ", "
ballot_returned.normalized_city = google_civic_address_dict['city']
text_for_map_search += ballot_returned.normalized_city + ", "
ballot_returned.normalized_state = google_civic_address_dict['state']
text_for_map_search += ballot_returned.normalized_state + " "
if 'zip' in google_civic_address_dict:
ballot_returned.normalized_zip = google_civic_address_dict['zip']
text_for_map_search += ballot_returned.normalized_zip
if latitude or longitude:
ballot_returned.latitude = latitude
ballot_returned.longitude = longitude
ballot_returned.text_for_map_search = text_for_map_search
ballot_returned.save()
status = "UPDATED_BALLOT_RETURNED_WITH_NORMALIZED_VALUES "
success = True
else:
status = "BALLOT_RETURNED_ALREADY_MATCHES_NORMALIZED_VALUES "
success = False
except Exception as e:
status = "UNABLE_TO_UPDATE_BALLOT_RETURNED_WITH_NORMALIZED_VALUES_EXCEPTION "
success = False
results = {
'status': status,
'success': success,
'ballot_returned': ballot_returned,
}
return results
def fetch_last_election_in_this_state(self, state_code):
"""
Find the last election (in the past) that has at least one ballot at a polling location
:param state_code:
:return:
"""
if not positive_value_exists(state_code):
return 0
election_manager = ElectionManager()
election_results = election_manager.retrieve_elections_by_date()
filtered_election_list = []
today = datetime.now().date()
today_date_as_integer = convert_date_to_date_as_integer(today)
if election_results['success']:
# These elections are sorted by most recent to least recent
election_list = election_results['election_list']
for election in election_list:
# Filter out elections later than today
if not positive_value_exists(election.election_day_text):
continue
election_date_as_simple_string = election.election_day_text.replace("-", "")
this_election_date_as_integer = convert_to_int(election_date_as_simple_string)
if this_election_date_as_integer > today_date_as_integer:
continue
# Leave national elections which have a blank state_code, and then add elections in this state
if not positive_value_exists(election.state_code):
filtered_election_list.append(election)
elif election.state_code.lower() == state_code.lower():
filtered_election_list.append(election)
else:
# Neither national nor in this state
pass
if not len(filtered_election_list):
return 0
# Start with list of elections (before today) in this state,
# including national but without elections in other states
for election in filtered_election_list:
try:
# Loop backwards in time until we find an election with at least one ballot_returned entry
ballot_returned_query = BallotReturned.objects.filter(
google_civic_election_id=election.google_civic_election_id)
# Only return entries saved for polling_locations
ballot_returned_query = ballot_returned_query.exclude(polling_location_we_vote_id=None)
at_least_one_ballot_returned_for_election = ballot_returned_query.count()
if positive_value_exists(at_least_one_ballot_returned_for_election):
# Break out and return this election_id
return election.google_civic_election_id
except Exception as e:
return 0
        # If we got through the elections without finding any ballot_returned entries, there are no prior elections
return 0
def fetch_next_upcoming_election_in_this_state(self, state_code):
"""
Find the soonest upcoming election in the future with at least one ballot at a polling location
:param state_code:
:return:
"""
if not positive_value_exists(state_code):
return 0
election_manager = ElectionManager()
newest_to_oldest = False # We want oldest to newest since we are looking for the next election
election_results = election_manager.retrieve_elections_by_date(newest_to_oldest)
filtered_election_list = []
today = datetime.now().date()
today_date_as_integer = convert_date_to_date_as_integer(today)
if election_results['success']:
# These elections are sorted by today, then tomorrow, etc
election_list = election_results['election_list']
for election in election_list:
# Filter out elections earlier than today
if not positive_value_exists(election.election_day_text):
continue
election_date_as_simple_string = election.election_day_text.replace("-", "")
this_election_date_as_integer = convert_to_int(election_date_as_simple_string)
if this_election_date_as_integer < today_date_as_integer:
continue
# Leave national elections which have a blank state_code, and then add elections in this state
if not positive_value_exists(election.state_code):
filtered_election_list.append(election)
elif election.state_code.lower() == state_code.lower():
filtered_election_list.append(election)
else:
# Neither national nor in this state
pass
if not len(filtered_election_list):
return 0
        # Start with the list of elections (today or later) in this state,
        # including national but without elections in other states
for election in filtered_election_list:
try:
                # Loop forward in time until we find an election with at least one ballot_returned entry
ballot_returned_query = BallotReturned.objects.filter(
google_civic_election_id=election.google_civic_election_id)
# Only return entries saved for polling_locations
ballot_returned_query = ballot_returned_query.exclude(polling_location_we_vote_id=None)
at_least_one_ballot_returned_for_election = ballot_returned_query.count()
if positive_value_exists(at_least_one_ballot_returned_for_election):
# Break out and return this election_id
return election.google_civic_election_id
except Exception as e:
return 0
        # If we got through the elections without finding any ballot_returned entries, there is no upcoming election
return 0
def find_closest_ballot_returned(self, text_for_map_search, google_civic_election_id=0):
"""
We search for the closest address for this election in the ballot_returned table. We never have to worry
about test elections being returned with this routine, because we don't store ballot_returned entries for
test elections.
:param text_for_map_search:
:param google_civic_election_id:
:return:
"""
ballot_returned_found = False
ballot_returned = None
location = None
try_without_maps_key = False
status = ""
state_code = ""
if not positive_value_exists(text_for_map_search):
status += "FIND_CLOSEST_BALLOT_RETURNED-NO_TEXT_FOR_MAP_SEARCH "
return {
'status': status,
'geocoder_quota_exceeded': False,
'ballot_returned_found': ballot_returned_found,
'ballot_returned': ballot_returned,
}
if not hasattr(self, 'google_client') or not self.google_client:
self.google_client = get_geocoder_for_service('google')(GOOGLE_MAPS_API_KEY)
try:
location = self.google_client.geocode(text_for_map_search, sensor=False, timeout=GEOCODE_TIMEOUT)
except GeocoderQuotaExceeded:
try_without_maps_key = True
status += "GEOCODER_QUOTA_EXCEEDED "
except Exception as e:
try_without_maps_key = True
status += 'GEOCODER_ERROR {error} [type: {error_type}] '.format(error=e, error_type=type(e))
logger.info(status + " @ " + text_for_map_search + " google_civic_election_id=" +
str(google_civic_election_id))
if try_without_maps_key:
# If we have exceeded our account, try without a maps key
try:
temp_google_client = get_geocoder_for_service('google')()
location = temp_google_client.geocode(text_for_map_search, sensor=False, timeout=GEOCODE_TIMEOUT)
except GeocoderQuotaExceeded:
results = {
'status': status,
'geocoder_quota_exceeded': True,
'ballot_returned_found': ballot_returned_found,
'ballot_returned': ballot_returned,
}
return results
except Exception as e:
location = None
ballot = None
if location is None:
status += 'Geocoder could not find location matching "{}". Trying City, State. '.format(text_for_map_search)
            # If the geocoder is not able to give us a location, look to see if the voter entered their address as
# "city_name, state_code" eg: "Sunnyvale, CA". If so, try to parse the entry and get ballot data
# for that location
ballot_returned_query = BallotReturned.objects.all()
# Limit this query to entries stored for polling locations
ballot_returned_query = ballot_returned_query.exclude(
Q(polling_location_we_vote_id__isnull=True) | Q(polling_location_we_vote_id=""))
if "," in text_for_map_search:
address = text_for_map_search
state_code = address.split(', ')[-1]
state_code = state_code.upper()
city = address.split(', ')[-2]
city = city.lower()
if positive_value_exists(state_code):
ballot_returned_query = ballot_returned_query.filter(normalized_state__iexact=state_code)
# Searching by city is not critical for internal testing, and can cause problems
# if positive_value_exists(city):
# ballot_returned_query = ballot_returned_query.filter(normalized_city__iexact=city)
else:
ballot_returned_query = ballot_returned_query.filter(text_for_map_search__icontains=text_for_map_search)
if positive_value_exists(google_civic_election_id):
ballot_returned_query = ballot_returned_query.filter(google_civic_election_id=google_civic_election_id)
else:
# If we have an active election coming up, including today
# fetch_next_upcoming_election_in_this_state returns next election with ballot items
upcoming_google_civic_election_id = self.fetch_next_upcoming_election_in_this_state(state_code)
if positive_value_exists(upcoming_google_civic_election_id):
ballot_returned_query = ballot_returned_query.filter(
google_civic_election_id=upcoming_google_civic_election_id)
else:
past_google_civic_election_id = self.fetch_last_election_in_this_state(state_code)
if positive_value_exists(past_google_civic_election_id):
# Limit the search to the most recent election with ballot items
ballot_returned_query = ballot_returned_query.filter(
google_civic_election_id=past_google_civic_election_id)
ballot = ballot_returned_query.first()
else:
# If here, then the geocoder successfully found the address
status += 'GEOCODER_FOUND_LOCATION '
address = location.address
# address has format "line_1, state zip, USA"
ballot_returned_query = BallotReturned.objects.all()
# Limit this query to entries stored for polling locations
ballot_returned_query = ballot_returned_query.exclude(
Q(polling_location_we_vote_id__isnull=True) | Q(polling_location_we_vote_id=""))
if positive_value_exists(address):
state_code = address.split(', ')[-2][:2]
if positive_value_exists(state_code):
# This search for normalized_state is NOT redundant because some elections are in many states
ballot_returned_query = ballot_returned_query.filter(normalized_state__iexact=state_code)
if positive_value_exists(google_civic_election_id):
ballot_returned_query = ballot_returned_query.filter(google_civic_election_id=google_civic_election_id)
else:
# If we have an active election coming up, including today
# fetch_next_upcoming_election_in_this_state returns next election with ballot items
upcoming_google_civic_election_id = self.fetch_next_upcoming_election_in_this_state(state_code)
if positive_value_exists(upcoming_google_civic_election_id):
ballot_returned_query = ballot_returned_query.filter(
google_civic_election_id=upcoming_google_civic_election_id)
else:
past_google_civic_election_id = self.fetch_last_election_in_this_state(state_code)
if positive_value_exists(past_google_civic_election_id):
# Limit the search to the most recent election with ballot items
ballot_returned_query = ballot_returned_query.filter(
google_civic_election_id=past_google_civic_election_id)
# TODO: This should be updated to a more modern approach. I think this will be deprecated in > Django 1.9
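            # One possible alternative (an assumption, not currently used here): with GeoDjango
            # (django.contrib.gis) the location could be stored as a PointField and the query ordered by
            # the Distance() database function, avoiding this hand-rolled squared-degrees annotation.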
ballot_returned_query = ballot_returned_query.annotate(distance=(F('latitude') - location.latitude) ** 2 +
(F('longitude') - location.longitude) ** 2)
ballot_returned_query = ballot_returned_query.order_by('distance')
ballot = ballot_returned_query.first()
# ballot_returned_list = list(ballot_returned_query)
# if positive_value_exists(len(ballot_returned_list)):
# ballot = ballot_returned_list[0]
if ballot is not None:
ballot_returned = ballot
ballot_returned_found = True
status += 'BALLOT_RETURNED_FOUND '
else:
status += 'NO_STORED_BALLOT_MATCHES_STATE {}. '.format(state_code)
return {
'status': status,
'geocoder_quota_exceeded': False,
'ballot_returned_found': ballot_returned_found,
'ballot_returned': ballot_returned,
}
def should_election_search_data_be_saved(self, google_civic_election_id):
if not positive_value_exists(google_civic_election_id):
return False
else:
ballot_returned_list_manager = BallotReturnedListManager()
ballot_returned_list_count = ballot_returned_list_manager.fetch_ballot_returned_list_count_for_election(
google_civic_election_id)
if positive_value_exists(ballot_returned_list_count):
return True
return False
def update_or_create_ballot_returned(
self, polling_location_we_vote_id, voter_id, google_civic_election_id, election_day_text=False,
election_description_text=False, latitude=False, longitude=False,
normalized_city=False, normalized_line1=False, normalized_line2=False, normalized_state=False,
normalized_zip=False, text_for_map_search=False, ballot_location_display_name=False):
status = ""
exception_multiple_object_returned = False
new_ballot_returned_created = False
google_civic_election_id = convert_to_int(google_civic_election_id)
ballot_returned = None
ballot_returned_found = False
delete_extra_ballot_returned_entries = False
success = True
if not google_civic_election_id:
success = False
status += 'MISSING_GOOGLE_CIVIC_ELECTION_ID-update_or_create_ballot_returned '
elif (not polling_location_we_vote_id) and (not voter_id):
success = False
status += 'MISSING_BALLOT_RETURNED_POLLING_LOCATION_AND_VOTER_ID-update_or_create_ballot_returned '
else:
try:
ballot_returned, new_ballot_returned_created = BallotReturned.objects.get_or_create(
google_civic_election_id__exact=google_civic_election_id,
polling_location_we_vote_id=polling_location_we_vote_id,
voter_id=voter_id
)
ballot_returned_found = True
except BallotReturned.MultipleObjectsReturned as e:
status += 'MULTIPLE_MATCHING_BALLOT_RETURNED_FOUND '
status += 'google_civic_election_id: ' + str(google_civic_election_id) + " "
status += 'polling_location_we_vote_id: ' + str(polling_location_we_vote_id) + " "
status += 'voter_id: ' + str(voter_id) + " "
handle_record_found_more_than_one_exception(e, logger=logger, exception_message_optional=status)
success = False
exception_multiple_object_returned = True
delete_extra_ballot_returned_entries = True
except Exception as e:
status += 'UNABLE_TO_GET_OR_CREATE_BALLOT_RETURNED '
handle_exception(e, logger=logger, exception_message=status)
success = False
delete_extra_ballot_returned_entries = True
if positive_value_exists(delete_extra_ballot_returned_entries):
success = False
ballot_returned_manager = BallotReturnedManager()
results = ballot_returned_manager.remove_duplicate_ballot_returned_entries(
google_civic_election_id, polling_location_we_vote_id, voter_id)
if results['ballot_returned_found']:
ballot_returned_found = True
ballot_returned = results['ballot_returned']
success = True
if positive_value_exists(ballot_returned_found):
try:
if not positive_value_exists(ballot_returned.google_civic_election_id):
ballot_returned.google_civic_election_id = google_civic_election_id
if not positive_value_exists(ballot_returned.voter_id):
ballot_returned.voter_id = voter_id
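                # Note: the optional parameters below default to False (not None), so "is not False" means
                # "a value was explicitly passed in"; this lets callers intentionally store falsy values
                # such as an empty string.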
if ballot_location_display_name is not False:
ballot_returned.ballot_location_display_name = ballot_location_display_name
if election_day_text is not False and election_day_text is not None:
ballot_returned.election_date = datetime.strptime(election_day_text, "%Y-%m-%d").date()
if election_description_text is not False:
ballot_returned.election_description_text = election_description_text
if latitude is not False:
ballot_returned.latitude = latitude
if longitude is not False:
ballot_returned.longitude = longitude
if normalized_city is not False:
ballot_returned.normalized_city = normalized_city
if normalized_line1 is not False:
ballot_returned.normalized_line1 = normalized_line1
if normalized_line2 is not False:
ballot_returned.normalized_line2 = normalized_line2
if normalized_state is not False:
ballot_returned.normalized_state = normalized_state
if normalized_zip is not False:
ballot_returned.normalized_zip = normalized_zip
if text_for_map_search is not False:
ballot_returned.text_for_map_search = text_for_map_search
ballot_returned.save()
if new_ballot_returned_created:
success = True
status += 'BALLOT_RETURNED_CREATED '
else:
success = True
status += 'BALLOT_RETURNED_UPDATED '
except Exception as e:
status += 'UNABLE_TO_SAVE_BALLOT_RETURNED '
handle_exception(e, logger=logger, exception_message=status)
success = False
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'ballot_returned_found': ballot_returned_found,
'ballot_returned': ballot_returned,
'new_ballot_returned_created': new_ballot_returned_created,
}
return results
def populate_latitude_and_longitude_for_ballot_returned(self, ballot_returned_object):
"""
We use the google geocoder in partnership with geoip
:param ballot_returned_object:
:return:
"""
status = ""
# We try to use existing google_client
if not hasattr(self, 'google_client') or not self.google_client:
self.google_client = get_geocoder_for_service('google')(GOOGLE_MAPS_API_KEY)
if not hasattr(ballot_returned_object, "normalized_line1"):
results = {
'status': "POPULATE_LATITUDE_AND_LONGITUDE-NOT_A_BALLOT_RETURNED_OBJECT ",
'geocoder_quota_exceeded': False,
'success': False,
}
return results
if not positive_value_exists(ballot_returned_object.normalized_line1) or not \
positive_value_exists(ballot_returned_object.normalized_city) or not \
positive_value_exists(ballot_returned_object.normalized_state) or not \
positive_value_exists(ballot_returned_object.normalized_zip):
# We require all four values
results = {
'status': "POPULATE_LATITUDE_AND_LONGITUDE-MISSING_REQUIRED_ADDRESS_INFO ",
'geocoder_quota_exceeded': False,
'success': False,
}
return results
full_ballot_address = '{}, {}, {} {}'.format(
ballot_returned_object.normalized_line1,
ballot_returned_object.normalized_city,
ballot_returned_object.normalized_state,
ballot_returned_object.normalized_zip)
try:
location = self.google_client.geocode(full_ballot_address, sensor=False, timeout=GEOCODE_TIMEOUT)
except GeocoderQuotaExceeded:
status += "GeocoderQuotaExceeded "
results = {
'status': status,
'geocoder_quota_exceeded': True,
'success': False,
}
return results
except Exception as e:
status += "Geocoder-Exception: " + str(e) + " "
results = {
'status': status,
'geocoder_quota_exceeded': False,
'success': False,
}
return results
if location is None:
results = {
'status': "POPULATE_LATITUDE_AND_LONGITUDE-LOCATION_NOT_RETURNED_FROM_GEOCODER ",
'geocoder_quota_exceeded': False,
'success': False,
}
return results
try:
ballot_returned_object.latitude, ballot_returned_object.longitude = location.latitude, location.longitude
ballot_returned_object.save()
status += "BALLOT_RETURNED_SAVED_WITH_LATITUDE_AND_LONGITUDE "
success = True
except Exception as e:
status += "BALLOT_RETURNED_NOT_SAVED_WITH_LATITUDE_AND_LONGITUDE "
success = False
results = {
'status': status,
'geocoder_quota_exceeded': False,
'success': success,
}
return results
class BallotReturnedListManager(models.Model):
"""
A way to work with a list of ballot_returned entries
"""
def fetch_ballot_location_display_option_on_count_for_election(self, google_civic_election_id, state_code=''):
google_civic_election_id = convert_to_int(google_civic_election_id)
try:
ballot_returned_queryset = BallotReturned.objects.all()
ballot_returned_queryset = ballot_returned_queryset.filter(
google_civic_election_id=google_civic_election_id)
ballot_returned_queryset = ballot_returned_queryset.filter(ballot_location_display_option_on=True)
if positive_value_exists(state_code):
ballot_returned_queryset = ballot_returned_queryset.filter(normalized_state__iexact=state_code)
return ballot_returned_queryset.count()
except BallotReturned.DoesNotExist:
# No ballot items found. Not a problem.
pass
except Exception as e:
pass
return 0
def retrieve_ballot_returned_list(self, google_civic_election_id, polling_location_we_vote_id, limit=0):
google_civic_election_id = convert_to_int(google_civic_election_id)
ballot_returned_list = []
ballot_returned_list_found = False
if not positive_value_exists(google_civic_election_id) \
and not positive_value_exists(polling_location_we_vote_id):
results = {
'success': False,
'status': "RETRIEVE_BALLOT_RETURNED_MISSING_REQUIRED_VARIABLES ",
'ballot_returned_list_found': ballot_returned_list_found,
'ballot_returned_list': ballot_returned_list,
}
return results
try:
ballot_returned_queryset = BallotReturned.objects.all()
if positive_value_exists(google_civic_election_id):
ballot_returned_queryset = \
ballot_returned_queryset.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(polling_location_we_vote_id):
ballot_returned_queryset = \
ballot_returned_queryset.filter(polling_location_we_vote_id=polling_location_we_vote_id)
if positive_value_exists(limit):
ballot_returned_list = ballot_returned_queryset[:limit]
else:
ballot_returned_list = list(ballot_returned_queryset)
if len(ballot_returned_list):
ballot_returned_list_found = True
status = 'BALLOT_RETURNED_LIST_FOUND'
else:
status = 'NO_BALLOT_RETURNED_LIST_FOUND'
except BallotReturned.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_RETURNED_LIST_FOUND_DOES_NOT_EXIST'
ballot_returned_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_ballot_returned_list ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_returned_list_found else False,
'status': status,
'ballot_returned_list_found': ballot_returned_list_found,
'ballot_returned_list': ballot_returned_list,
}
return results
def retrieve_ballot_returned_duplicate_list(self, google_civic_election_id, polling_location_we_vote_id, voter_id):
success = True
status = ""
google_civic_election_id = convert_to_int(google_civic_election_id)
ballot_returned_list = []
ballot_returned_list_found = False
ballot_returned_list_count = 0
if not positive_value_exists(google_civic_election_id) \
or not positive_value_exists(polling_location_we_vote_id) \
or not positive_value_exists(voter_id):
status += "RETRIEVE_BALLOT_RETURNED_DUPLICATE_LIST_MISSING_REQUIRED_VARIABLES "
results = {
'success': False,
'status': status,
'ballot_returned_list': ballot_returned_list,
'ballot_returned_list_count': ballot_returned_list_count,
'ballot_returned_list_found': ballot_returned_list_found,
}
return results
try:
ballot_returned_queryset = BallotReturned.objects.all()
ballot_returned_queryset = \
ballot_returned_queryset.filter(google_civic_election_id=google_civic_election_id)
ballot_returned_queryset = \
ballot_returned_queryset.filter(polling_location_we_vote_id__iexact=polling_location_we_vote_id)
ballot_returned_queryset = ballot_returned_queryset.filter(voter_id=voter_id)
ballot_returned_list = list(ballot_returned_queryset)
ballot_returned_list_count = len(ballot_returned_list)
if positive_value_exists(ballot_returned_list_count):
ballot_returned_list_found = True
status += 'BALLOT_RETURNED_DUPLICATE_LIST_FOUND'
else:
status += 'NO_BALLOT_RETURNED_DUPLICATE_LIST_FOUND'
except BallotReturned.DoesNotExist:
# No ballot items found. Not a problem.
status += 'NO_BALLOT_RETURNED_DUPLICATE_LIST_FOUND_DOES_NOT_EXIST'
ballot_returned_list = []
except Exception as e:
handle_exception(e, logger=logger)
status += 'FAILED retrieve_ballot_returned_duplicate_list ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'ballot_returned_list': ballot_returned_list,
'ballot_returned_list_count': ballot_returned_list_count,
'ballot_returned_list_found': ballot_returned_list_found,
}
return results
def retrieve_ballot_returned_list_for_election(self, google_civic_election_id, state_code='', limit=0,
ballot_returned_search_str=''):
google_civic_election_id = convert_to_int(google_civic_election_id)
ballot_returned_list = []
ballot_returned_list_found = False
try:
ballot_returned_queryset = BallotReturned.objects.order_by('-id')
if positive_value_exists(ballot_returned_search_str):
filters = []
new_filter = Q(id__iexact=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(ballot_location_display_name__icontains=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(ballot_location_shortcut__icontains=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(text_for_map_search__icontains=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(normalized_state__icontains=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(we_vote_id__iexact=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(voter_id__iexact=ballot_returned_search_str)
filters.append(new_filter)
new_filter = Q(polling_location_we_vote_id__iexact=ballot_returned_search_str)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
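                # final_filters is now one Q object OR-ing every search term together, e.g.
                # Q(id__iexact=...) | Q(ballot_location_display_name__icontains=...) | ...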
ballot_returned_queryset = ballot_returned_queryset.filter(final_filters)
ballot_returned_queryset = ballot_returned_queryset.filter(
google_civic_election_id=google_civic_election_id)
if positive_value_exists(state_code):
ballot_returned_queryset = ballot_returned_queryset.filter(normalized_state__iexact=state_code)
ballot_returned_queryset = ballot_returned_queryset.order_by("ballot_location_display_name")
if positive_value_exists(limit):
ballot_returned_queryset = ballot_returned_queryset[:limit]
ballot_returned_list = ballot_returned_queryset
if len(ballot_returned_list):
ballot_returned_list_found = True
status = 'BALLOT_RETURNED_LIST_FOUND'
else:
status = 'NO_BALLOT_RETURNED_LIST_FOUND'
except BallotReturned.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_RETURNED_LIST_FOUND_DOES_NOT_EXIST'
ballot_returned_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_ballot_returned_list_for_election ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if ballot_returned_list_found else False,
'status': status,
'ballot_returned_list_found': ballot_returned_list_found,
'ballot_returned_list': ballot_returned_list,
}
return results
def fetch_ballot_returned_list_count_for_election(self, google_civic_election_id, state_code=''):
google_civic_election_id = convert_to_int(google_civic_election_id)
try:
ballot_returned_queryset = BallotReturned.objects.using('readonly').all()
ballot_returned_queryset = ballot_returned_queryset.filter(
google_civic_election_id=google_civic_election_id)
if positive_value_exists(state_code):
ballot_returned_queryset = ballot_returned_queryset.filter(normalized_state__iexact=state_code)
return ballot_returned_queryset.count()
except BallotReturned.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_RETURNED_LIST_FOUND_DOES_NOT_EXIST'
ballot_returned_list = []
except Exception as e:
handle_exception(e, logger=logger)
            status = 'FAILED fetch_ballot_returned_list_count_for_election ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
return 0
def fetch_ballot_returned_entries_needed_lat_long_for_election(self, google_civic_election_id, state_code=''):
google_civic_election_id = convert_to_int(google_civic_election_id)
try:
ballot_returned_queryset = BallotReturned.objects.using('readonly').all()
ballot_returned_queryset = ballot_returned_queryset.exclude(
Q(polling_location_we_vote_id=None) |
Q(polling_location_we_vote_id=""))
ballot_returned_queryset = ballot_returned_queryset.filter(latitude=None)
ballot_returned_queryset = ballot_returned_queryset.filter(
google_civic_election_id=google_civic_election_id)
if positive_value_exists(state_code):
ballot_returned_queryset = ballot_returned_queryset.filter(normalized_state__iexact=state_code)
return ballot_returned_queryset.count()
except BallotReturned.DoesNotExist:
# No ballot items found. Not a problem.
status = 'NO_BALLOT_RETURNED_LIST_FOUND_DOES_NOT_EXIST'
ballot_returned_list = []
except Exception as e:
handle_exception(e, logger=logger)
            status = 'FAILED fetch_ballot_returned_entries_needed_lat_long_for_election ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
return 0
def retrieve_possible_duplicate_ballot_returned(self, google_civic_election_id, normalized_line1, normalized_zip,
polling_location_we_vote_id):
ballot_returned_list_objects = []
ballot_returned_list_found = False
if not positive_value_exists(normalized_line1) \
and not positive_value_exists(normalized_zip):
results = {
'success': False,
'status': "MISSING_LINE1_AND_ZIP",
'google_civic_election_id': google_civic_election_id,
'ballot_returned_list_found': ballot_returned_list_found,
'ballot_returned_list': ballot_returned_list_objects,
}
return results
try:
ballot_returned_queryset = BallotReturned.objects.all()
ballot_returned_queryset = ballot_returned_queryset.filter(
google_civic_election_id=google_civic_election_id)
ballot_returned_queryset = ballot_returned_queryset.filter(normalized_line1__iexact=normalized_line1)
ballot_returned_queryset = ballot_returned_queryset.filter(normalized_zip__iexact=normalized_zip)
# Ignore entries with polling_location_we_vote_id coming in from master server
ballot_returned_queryset = ballot_returned_queryset.filter(~Q(
polling_location_we_vote_id__iexact=polling_location_we_vote_id))
ballot_returned_list_objects = ballot_returned_queryset
if len(ballot_returned_list_objects):
ballot_returned_list_found = True
status = 'DUPLICATE_BALLOT_RETURNED_ITEMS_RETRIEVED'
success = True
else:
status = 'NO_DUPLICATE_BALLOT_RETURNED_ITEMS_RETRIEVED'
success = True
except BallotReturned.DoesNotExist:
# No ballot_returned found. Not a problem.
status = 'NO_DUPLICATE_BALLOT_RETURNED_ITEMS_FOUND_DoesNotExist'
ballot_returned_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_possible_duplicate_ballot_returned ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'ballot_returned_list_found': ballot_returned_list_found,
'ballot_returned_list': ballot_returned_list_objects,
}
return results
class VoterBallotSaved(models.Model):
"""
    This is a table with metadata about the various elections a voter has looked at and might return to
"""
# The unique id of the voter for which this ballot was retrieved
voter_id = models.IntegerField(verbose_name="the voter unique id", default=0, null=False, blank=False)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
election_description_text = models.CharField(max_length=255, blank=False, null=False,
verbose_name='text label for this election')
# Note that internally we often use election_day_text ("YYYY-MM-DD") and then save it as election_date (DateField)
election_date = models.DateField(verbose_name='election start date', null=True, auto_now=False)
original_text_for_map_search = models.CharField(max_length=255, blank=False, null=False,
verbose_name='address as entered')
original_text_city = models.CharField(max_length=255, null=True)
original_text_state = models.CharField(max_length=255, null=True)
original_text_zip = models.CharField(max_length=255, null=True)
substituted_address_city = models.CharField(max_length=255, null=True)
substituted_address_state = models.CharField(max_length=255, null=True)
substituted_address_zip = models.CharField(max_length=255, null=True)
substituted_address_nearby = models.CharField(max_length=255, blank=False, null=False,
verbose_name='address from nearby ballot_returned')
is_from_substituted_address = models.BooleanField(default=False)
is_from_test_ballot = models.BooleanField(default=False)
# The polling location for which this ballot was retrieved
polling_location_we_vote_id_source = models.CharField(
verbose_name="we vote permanent id of the polling location this was copied from",
max_length=255, default=None, null=True, blank=True, unique=False)
# When we copy a ballot from a master ballot_returned entry, we want to store a link back to that source
ballot_returned_we_vote_id = models.CharField(
verbose_name="ballot_returned we_vote_id this was copied from",
max_length=255, default=None, null=True, blank=True, unique=False)
ballot_location_display_name = models.CharField(
verbose_name="the name of the ballot the voter is looking at",
max_length=255, default=None, null=True, blank=True, unique=False)
ballot_location_shortcut = models.CharField(
verbose_name="the url string used to find specific ballot",
max_length=255, default=None, null=True, blank=True, unique=False)
def election_day_text(self):
if isinstance(self.election_date, date):
return self.election_date.strftime('%Y-%m-%d')
else:
return ""
def ballot_caveat(self):
message = ''
if self.is_from_substituted_address:
message += "Ballot displayed is from a nearby address: '{substituted_address_nearby}'." \
"".format(substituted_address_nearby=self.substituted_address_nearby)
if self.is_from_test_ballot:
message += "Ballot displayed is a TEST ballot, and is for demonstration purposes only."
return message
class VoterBallotSavedManager(models.Model):
    """
    Create, retrieve and delete VoterBallotSaved entries: the per-voter record of which ballot
    (election, address or ballot_returned entry) the voter last looked at.
    """
def retrieve_ballots_per_voter_id(self, voter_id):
voter_ballot_list = []
voter_ballot_list_found = False
status = ""
success = False
if positive_value_exists(voter_id):
try:
voter_ballot_list_queryset = VoterBallotSaved.objects.using('readonly').filter(voter_id=voter_id)
voter_ballot_list_queryset = voter_ballot_list_queryset.order_by("-election_date") # Newest first
voter_ballot_list = list(voter_ballot_list_queryset)
success = True
status += "VOTER_BALLOT_LIST_RETRIEVED_PER_VOTER_ID"
                voter_ballot_list_found = len(voter_ballot_list) > 0
            except Exception as e:
                success = False
                status += "VOTER_BALLOT_LIST_FAILED_TO_RETRIEVE_PER_VOTER_ID: " + str(e) + " "
else:
status += "VOTER_BALLOT_LIST_NOT_RETRIEVED-MISSING_VOTER_ID"
results = {
'success': success,
'status': status,
'voter_ballot_list_found': voter_ballot_list_found,
'voter_ballot_list': voter_ballot_list,
}
return results
def __unicode__(self):
return "VoterBallotSavedManager"
def delete_voter_ballot_saved_by_voter_id(self, voter_id, google_civic_election_id):
voter_ballot_saved_id = 0
return self.delete_voter_ballot_saved(voter_ballot_saved_id, voter_id, google_civic_election_id)
def delete_voter_ballot_saved(self, voter_ballot_saved_id, voter_id=0, google_civic_election_id=0,
ballot_returned_we_vote_id="", ballot_location_shortcut=""):
"""
:param voter_ballot_saved_id:
:param voter_id:
:param google_civic_election_id:
:param ballot_returned_we_vote_id:
:param ballot_location_shortcut:
:return:
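
        Matching entries are deleted by id, or by voter_id combined with one of google_civic_election_id,
        ballot_returned_we_vote_id or ballot_location_shortcut.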
"""
voter_ballot_saved_found = False
voter_ballot_saved_deleted = False
voter_ballot_saved = None
status = ""
try:
if positive_value_exists(voter_ballot_saved_id):
voter_ballot_saved = VoterBallotSaved.objects.get(id=voter_ballot_saved_id)
# If still here, we found an existing voter_ballot_saved
voter_ballot_saved_found = True
success = True
status += "DELETE_VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_BALLOT_SAVED_ID "
elif positive_value_exists(voter_id) and positive_value_exists(google_civic_election_id):
voter_ballot_query = VoterBallotSaved.objects.filter(
voter_id=voter_id, google_civic_election_id=google_civic_election_id)
voter_ballot_list = list(voter_ballot_query)
for one_voter_ballot_saved in voter_ballot_list:
voter_ballot_saved_found = True
one_voter_ballot_saved.delete()
voter_ballot_saved_deleted = True
# If still here, we found an existing voter_ballot_saved
success = True
status += "DELETE_VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ID_AND_GOOGLE_CIVIC "
elif positive_value_exists(voter_id) and positive_value_exists(ballot_returned_we_vote_id):
voter_ballot_query = VoterBallotSaved.objects.filter(
voter_id=voter_id, ballot_returned_we_vote_id=ballot_returned_we_vote_id)
voter_ballot_list = list(voter_ballot_query)
for one_voter_ballot_saved in voter_ballot_list:
voter_ballot_saved_found = True
one_voter_ballot_saved.delete()
voter_ballot_saved_deleted = True
# If still here, we found an existing voter_ballot_saved
success = True
status += "DELETE_VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ID_AND_BALLOT_RETURNED_WE_VOTE_ID "
elif positive_value_exists(voter_id) and positive_value_exists(ballot_location_shortcut):
voter_ballot_query = VoterBallotSaved.objects.filter(
voter_id=voter_id, ballot_location_shortcut__iexact=ballot_location_shortcut)
voter_ballot_list = list(voter_ballot_query)
for one_voter_ballot_saved in voter_ballot_list:
voter_ballot_saved_found = True
one_voter_ballot_saved.delete()
voter_ballot_saved_deleted = True
# If still here, we found an existing voter_ballot_saved
success = True
status += "DELETE_VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ID_AND_BALLOT_LOCATION_SHORTCUT "
else:
voter_ballot_saved_found = False
success = False
status += "DELETE_VOTER_BALLOT_SAVED-COULD_NOT_RETRIEVE_VOTER_BALLOT_SAVED-MISSING_VARIABLES-DELETE "
except VoterBallotSaved.DoesNotExist:
success = True
status += "DELETE_VOTER_BALLOT_SAVED_NOT_FOUND "
except Exception as e:
success = False
status += "DELETE_VOTER_BALLOT_SAVED-CANNOT_DELETE "
        if voter_ballot_saved_found and voter_ballot_saved is not None:
try:
voter_ballot_saved.delete()
status += "DELETED "
voter_ballot_saved_deleted = True
except Exception as e:
success = False
status += "NOT_DELETED "
results = {
'success': success,
'status': status,
'voter_ballot_saved_deleted': voter_ballot_saved_deleted,
'voter_ballot_saved_found': voter_ballot_saved_found,
'voter_ballot_saved': voter_ballot_saved,
}
return results
def retrieve_voter_ballot_saved_by_id(self, voter_ballot_saved_id):
return self.retrieve_voter_ballot_saved(voter_ballot_saved_id)
def retrieve_voter_ballot_saved_by_voter_id(self, voter_id, google_civic_election_id):
voter_ballot_saved_id = 0
return self.retrieve_voter_ballot_saved(voter_ballot_saved_id, voter_id, google_civic_election_id)
def retrieve_voter_ballot_saved_by_ballot_returned_we_vote_id(self, voter_id, ballot_returned_we_vote_id):
voter_ballot_saved_id = 0
google_civic_election_id = 0
text_for_map_search = ''
return self.retrieve_voter_ballot_saved(
voter_ballot_saved_id, voter_id, google_civic_election_id, text_for_map_search, ballot_returned_we_vote_id)
def retrieve_voter_ballot_saved_by_ballot_location_shortcut(self, voter_id, ballot_location_shortcut):
voter_ballot_saved_id = 0
google_civic_election_id = 0
text_for_map_search = ''
ballot_returned_we_vote_id = ''
return self.retrieve_voter_ballot_saved(
voter_ballot_saved_id, voter_id, google_civic_election_id, text_for_map_search,
ballot_returned_we_vote_id, ballot_location_shortcut)
def retrieve_voter_ballot_saved_by_address_text(self, voter_id, text_for_map_search):
voter_ballot_saved_id = 0
google_civic_election_id = 0
return self.retrieve_voter_ballot_saved(voter_ballot_saved_id, voter_id, google_civic_election_id,
text_for_map_search)
def retrieve_voter_ballot_saved(self, voter_ballot_saved_id, voter_id=0, google_civic_election_id=0,
text_for_map_search='', ballot_returned_we_vote_id='', ballot_location_shortcut=''):
"""
:param voter_ballot_saved_id:
:param voter_id:
:param google_civic_election_id:
:param text_for_map_search:
:param ballot_returned_we_vote_id:
:param ballot_location_shortcut:
:return:
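
        Lookups are tried in priority order: voter_ballot_saved_id, then voter_id + google_civic_election_id,
        then voter_id + ballot_returned_we_vote_id, then voter_id + ballot_location_shortcut. If nothing
        matches and neither a specific ballot nor an election was requested, the most recent entry saved
        for this voter under text_for_map_search is returned.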
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_ballot_saved_found = False
voter_ballot_saved = None
status = ""
# Are we looking for a specific ballot?
specific_ballot_requested = positive_value_exists(ballot_returned_we_vote_id) or \
positive_value_exists(ballot_location_shortcut)
# Note: We are not using the 'readonly' here intentionally
try:
if positive_value_exists(voter_ballot_saved_id):
voter_ballot_saved = VoterBallotSaved.objects.get(id=voter_ballot_saved_id)
# If still here, we found an existing voter_ballot_saved
voter_ballot_saved_id = voter_ballot_saved.id
voter_ballot_saved_found = True if positive_value_exists(voter_ballot_saved_id) else False
success = True
status += "VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_BALLOT_SAVED_ID "
elif positive_value_exists(voter_id) and positive_value_exists(google_civic_election_id):
voter_ballot_saved = VoterBallotSaved.objects.get(
voter_id=voter_id, google_civic_election_id=google_civic_election_id)
# If still here, we found an existing voter_ballot_saved
voter_ballot_saved_id = voter_ballot_saved.id
voter_ballot_saved_found = True if positive_value_exists(voter_ballot_saved_id) else False
success = True
status += "VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ID_AND_GOOGLE_CIVIC "
elif positive_value_exists(voter_id) and positive_value_exists(ballot_returned_we_vote_id):
voter_ballot_saved = VoterBallotSaved.objects.get(
voter_id=voter_id, ballot_returned_we_vote_id__iexact=ballot_returned_we_vote_id)
# If still here, we found an existing voter_ballot_saved
voter_ballot_saved_id = voter_ballot_saved.id
voter_ballot_saved_found = True if positive_value_exists(voter_ballot_saved_id) else False
success = True
status += "VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ID_AND_BALLOT_RETURNED_ID "
elif positive_value_exists(voter_id) and positive_value_exists(ballot_location_shortcut):
voter_ballot_saved = VoterBallotSaved.objects.get(
voter_id=voter_id, ballot_location_shortcut__iexact=ballot_location_shortcut)
# If still here, we found an existing voter_ballot_saved
voter_ballot_saved_id = voter_ballot_saved.id
voter_ballot_saved_found = True if positive_value_exists(voter_ballot_saved_id) else False
success = True
status += "VOTER_BALLOT_SAVED_FOUND_FROM_VOTER_ID_AND_BALLOT_LOCATION_SHORTCUT "
else:
voter_ballot_saved_found = False
success = False
status += "COULD_NOT_RETRIEVE_VOTER_BALLOT_SAVED-MISSING_VARIABLES1 "
except VoterBallotSaved.MultipleObjectsReturned as e:
exception_multiple_object_returned = True
success = False
status += "MULTIPLE_VOTER_BALLOT_SAVED_FOUND-MUST_DELETE_ALL "
except VoterBallotSaved.DoesNotExist:
exception_does_not_exist = True
success = True
status += "VOTER_BALLOT_SAVED_NOT_FOUND1 "
# If here, a voter_ballot_saved not found yet, and not looking for specific ballot or
# a ballot by google_civic_election_id, then try to find list of entries saved under this address
# and return the most recent
if not voter_ballot_saved_found and not specific_ballot_requested and not \
positive_value_exists(google_civic_election_id):
try:
if positive_value_exists(text_for_map_search) and positive_value_exists(voter_id):
# Start with narrowest search
voter_ballot_saved_queryset = VoterBallotSaved.objects.all()
voter_ballot_saved_queryset = voter_ballot_saved_queryset.filter(
voter_id=voter_id, original_text_for_map_search__iexact=text_for_map_search)
# Return the latest google_civic_election_id first
voter_ballot_saved_queryset = voter_ballot_saved_queryset.order_by('-google_civic_election_id')
voter_ballot_saved = voter_ballot_saved_queryset.first()
status += "VOTER_BALLOT_SAVED_LIST_FOUND2 "
voter_ballot_saved_found = True
success = True
else:
voter_ballot_saved_found = False
success = False
status += "COULD_NOT_RETRIEVE_VOTER_BALLOT_SAVED-MISSING_VARIABLES2 "
except VoterBallotSaved.DoesNotExist:
exception_does_not_exist = True
success = True
status += "VOTER_BALLOT_SAVED_NOT_FOUND2 "
if positive_value_exists(voter_ballot_saved_found):
# If an address exists
if positive_value_exists(voter_ballot_saved.original_text_for_map_search):
# ...we want to make sure we have the city/state/zip breakdown
if not positive_value_exists(voter_ballot_saved.original_text_city) \
or not positive_value_exists(voter_ballot_saved.original_text_state) \
or not positive_value_exists(voter_ballot_saved.original_text_zip):
retrieve_results = \
retrieve_address_fields_from_geocoder(voter_ballot_saved.original_text_for_map_search)
if positive_value_exists(retrieve_results['success']):
try:
voter_ballot_saved.original_text_city = retrieve_results['city']
voter_ballot_saved.original_text_state = retrieve_results['state_code']
voter_ballot_saved.original_text_zip = retrieve_results['zip_long']
voter_ballot_saved.save()
status += "ORIGINAL_TEXT_UPDATED "
except Exception as e:
status += "COULD_NOT_SAVE_VOTER_BALLOT_SAVED-ORIGINAL_TEXT: " + str(e) + " "
# If a substituted address exists
if positive_value_exists(voter_ballot_saved.substituted_address_nearby):
# ...we want to make sure we have the city/state/zip breakdown
if not positive_value_exists(voter_ballot_saved.substituted_address_city) \
or not positive_value_exists(voter_ballot_saved.substituted_address_state) \
or not positive_value_exists(voter_ballot_saved.substituted_address_zip):
retrieve_results = \
retrieve_address_fields_from_geocoder(voter_ballot_saved.substituted_address_nearby)
if positive_value_exists(retrieve_results['success']):
try:
voter_ballot_saved.substituted_address_city = retrieve_results['city']
voter_ballot_saved.substituted_address_state = retrieve_results['state_code']
voter_ballot_saved.substituted_address_zip = retrieve_results['zip_long']
voter_ballot_saved.save()
                            status += "SUBSTITUTED_ADDRESS_UPDATED "
except Exception as e:
status += "COULD_NOT_SAVE_VOTER_BALLOT_SAVED-SUBSTITUTED_ADDRESS: " + str(e) + " "
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_ballot_saved_found': voter_ballot_saved_found,
'voter_ballot_saved': voter_ballot_saved,
}
return results
def retrieve_voter_ballot_saved_list_for_election(self, google_civic_election_id,
polling_location_we_vote_id_source="",
state_code="",
find_only_entries_not_copied_from_polling_location=False,
find_all_entries_for_election=False):
status = ""
google_civic_election_id = convert_to_int(google_civic_election_id)
voter_ballot_saved_list = []
voter_ballot_saved_list_found = False
sufficient_variables_received = positive_value_exists(polling_location_we_vote_id_source) \
or find_only_entries_not_copied_from_polling_location or find_all_entries_for_election
if not positive_value_exists(google_civic_election_id) or not sufficient_variables_received:
status += "RETRIEVE_VOTER_BALLOT_SAVED_LIST-MISSING_REQUIRED_VARIABLE(S) "
results = {
'success': True if voter_ballot_saved_list_found else False,
'status': status,
'voter_ballot_saved_list_found': voter_ballot_saved_list_found,
'voter_ballot_saved_list': voter_ballot_saved_list,
}
return results
try:
voter_ballot_saved_queryset = VoterBallotSaved.objects.order_by('-id')
voter_ballot_saved_queryset = voter_ballot_saved_queryset.filter(
google_civic_election_id=google_civic_election_id)
if positive_value_exists(state_code):
voter_ballot_saved_queryset = voter_ballot_saved_queryset.filter(
state_code__iexact=state_code)
if positive_value_exists(polling_location_we_vote_id_source):
voter_ballot_saved_queryset = voter_ballot_saved_queryset.filter(
polling_location_we_vote_id_source__iexact=polling_location_we_vote_id_source)
elif positive_value_exists(find_only_entries_not_copied_from_polling_location):
voter_ballot_saved_queryset = voter_ballot_saved_queryset.filter(
Q(polling_location_we_vote_id_source=None) | Q(polling_location_we_vote_id_source=""))
voter_ballot_saved_list = list(voter_ballot_saved_queryset)
if len(voter_ballot_saved_list):
voter_ballot_saved_list_found = True
status += 'VOTER_BALLOT_SAVED_LIST_FOUND '
else:
status += 'NO_VOTER_BALLOT_SAVED_LIST_FOUND '
except VoterBallotSaved.DoesNotExist:
# No ballot items found. Not a problem.
status += 'NO_VOTER_BALLOT_SAVED_LIST_FOUND_DOES_NOT_EXIST '
voter_ballot_saved_list = []
except Exception as e:
handle_exception(e, logger=logger)
status += 'FAILED retrieve_voter_ballot_saved_list_for_election ' \
'{error} [type: {error_type}] '.format(error=e, error_type=type(e))
results = {
'success': True if voter_ballot_saved_list_found else False,
'status': status,
'voter_ballot_saved_list_found': voter_ballot_saved_list_found,
'voter_ballot_saved_list': voter_ballot_saved_list,
}
return results
def update_or_create_voter_ballot_saved(
self, voter_id,
google_civic_election_id,
state_code,
election_day_text,
election_description_text,
original_text_for_map_search,
substituted_address_nearby='',
is_from_substituted_address=False,
is_from_test_ballot=False,
polling_location_we_vote_id_source='',
ballot_location_display_name=None,
ballot_returned_we_vote_id=None,
ballot_location_shortcut='',
called_recursively=False,
original_text_city='',
original_text_state='',
original_text_zip='',
substituted_address_city='',
substituted_address_state='',
substituted_address_zip=''):
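        """
        Create or update the VoterBallotSaved entry for this voter, keyed on voter_id plus (in priority
        order) ballot_returned_we_vote_id, ballot_location_shortcut, or google_civic_election_id.
        """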
# We assume that we tried to find an entry for this voter
success = False
status = ""
voter_ballot_saved_found = False
ballot_location_shortcut = str(ballot_location_shortcut)
ballot_location_shortcut = ballot_location_shortcut.strip().lower()
try:
defaults = {
'ballot_location_display_name': ballot_location_display_name,
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'google_civic_election_id': google_civic_election_id,
'election_description_text': election_description_text,
'is_from_substituted_address': is_from_substituted_address,
'is_from_test_ballot': is_from_test_ballot,
'original_text_for_map_search': original_text_for_map_search,
'original_text_city': original_text_city,
'original_text_state': original_text_state,
'original_text_zip': original_text_zip,
'polling_location_we_vote_id_source': polling_location_we_vote_id_source,
'state_code': state_code,
'substituted_address_nearby': substituted_address_nearby,
'substituted_address_city': substituted_address_city,
'substituted_address_state': substituted_address_state,
'substituted_address_zip': substituted_address_zip,
'voter_id': voter_id,
}
if positive_value_exists(election_day_text):
defaults['election_date'] = election_day_text
if positive_value_exists(voter_id) and positive_value_exists(ballot_returned_we_vote_id):
status += "SAVING_WITH_VOTER_ID_AND_BALLOT_RETURNED_WE_VOTE_ID "
voter_ballot_saved, created = VoterBallotSaved.objects.update_or_create(
voter_id=voter_id,
ballot_returned_we_vote_id=ballot_returned_we_vote_id,
defaults=defaults,
)
voter_ballot_saved_found = voter_ballot_saved.id
status += "BALLOT_SAVED-ballot_returned_we_vote_id "
success = True
elif positive_value_exists(voter_id) and positive_value_exists(ballot_location_shortcut):
status += "SAVING_WITH_VOTER_ID_AND_BALLOT_LOCATION_SHORTCUT "
voter_ballot_saved, created = VoterBallotSaved.objects.update_or_create(
voter_id=voter_id,
ballot_location_shortcut=ballot_location_shortcut,
defaults=defaults,
)
voter_ballot_saved_found = voter_ballot_saved.id
status += "BALLOT_SAVED-BALLOT_LOCATION_SHORTCUT "
success = True
elif positive_value_exists(voter_id) and positive_value_exists(google_civic_election_id):
status += "SAVING_WITH_VOTER_ID_AND_GOOGLE_CIVIC_ELECTION_ID "
voter_ballot_saved, created = VoterBallotSaved.objects.update_or_create(
voter_id=voter_id,
google_civic_election_id=google_civic_election_id,
defaults=defaults,
)
voter_ballot_saved_found = voter_ballot_saved.id
status += "BALLOT_SAVED-VOTER_ID_AND_ELECTION_ID "
success = True
else:
voter_ballot_saved = None
status += "UNABLE_TO_CREATE_BALLOT_SAVED "
success = False
google_civic_election_id = 0
except VoterBallotSaved.MultipleObjectsReturned as e:
status += "EXCEPTION-MultipleObjectsReturned "
voter_ballot_saved = None
voter_ballot_saved_manager = VoterBallotSavedManager()
voter_ballot_saved_manager.delete_voter_ballot_saved(0, voter_id, google_civic_election_id,
ballot_returned_we_vote_id, ballot_location_shortcut)
if not positive_value_exists(called_recursively):
called_recursively = True
return self.update_or_create_voter_ballot_saved(
voter_id,
google_civic_election_id,
state_code,
election_day_text,
election_description_text,
original_text_for_map_search,
substituted_address_nearby,
is_from_substituted_address,
is_from_test_ballot,
polling_location_we_vote_id_source,
ballot_location_display_name,
ballot_returned_we_vote_id,
ballot_location_shortcut,
called_recursively,
original_text_city=original_text_city,
original_text_state=original_text_state,
original_text_zip=original_text_zip,
substituted_address_city=substituted_address_city,
substituted_address_state=substituted_address_state,
substituted_address_zip=substituted_address_zip)
except Exception as e:
status += 'UNABLE_TO_CREATE_BALLOT_SAVED_EXCEPTION: ' \
'{error} [type: {error_type}] '.format(error=e, error_type=type(e))
success = False
voter_ballot_saved = None
google_civic_election_id = 0
results = {
'status': status,
'success': success,
'voter_ballot_saved_found': voter_ballot_saved_found,
'voter_ballot_saved': voter_ballot_saved,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
}
return results
def copy_existing_ballot_items_from_stored_ballot(voter_id, text_for_map_search, google_civic_election_id=0,
ballot_returned_we_vote_id='', ballot_location_shortcut=''):
"""
We are looking for the most recent ballot near this voter. We may or may not have a google_civic_election_id
:param voter_id:
:param text_for_map_search:
:param google_civic_election_id:
:param ballot_returned_we_vote_id:
:param ballot_location_shortcut:
:return:
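
    Illustrative call (the address and election id below are placeholders, not real records)::

        results = copy_existing_ballot_items_from_stored_ballot(
            voter_id, "123 Main St, Oakland, CA 94610", google_civic_election_id=5000)
        if results['ballot_returned_copied']:
            google_civic_election_id = results['google_civic_election_id']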
"""
status = ""
ballot_returned_manager = BallotReturnedManager()
voter_ballot_saved_manager = VoterBallotSavedManager()
ballot_item_list_manager = BallotItemListManager()
text_for_map_search_empty = not positive_value_exists(text_for_map_search) or text_for_map_search == ""
if positive_value_exists(ballot_returned_we_vote_id):
find_results = ballot_returned_manager.retrieve_ballot_returned_from_ballot_returned_we_vote_id(
ballot_returned_we_vote_id)
status += "CALLING-RETRIEVE_BALLOT_RETURNED_FROM_WE_VOTE_ID, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'substituted_address_city': '',
'substituted_address_state': '',
'substituted_address_zip': '',
'text_for_map_search': text_for_map_search,
'original_text_city': '',
'original_text_state': '',
'original_text_zip': '',
'voter_id': voter_id,
}
return error_results
# A specific ballot was found.
ballot_returned_to_copy = find_results['ballot_returned']
elif positive_value_exists(ballot_location_shortcut):
find_results = ballot_returned_manager.retrieve_ballot_returned_from_ballot_location_shortcut(
ballot_location_shortcut)
status += "CALLING-RETRIEVE_BALLOT_RETURNED_FROM_BALLOT_LOCATION_SHORTCUT, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'substituted_address_city': '',
'substituted_address_state': '',
'substituted_address_zip': '',
'text_for_map_search': text_for_map_search,
'original_text_city': '',
'original_text_state': '',
'original_text_zip': '',
'voter_id': voter_id,
}
return error_results
# A specific ballot was found.
ballot_returned_to_copy = find_results['ballot_returned']
elif positive_value_exists(google_civic_election_id) and text_for_map_search_empty:
find_results = ballot_returned_manager.retrieve_ballot_returned_from_google_civic_election_id(
google_civic_election_id)
status += "1-CALLING-RETRIEVE_BALLOT_RETURNED_FROM_GOOGLE_CIVIC_ELECTION_ID, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'substituted_address_city': '',
'substituted_address_state': '',
'substituted_address_zip': '',
'text_for_map_search': text_for_map_search,
'original_text_city': '',
'original_text_state': '',
'original_text_zip': '',
'voter_id': voter_id,
}
return error_results
# A specific ballot was found.
ballot_returned_to_copy = find_results['ballot_returned']
else:
find_results = ballot_returned_manager.find_closest_ballot_returned(
text_for_map_search, google_civic_election_id)
status += "CALLING-FIND_CLOSEST_BALLOT_RETURNED, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'substituted_address_city': '',
'substituted_address_state': '',
'substituted_address_zip': '',
'text_for_map_search': text_for_map_search,
'original_text_city': '',
'original_text_state': '',
'original_text_zip': '',
'voter_id': voter_id,
}
return error_results
# A ballot at a nearby address was found.
ballot_returned_to_copy = find_results['ballot_returned']
# Remove all prior ballot items, so we make room for copy_ballot_items to save ballot items
# 2017-11-03 We only want to delete if the ballot_returned in question has a polling_location_we_vote_id
if positive_value_exists(ballot_returned_to_copy.google_civic_election_id) and \
positive_value_exists(ballot_returned_to_copy.polling_location_we_vote_id):
voter_ballot_saved_id = 0
voter_ballot_saved_results = voter_ballot_saved_manager.delete_voter_ballot_saved(
voter_ballot_saved_id, voter_id, ballot_returned_to_copy.google_civic_election_id)
# We include a google_civic_election_id, so only the ballot info for this election is removed
ballot_item_list_manager.delete_all_ballot_items_for_voter(
voter_id, ballot_returned_to_copy.google_civic_election_id)
else:
status += "NOT_DELETED-voter_ballot_saved-AND-VOTER_BALLOT_ITEMS "
# ...and then copy it for the voter as long as it doesn't already belong to the voter
if ballot_returned_to_copy.voter_id != voter_id:
copy_item_results = ballot_item_list_manager.copy_ballot_items(ballot_returned_to_copy, voter_id)
status += copy_item_results['status']
if not copy_item_results['ballot_returned_copied']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'substituted_address_city': '',
'substituted_address_state': '',
'substituted_address_zip': '',
'text_for_map_search': text_for_map_search,
'original_text_city': '',
'original_text_state': '',
'original_text_zip': '',
'voter_id': voter_id,
}
return error_results
# VoterBallotSaved is updated outside of this function
results = {
'voter_id': voter_id,
'google_civic_election_id': ballot_returned_to_copy.google_civic_election_id,
'state_code': ballot_returned_to_copy.normalized_state,
'election_day_text': ballot_returned_to_copy.election_day_text(),
'election_description_text': ballot_returned_to_copy.election_description_text,
'text_for_map_search': ballot_returned_to_copy.text_for_map_search,
'original_text_city': ballot_returned_to_copy.normalized_city,
'original_text_state': ballot_returned_to_copy.normalized_state,
'original_text_zip': ballot_returned_to_copy.normalized_zip,
'substituted_address_nearby': ballot_returned_to_copy.text_for_map_search,
'substituted_address_city': ballot_returned_to_copy.normalized_city,
'substituted_address_state': ballot_returned_to_copy.normalized_state,
'substituted_address_zip': ballot_returned_to_copy.normalized_zip,
'ballot_returned_copied': True,
'ballot_location_display_name': ballot_returned_to_copy.ballot_location_display_name,
'ballot_returned_we_vote_id': ballot_returned_to_copy.we_vote_id,
'ballot_location_shortcut': ballot_returned_to_copy.ballot_location_shortcut if
ballot_returned_to_copy.ballot_location_shortcut else '',
'polling_location_we_vote_id_source': ballot_returned_to_copy.polling_location_we_vote_id,
'status': status,
}
return results
def refresh_ballot_items_for_voter_copied_from_one_polling_location(voter_id, ballot_returned_from_polling_location):
"""
:param voter_id:
:param ballot_returned_from_polling_location:
:return:
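
    Deletes the voter's existing ballot items for this election, then re-copies the ballot items from the
    given polling-location ballot_returned entry.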
"""
success = True
status = ""
ballot_item_list_manager = BallotItemListManager()
google_civic_election_id = ballot_returned_from_polling_location.google_civic_election_id
if not positive_value_exists(voter_id):
success = False
status += "REFRESH_EXISTING_BALLOT_ITEMS_FOR_VOTER-NO_VOTER_ID "
error_results = {
'success': success,
'status': status,
'voter_id': voter_id,
'google_civic_election_id': google_civic_election_id,
'ballot_returned_copied': False,
'polling_location_we_vote_id_source': ballot_returned_from_polling_location.polling_location_we_vote_id,
}
return error_results
if not positive_value_exists(google_civic_election_id):
success = False
status += "REFRESH_EXISTING_BALLOT_ITEMS_FOR_VOTER-NO_GOOGLE_CIVIC_ELECTION_ID "
error_results = {
'success': success,
'status': status,
'voter_id': voter_id,
'google_civic_election_id': google_civic_election_id,
'ballot_returned_copied': False,
'polling_location_we_vote_id_source': ballot_returned_from_polling_location.polling_location_we_vote_id,
}
return error_results
# Remove all prior ballot items for this voter for this election, so we make room for
# copy_ballot_items to save ballot items
ballot_item_list_manager.delete_all_ballot_items_for_voter(
voter_id, ballot_returned_from_polling_location.google_civic_election_id)
# Copy the ballot items from the polling location over for the voter
copy_item_results = ballot_item_list_manager.copy_ballot_items(ballot_returned_from_polling_location, voter_id)
status += copy_item_results['status']
if not copy_item_results['ballot_returned_copied']:
success = False
status += "REFRESH_EXISTING_BALLOT_ITEMS_FOR_VOTER-FAILED_TO_COPY "
error_results = {
'success': success,
'status': status,
'voter_id': voter_id,
'google_civic_election_id': google_civic_election_id,
'ballot_returned_copied': False,
'polling_location_we_vote_id_source': ballot_returned_from_polling_location.polling_location_we_vote_id,
}
return error_results
results = {
'success': success,
'status': status,
'voter_id': voter_id,
'google_civic_election_id': google_civic_election_id,
'ballot_returned_copied': True,
'polling_location_we_vote_id_source': ballot_returned_from_polling_location.polling_location_we_vote_id,
}
return results
def retrieve_ballot_items_for_one_ballot_returned(voter_id, text_for_map_search, google_civic_election_id=0,
ballot_returned_we_vote_id='', ballot_location_shortcut=''):
"""
We are looking for the most recent ballot near this voter. We may or may not have a google_civic_election_id
:param voter_id:
:param text_for_map_search:
:param google_civic_election_id:
:param ballot_returned_we_vote_id:
:param ballot_location_shortcut:
:return:
"""
status = ""
ballot_returned_manager = BallotReturnedManager()
voter_ballot_saved_manager = VoterBallotSavedManager()
ballot_item_list_manager = BallotItemListManager()
    text_for_map_search_empty = not positive_value_exists(text_for_map_search) or text_for_map_search == ""
if positive_value_exists(ballot_returned_we_vote_id):
find_results = ballot_returned_manager.retrieve_ballot_returned_from_ballot_returned_we_vote_id(
ballot_returned_we_vote_id)
status += "CALLING-RETRIEVE_BALLOT_RETURNED_FROM_WE_VOTE_ID, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'text_for_map_search': text_for_map_search,
'voter_id': voter_id,
}
return error_results
# A specific ballot was found.
ballot_returned = find_results['ballot_returned']
elif positive_value_exists(ballot_location_shortcut):
find_results = ballot_returned_manager.retrieve_ballot_returned_from_ballot_location_shortcut(
ballot_location_shortcut)
status += "CALLING-RETRIEVE_BALLOT_RETURNED_FROM_BALLOT_LOCATION_SHORTCUT, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'text_for_map_search': text_for_map_search,
'voter_id': voter_id,
}
return error_results
# A specific ballot was found.
ballot_returned = find_results['ballot_returned']
elif positive_value_exists(google_civic_election_id) and text_for_map_search_empty:
find_results = ballot_returned_manager.retrieve_ballot_returned_from_google_civic_election_id(
google_civic_election_id)
status += "2-CALLING-RETRIEVE_BALLOT_RETURNED_FROM_GOOGLE_CIVIC_ELECTION_ID, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'text_for_map_search': text_for_map_search,
'voter_id': voter_id,
}
return error_results
# A specific ballot was found.
ballot_returned = find_results['ballot_returned']
else:
find_results = ballot_returned_manager.find_closest_ballot_returned(
text_for_map_search, google_civic_election_id)
status += "CALLING-FIND_CLOSEST_BALLOT_RETURNED, status: [["
status += find_results['status']
status += "]] "
if not find_results['ballot_returned_found']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'text_for_map_search': text_for_map_search,
'voter_id': voter_id,
}
return error_results
# A ballot at a nearby address was found.
ballot_returned = find_results['ballot_returned']
# Remove all prior ballot items, so we make room for copy_ballot_items to save ballot items
# 2017-11-03 We only want to delete if the ballot_returned in question has a polling_location_we_vote_id
if positive_value_exists(ballot_returned.google_civic_election_id) and \
positive_value_exists(ballot_returned.polling_location_we_vote_id):
voter_ballot_saved_id = 0
voter_ballot_saved_results = voter_ballot_saved_manager.delete_voter_ballot_saved(
voter_ballot_saved_id, voter_id, ballot_returned.google_civic_election_id)
# We include a google_civic_election_id, so only the ballot info for this election is removed
ballot_item_list_manager.delete_all_ballot_items_for_voter(voter_id, ballot_returned.google_civic_election_id)
else:
status += "NOT_DELETED-voter_ballot_saved-AND-VOTER_BALLOT_ITEMS "
# ...and then copy it for the voter as long as it doesn't already belong to the voter
if ballot_returned.voter_id != voter_id:
copy_item_results = ballot_item_list_manager.copy_ballot_items(ballot_returned, voter_id)
status += copy_item_results['status']
if not copy_item_results['ballot_returned_copied']:
error_results = {
'ballot_returned_copied': False,
'ballot_location_display_name': '',
'ballot_returned_we_vote_id': ballot_returned_we_vote_id,
'ballot_location_shortcut': ballot_location_shortcut,
'election_day_text': '',
'election_description_text': '',
'google_civic_election_id': google_civic_election_id,
'polling_location_we_vote_id_source': '',
'state_code': '',
'status': status,
'substituted_address_nearby': '',
'text_for_map_search': text_for_map_search,
'voter_id': voter_id,
}
return error_results
# VoterBallotSaved is updated outside of this function
results = {
'voter_id': ballot_returned.voter_id,
'google_civic_election_id': ballot_returned.google_civic_election_id,
'state_code': ballot_returned.normalized_state,
'election_day_text': ballot_returned.election_day_text(),
'election_description_text': ballot_returned.election_description_text,
'text_for_map_search': ballot_returned.text_for_map_search,
'substituted_address_nearby': ballot_returned.text_for_map_search,
'ballot_returned_copied': True,
'ballot_location_display_name': ballot_returned.ballot_location_display_name,
'ballot_returned_we_vote_id': ballot_returned.we_vote_id,
'ballot_location_shortcut': ballot_returned.ballot_location_shortcut if
ballot_returned.ballot_location_shortcut else '',
'polling_location_we_vote_id_source': ballot_returned.polling_location_we_vote_id,
'status': status,
}
return results
def retrieve_address_fields_from_geocoder(text_for_map_search):
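    """
    Use the Google geocoder (via geopy) to break a free-form address into city, state_code, zip_long,
    latitude and longitude. Returns a results dict; fields that cannot be determined are left blank.
    """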
success = True
status = ""
city = ""
longitude = None
latitude = None
state_code = ""
zip_long = ""
try:
google_client = get_geocoder_for_service('google')(GOOGLE_MAPS_API_KEY)
location = google_client.geocode(text_for_map_search, sensor=False, timeout=GEOCODE_TIMEOUT)
if location is None:
status += 'REFRESH_ADDRESS_FIELDS: Could not find location matching "{}" '.format(text_for_map_search)
logger.debug(status)
else:
latitude = location.latitude
longitude = location.longitude
# Retrieve the ZIP code
if hasattr(location, 'raw'):
if 'address_components' in location.raw:
for one_address_component in location.raw['address_components']:
if 'administrative_area_level_1' in one_address_component['types'] \
and positive_value_exists(one_address_component['short_name']):
state_code = one_address_component['short_name']
if 'locality' in one_address_component['types'] \
and positive_value_exists(one_address_component['long_name']):
city = one_address_component['long_name']
if 'postal_code' in one_address_component['types'] \
and positive_value_exists(one_address_component['long_name']):
zip_long = one_address_component['long_name']
status += "GEOCODER_WORKED "
    except Exception as e:
        success = False
        status += "RETRIEVE_ADDRESS_FIELDS_FROM_GEOCODER_FAILED " + str(e) + " "
results = {
'success': success,
'status': status,
'city': city,
'latitude': latitude,
'longitude': longitude,
'state_code': state_code,
'zip_long': zip_long,
}
return results
| {
"content_hash": "c8a7d7446c62c7ca7b5c56dab4c396c2",
"timestamp": "",
"source": "github",
"line_count": 3691,
"max_line_length": 120,
"avg_line_length": 52.931725819561095,
"alnum_prop": 0.5746451622809936,
"repo_name": "jainanisha90/WeVoteServer",
"id": "15b2b1f397ccb47dc10604483bec467944e34cc0",
"size": "195453",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ballot/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3612"
},
{
"name": "HTML",
"bytes": "1003027"
},
{
"name": "Python",
"bytes": "7489854"
},
{
"name": "Shell",
"bytes": "611"
}
],
"symlink_target": ""
} |
"""Deprecate AWS specific Account fields (role_name, number, s3_name)
Account.number is being replaced with Account.identifier.
Revision ID: 55725cc4bf25
Revises: 1c847ae1209a
Create Date: 2017-02-16 13:41:08.162000
"""
# revision identifiers, used by Alembic.
revision = '55725cc4bf25'
down_revision = '1c847ae1209a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Session = sessionmaker()
Base = declarative_base()
class Account(Base):
__tablename__ = 'account'
id = sa.Column(sa.Integer, primary_key=True)
account_type_id = sa.Column(sa.Integer())
identifier = sa.Column(sa.String())
s3_name = sa.Column(sa.String(64)) # (deprecated-custom)
role_name = sa.Column(sa.String(256)) # (deprecated-custom)
class AccountTypeCustomValues(Base):
"""
Defines the values for custom fields defined in AccountTypeCustomFields.
"""
__tablename__ = "account_type_values"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(64))
value = sa.Column(sa.String(256))
account_id = sa.Column(sa.Integer())
def update_custom_value(name, value, session, account_id):
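    """Insert or update one AccountTypeCustomValues row (name/value) for the given account."""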
if not value:
return
cv = session.query(AccountTypeCustomValues) \
.filter(AccountTypeCustomValues.account_id == account_id) \
.filter(AccountTypeCustomValues.name == name)
if cv.count():
cv = cv.one()
cv.value = value
else:
cv = AccountTypeCustomValues(name=name, value=value, account_id=account_id)
session.add(cv)
def update_from_custom_value(name, session, account):
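    """Copy a stored custom value (if any) back onto the matching attribute of the given Account."""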
cv = session.query(AccountTypeCustomValues) \
.filter(AccountTypeCustomValues.account_id == account.id) \
.filter(AccountTypeCustomValues.name == name)
if not cv.count():
return
cv = cv.one()
setattr(account, name, cv.value)
session.add(account)
def upgrade():
bind = op.get_bind()
session = Session(bind=bind)
# copy account.s3_name and account.role_name into custom values.
accounts = session.query(Account).all()
for account in accounts:
update_custom_value('s3_name', account.s3_name, session, account.id)
update_custom_value('role_name', account.role_name, session, account.id)
session.commit()
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('account', 'role_name')
op.drop_column('account', 'number')
op.drop_column('account', 's3_name')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('account', sa.Column('s3_name', sa.VARCHAR(length=64), autoincrement=False, nullable=True))
op.add_column('account', sa.Column('number', sa.VARCHAR(length=12), autoincrement=False, nullable=True))
op.add_column('account', sa.Column('role_name', sa.VARCHAR(length=256), autoincrement=False, nullable=True))
# ### end Alembic commands ###
bind = op.get_bind()
session = Session(bind=bind)
# copy custom values into account.s3_name and into account.role_name
accounts = session.query(Account).all()
for account in accounts:
update_from_custom_value('s3_name', session, account)
update_from_custom_value('role_name', session, account)
session.commit() | {
"content_hash": "0f793e2ada2f2815c59ef09dbeb63f12",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 112,
"avg_line_length": 30.62162162162162,
"alnum_prop": 0.6796116504854369,
"repo_name": "stackArmor/security_monkey",
"id": "c0e4fe48c6fa71f9db7674cda38ac9b04c7cc17b",
"size": "3399",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "migrations/versions/55725cc4bf25_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33462"
},
{
"name": "Dart",
"bytes": "137774"
},
{
"name": "Dockerfile",
"bytes": "3798"
},
{
"name": "HTML",
"bytes": "165572"
},
{
"name": "JavaScript",
"bytes": "984069"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1682110"
},
{
"name": "Shell",
"bytes": "29978"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import subprocess
import os
import os.path
import re
import warnings
import textwrap
import types
import sys
import stat
# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree
from nltk import __file__
######################################################################
# Regular Expression Processing
######################################################################
def convert_regexp_to_nongrouping(pattern):
"""
Convert all grouping parentheses in the given regexp pattern to
non-grouping parentheses, and return the result. E.g.:
>>> from nltk.internals import convert_regexp_to_nongrouping
>>> convert_regexp_to_nongrouping('ab(c(x+)(z*))?d')
'ab(?:c(?:x+)(?:z*))?d'
:type pattern: str
:rtype: str
"""
# Sanity check: back-references are not allowed!
for s in re.findall(r'\\.|\(\?P=', pattern):
if s[1] in '0123456789' or s == '(?P=':
raise ValueError('Regular expressions with back-references '
'are not supported: %r' % pattern)
# This regexp substitution function replaces the string '('
# with the string '(?:', but otherwise makes no changes.
def subfunc(m):
        return re.sub(r'^\((\?P<[^>]*>)?$', '(?:', m.group())
# Scan through the regular expression. If we see any backslashed
# characters, ignore them. If we see a named group, then
# replace it with "(?:". If we see any open parens that are part
# of an extension group, ignore those too. But if we see
# any other open paren, replace it with "(?:")
return re.sub(r'''(?x)
\\. | # Backslashed character
\(\?P<[^>]*> | # Named group
\(\? | # Extension group
\( # Grouping parenthesis''', subfunc, pattern)
##########################################################################
# Java Via Command-Line
##########################################################################
_java_bin = None
_java_options = []
# [xx] add classpath option to config_java?
def config_java(bin=None, options=None, verbose=True):
"""
Configure nltk's java interface, by letting nltk know where it can
find the Java binary, and what extra options (if any) should be
passed to Java when it is run.
:param bin: The full path to the Java binary. If not specified,
then nltk will search the system for a Java binary; and if
one is not found, it will raise a ``LookupError`` exception.
:type bin: str
:param options: A list of options that should be passed to the
Java binary when it is called. A common value is
``'-Xmx512m'``, which tells Java binary to increase
the maximum heap size to 512 megabytes. If no options are
specified, then do not modify the options list.
:type options: list(str)
"""
global _java_bin, _java_options
_java_bin = find_binary('java', bin, env_vars=['JAVAHOME', 'JAVA_HOME'], verbose=verbose)
if options is not None:
if isinstance(options, basestring):
options = options.split()
_java_options = list(options)
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None,
blocking=True):
"""
Execute the given java command, by opening a subprocess that calls
Java. If java has not yet been configured, it will be configured
by calling ``config_java()`` with no arguments.
:param cmd: The java command that should be called, formatted as
a list of strings. Typically, the first string will be the name
of the java class; and the remaining strings will be arguments
for that java class.
:type cmd: list(str)
:param classpath: A ``':'`` separated list of directories, JAR
archives, and ZIP archives to search for class files.
:type classpath: str
:param stdin, stdout, stderr: Specify the executed programs'
standard input, standard output and standard error file
handles, respectively. Valid values are ``subprocess.PIPE``,
an existing file descriptor (a positive integer), an existing
file object, and None. ``subprocess.PIPE`` indicates that a
new pipe to the child should be created. With None, no
redirection will occur; the child's file handles will be
inherited from the parent. Additionally, stderr can be
``subprocess.STDOUT``, which indicates that the stderr data
from the applications should be captured into the same file
handle as for stdout.
:param blocking: If ``false``, then return immediately after
spawning the subprocess. In this case, the return value is
the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.
:return: If ``blocking=True``, then return a tuple ``(stdout,
stderr)``, containing the stdout and stderr outputs generated
by the java command if the ``stdout`` and ``stderr`` parameters
were set to ``subprocess.PIPE``; or None otherwise. If
``blocking=False``, then return a ``subprocess.Popen`` object.
:raise OSError: If the java command returns a nonzero return code.
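
    For example (mirroring the illustrative Weka call further down in this module; the
    classpath is a placeholder)::

        (stdout, stderr) = java(['weka.classifiers.bayes.NaiveBayes',
                                 '-l', '/tmp/names.model', '-T', '/tmp/test.arff', '-p', '0'],
                                classpath='/path/to/weka.jar',
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)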
"""
if stdin == 'pipe': stdin = subprocess.PIPE
if stdout == 'pipe': stdout = subprocess.PIPE
if stderr == 'pipe': stderr = subprocess.PIPE
if isinstance(cmd, basestring):
raise TypeError('cmd should be a list of strings')
# Make sure we know where a java binary is.
if _java_bin is None:
config_java()
# Set up the classpath.
if classpath is None:
classpath = NLTK_JAR
else:
classpath += os.path.pathsep + NLTK_JAR
# Construct the full command string.
cmd = list(cmd)
cmd = ['-cp', classpath] + cmd
cmd = [_java_bin] + _java_options + cmd
# Call java via a subprocess
p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
if not blocking: return p
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print(stderr)
raise OSError('Java command failed!')
return (stdout, stderr)
#: The location of the NLTK jar file, which is used to communicate
#: with external Java packages (such as Mallet) that do not have
#: a sufficiently powerful native command-line interface.
NLTK_JAR = os.path.abspath(os.path.join(os.path.split(__file__)[0],
'nltk.jar'))
if 0:
#config_java(options='-Xmx512m')
# Write:
#java('weka.classifiers.bayes.NaiveBayes',
# ['-d', '/tmp/names.model', '-t', '/tmp/train.arff'],
# classpath='/Users/edloper/Desktop/weka/weka.jar')
# Read:
(a,b) = java(['weka.classifiers.bayes.NaiveBayes',
'-l', '/tmp/names.model', '-T', '/tmp/test.arff',
'-p', '0'],#, '-distribution'],
classpath='/Users/edloper/Desktop/weka/weka.jar')
######################################################################
# Parsing
######################################################################
class ParseError(ValueError):
"""
Exception raised by parse_* functions when they fail.
:param position: The index in the input string where an error occurred.
:param expected: What was expected when an error occurred.
"""
def __init__(self, expected, position):
ValueError.__init__(self, expected, position)
self.expected = expected
self.position = position
def __str__(self):
return 'Expected %s at %s' % (self.expected, self.position)
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
def parse_str(s, start_position):
"""
If a Python string literal begins at the specified position in the
given string, then return a tuple ``(val, end_position)``
containing the value of the string literal and the position where
it ends. Otherwise, raise a ``ParseError``.
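
    E.g.:
    >>> from nltk.internals import parse_str
    >>> parse_str('"hello" world', 0)
    ('hello', 7)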
"""
# Read the open quote, and any modifiers.
m = _STRING_START_RE.match(s, start_position)
if not m: raise ParseError('open quote', start_position)
quotemark = m.group(1)
# Find the close quote.
_STRING_END_RE = re.compile(r'\\|%s' % quotemark)
position = m.end()
while True:
match = _STRING_END_RE.search(s, position)
if not match: raise ParseError('close quote', position)
if match.group(0) == '\\': position = match.end()+1
else: break
# Parse it, using eval. Strings with invalid escape sequences
    # might raise ValueError.
try:
return eval(s[start_position:match.end()]), match.end()
except ValueError as e:
        raise ParseError('valid string (%s)' % e, start_position)
_PARSE_INT_RE = re.compile(r'-?\d+')
def parse_int(s, start_position):
"""
If an integer begins at the specified position in the given
string, then return a tuple ``(val, end_position)`` containing the
value of the integer and the position where it ends. Otherwise,
raise a ``ParseError``.
"""
m = _PARSE_INT_RE.match(s, start_position)
if not m: raise ParseError('integer', start_position)
return int(m.group()), m.end()
_PARSE_NUMBER_VALUE = re.compile(r'-?(\d*)([.]?\d*)?')
def parse_number(s, start_position):
"""
If an integer or float begins at the specified position in the
given string, then return a tuple ``(val, end_position)``
containing the value of the number and the position where it ends.
Otherwise, raise a ``ParseError``.
"""
m = _PARSE_NUMBER_VALUE.match(s, start_position)
if not m or not (m.group(1) or m.group(2)):
raise ParseError('number', start_position)
if m.group(2): return float(m.group()), m.end()
else: return int(m.group()), m.end()
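# Illustrative usage (inputs invented for this sketch): integers come back as
# int, values with a decimal point as float, each with its end position.
#     >>> parse_number('width=42;', 6)
#     (42, 8)
#     >>> parse_number('pi is 3.14159', 6)
#     (3.14159, 13)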
######################################################################
# Check if a method has been overridden
######################################################################
def overridden(method):
"""
:return: True if ``method`` overrides some method with the same
name in a base class. This is typically used when defining
abstract base classes or interfaces, to allow subclasses to define
either of two related methods:
>>> class EaterI:
... '''Subclass must define eat() or batch_eat().'''
... def eat(self, food):
... if overridden(self.batch_eat):
... return self.batch_eat([food])[0]
... else:
... raise NotImplementedError()
... def batch_eat(self, foods):
... return [self.eat(food) for food in foods]
:type method: instance method
"""
# [xx] breaks on classic classes!
if isinstance(method, types.MethodType) and method.im_class is not None:
name = method.__name__
funcs = [cls.__dict__[name]
for cls in _mro(method.im_class)
if name in cls.__dict__]
return len(funcs) > 1
else:
raise TypeError('Expected an instance method.')
def _mro(cls):
"""
Return the method resolution order for ``cls`` -- i.e., a list
containing ``cls`` and all its base classes, in the order in which
they would be checked by ``getattr``. For new-style classes, this
is just cls.__mro__. For classic classes, this can be obtained by
a depth-first left-to-right traversal of ``__bases__``.
"""
if isinstance(cls, type):
return cls.__mro__
else:
mro = [cls]
for base in cls.__bases__: mro.extend(_mro(base))
return mro
######################################################################
# Deprecation decorator & base class
######################################################################
# [xx] dedent msg first if it comes from a docstring.
def _add_epytext_field(obj, field, message):
"""Add an epytext @field to a given object's docstring."""
indent = ''
# If we already have a docstring, then add a blank line to separate
# it from the new field, and check its indentation.
if obj.__doc__:
obj.__doc__ = obj.__doc__.rstrip()+'\n\n'
indents = re.findall(r'(?<=\n)[ ]+(?!\s)', obj.__doc__.expandtabs())
if indents: indent = min(indents)
# If we don't have a docstring, add an empty one.
else:
obj.__doc__ = ''
obj.__doc__ += textwrap.fill('@%s: %s' % (field, message),
initial_indent=indent,
subsequent_indent=indent+' ')
def deprecated(message):
"""
A decorator used to mark functions as deprecated. This will cause
    a warning to be printed when the function is used. Usage:
>>> from nltk.internals import deprecated
>>> @deprecated('Use foo() instead')
... def bar(x):
... print x/10
"""
def decorator(func):
msg = ("Function %s() has been deprecated. %s"
% (func.__name__, message))
msg = '\n' + textwrap.fill(msg, initial_indent=' ',
subsequent_indent=' ')
def newFunc(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
# Copy the old function's name, docstring, & dict
newFunc.__dict__.update(func.__dict__)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__deprecated__ = True
# Add a @deprecated field to the docstring.
_add_epytext_field(newFunc, 'deprecated', message)
return newFunc
return decorator
class Deprecated(object):
"""
A base class used to mark deprecated classes. A typical usage is to
alert users that the name of a class has changed:
>>> from nltk.internals import Deprecated
>>> class NewClassName(object):
... pass # All logic goes here.
...
>>> class OldClassName(Deprecated, NewClassName):
... "Use NewClassName instead."
The docstring of the deprecated class will be used in the
deprecation warning message.
"""
def __new__(cls, *args, **kwargs):
# Figure out which class is the deprecated one.
dep_cls = None
for base in _mro(cls):
if Deprecated in base.__bases__:
dep_cls = base; break
assert dep_cls, 'Unable to determine which base is deprecated.'
# Construct an appropriate warning.
        doc = (dep_cls.__doc__ or '').strip()
# If there's a @deprecated field, strip off the field marker.
doc = re.sub(r'\A\s*@deprecated:', r'', doc)
# Strip off any indentation.
doc = re.sub(r'(?m)^\s*', '', doc)
# Construct a 'name' string.
name = 'Class %s' % dep_cls.__name__
if cls != dep_cls:
name += ' (base class for %s)' % cls.__name__
# Put it all together.
msg = '%s has been deprecated. %s' % (name, doc)
# Wrap it.
msg = '\n' + textwrap.fill(msg, initial_indent=' ',
subsequent_indent=' ')
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
# Do the actual work of __new__.
return object.__new__(cls, *args, **kwargs)
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
"""
A counter that auto-increments each time its value is read.
"""
def __init__(self, initial_value=0):
self._value = initial_value
def get(self):
self._value += 1
return self._value
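# Illustrative usage: every read auto-increments, which is what makes the
# counter useful for generating unique names.
#     >>> c = Counter()
#     >>> c.get(), c.get(), c.get()
#     (1, 2, 3)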
##########################################################################
# Search for files/binaries
##########################################################################
def find_file(filename, env_vars=(), searchpath=(),
file_names=None, url=None, verbose=True):
"""
Search for a file to be used by nltk.
:param filename: The name or path of the file.
:param env_vars: A list of environment variable names to check.
:param file_names: A list of alternative file names to check.
:param searchpath: List of directories to search.
:param url: URL presented to user for download help.
:param verbose: Whether or not to print path when a file is found.
"""
if file_names is None: file_names = [filename]
assert isinstance(filename, basestring)
assert not isinstance(file_names, basestring)
assert not isinstance(searchpath, basestring)
if isinstance(env_vars, basestring):
env_vars = env_vars.split()
# File exists, no magic
if os.path.isfile(filename):
if verbose: print('[Found %s: %s]' % (filename, filename))
return filename
for alternative in file_names:
path_to_file = os.path.join(filename, alternative)
if os.path.isfile(path_to_file):
if verbose: print('[Found %s: %s]' % (filename, path_to_file))
return path_to_file
path_to_file = os.path.join(filename, 'file', alternative)
if os.path.isfile(path_to_file):
if verbose: print('[Found %s: %s]' % (filename, path_to_file))
return path_to_file
# Check environment variables
for env_var in env_vars:
if env_var in os.environ:
path_to_file = os.environ[env_var]
if os.path.isfile(path_to_file):
if verbose: print('[Found %s: %s]' % (filename, path_to_file))
return path_to_file
else:
for alternative in file_names:
path_to_file = os.path.join(os.environ[env_var],
alternative)
if os.path.isfile(path_to_file):
if verbose: print('[Found %s: %s]'%(filename, path_to_file))
return path_to_file
path_to_file = os.path.join(os.environ[env_var], 'file',
alternative)
if os.path.isfile(path_to_file):
if verbose: print('[Found %s: %s]'%(filename, path_to_file))
return path_to_file
# Check the path list.
for directory in searchpath:
for alternative in file_names:
path_to_file = os.path.join(directory, alternative)
if os.path.isfile(path_to_file):
return path_to_file
# If we're on a POSIX system, then try using the 'which' command
# to find the file.
if os.name == 'posix':
for alternative in file_names:
try:
p = subprocess.Popen(['which', alternative], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
path = stdout.strip()
if path.endswith(alternative) and os.path.exists(path):
if verbose: print('[Found %s: %s]' % (filename, path))
return path
            except (KeyboardInterrupt, SystemExit):
raise
except:
pass
msg = ("NLTK was unable to find the %s file!" "\nUse software specific "
"configuration paramaters" % filename)
if env_vars: msg += ' or set the %s environment variable' % env_vars[0]
msg += '.'
if searchpath:
msg += '\n\n Searched in:'
msg += ''.join('\n - %s' % d for d in searchpath)
    if url: msg += ('\n\n For more information on %s, see:\n <%s>' %
(filename, url))
div = '='*75
raise LookupError('\n\n%s\n%s\n%s' % (div, msg, div))
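# Illustrative sketch (the binary name, environment variable and search path
# below are hypothetical, not values shipped with this module):
#     path = find_file('prover9',
#                      env_vars=('PROVER9',),
#                      searchpath=('/usr/local/bin',),
#                      verbose=False)
# The call returns the first existing path it finds, or raises LookupError
# describing where it searched.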
def find_binary(name, path_to_bin=None, env_vars=(), searchpath=(),
binary_names=None, url=None, verbose=True):
"""
Search for a file to be used by nltk.
:param name: The name or path of the file.
:param path_to_bin: The user-supplied binary location (deprecated)
:param env_vars: A list of environment variable names to check.
    :param binary_names: A list of alternative file names to check.
:param searchpath: List of directories to search.
:param url: URL presented to user for download help.
:param verbose: Whether or not to print path when a file is found.
"""
return find_file(path_to_bin or name, env_vars, searchpath, binary_names,
url, verbose)
##########################################################################
# Find Java JAR files
# TODO: Add support for jar names specified as regular expressions
##########################################################################
def find_jar(name, path_to_jar=None, env_vars=(),
searchpath=(), url=None, verbose=True):
"""
Search for a jar that is used by nltk.
:param name: The name of the jar file
:param path_to_jar: The user-supplied jar location, or None.
:param env_vars: A list of environment variable names to check
in addition to the CLASSPATH variable which is
checked by default.
:param searchpath: List of directories to search.
"""
assert isinstance(name, basestring)
assert not isinstance(searchpath, basestring)
if isinstance(env_vars, basestring):
env_vars = env_vars.split()
# Make sure we check the CLASSPATH first
env_vars = ['CLASSPATH'] + list(env_vars)
# If an explicit location was given, then check it, and return it if
# it's present; otherwise, complain.
if path_to_jar is not None:
if os.path.isfile(path_to_jar):
return path_to_jar
raise ValueError('Could not find %s jar file at %s' %
(name, path_to_jar))
# Check environment variables
for env_var in env_vars:
if env_var in os.environ:
if env_var == 'CLASSPATH':
classpath = os.environ['CLASSPATH']
for cp in classpath.split(os.path.pathsep):
if os.path.isfile(cp) and os.path.basename(cp) == name:
if verbose: print('[Found %s: %s]' % (name, cp))
return cp
else:
path_to_jar = os.environ[env_var]
if os.path.isfile(path_to_jar) and os.path.basename(path_to_jar) == name:
if verbose: print('[Found %s: %s]' % (name, path_to_jar))
return path_to_jar
# Check the path list.
for directory in searchpath:
path_to_jar = os.path.join(directory, name)
if os.path.isfile(path_to_jar):
if verbose: print('[Found %s: %s]' % (name, path_to_jar))
return path_to_jar
# If nothing was found, raise an error
msg = ("NLTK was unable to find %s!" % name)
if env_vars: msg += ' Set the %s environment variable' % env_vars[0]
msg = textwrap.fill(msg+'.', initial_indent=' ',
subsequent_indent=' ')
if searchpath:
msg += '\n\n Searched in:'
msg += ''.join('\n - %s' % d for d in searchpath)
    if url: msg += ('\n\n For more information on %s, see:\n <%s>' %
(name, url))
div = '='*75
raise LookupError('\n\n%s\n%s\n%s' % (div, msg, div))
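# Illustrative sketch (jar name, environment variable and directory are
# hypothetical): CLASSPATH is always consulted first, then any extra
# environment variables, then the explicit search path.
#     jar = find_jar('example-tool.jar',
#                    env_vars=('EXAMPLE_TOOL_JAR',),
#                    searchpath=('/opt/jars',),
#                    verbose=False)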
##########################################################################
# Import Stdlib Module
##########################################################################
def import_from_stdlib(module):
"""
When python is run from within the nltk/ directory tree, the
current directory is included at the beginning of the search path.
Unfortunately, that means that modules within nltk can sometimes
shadow standard library modules. As an example, the stdlib
    'inspect' module will attempt to import the stdlib 'tokenize'
    module, but will end up importing NLTK's 'tokenize' module
    instead (causing the import to fail).
"""
old_path = sys.path
sys.path = [d for d in sys.path if d not in ('', '.')]
m = __import__(module)
sys.path = old_path
return m
##########################################################################
# Abstract declaration
##########################################################################
def abstract(func):
"""
A decorator used to mark methods as abstract. I.e., methods that
are marked by this decorator must be overridden by subclasses. If
an abstract method is called (either in the base class or in a
subclass that does not override the base class method), it will
raise ``NotImplementedError``.
"""
# Avoid problems caused by nltk.tokenize shadowing the stdlib tokenize:
inspect = import_from_stdlib('inspect')
# Read the function's signature.
args, varargs, varkw, defaults = inspect.getargspec(func)
# Create a new function with the same signature (minus defaults)
# that raises NotImplementedError.
msg = '%s is an abstract method.' % func.__name__
signature = inspect.formatargspec(args, varargs, varkw, ())
exec ('def newfunc%s: raise NotImplementedError(%r)' % (signature, msg))
# Substitute in the defaults after-the-fact, since eval(repr(val))
# may not work for some default values.
newfunc.func_defaults = func.func_defaults
# Copy the name and docstring
newfunc.__name__ = func.__name__
newfunc.__doc__ = func.__doc__
newfunc.__abstract__ = True
_add_epytext_field(newfunc, "note", "This method is abstract.")
# Return the function.
return newfunc
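# Illustrative sketch (the ProcessorI interface below is hypothetical):
# subclasses must override the decorated method; calling it on the base class
# raises NotImplementedError.
#     class ProcessorI(object):
#         @abstract
#         def process(self, text):
#             """Process ``text`` and return the result."""
#     ProcessorI().process('hi')    # raises NotImplementedError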
##########################################################################
# Wrapper for ElementTree Elements
##########################################################################
class ElementWrapper(object):
"""
A wrapper around ElementTree Element objects whose main purpose is
to provide nicer __repr__ and __str__ methods. In addition, any
of the wrapped Element's methods that return other Element objects
are overridden to wrap those values before returning them.
This makes Elements more convenient to work with in
interactive sessions and doctests, at the expense of some
efficiency.
"""
# Prevent double-wrapping:
def __new__(cls, etree):
"""
Create and return a wrapper around a given Element object.
If ``etree`` is an ``ElementWrapper``, then ``etree`` is
returned as-is.
"""
if isinstance(etree, ElementWrapper):
return etree
else:
return object.__new__(ElementWrapper, etree)
def __init__(self, etree):
"""
Initialize a new Element wrapper for ``etree``. If
``etree`` is a string, then it will be converted to an
Element object using ``ElementTree.fromstring()`` first.
"""
if isinstance(etree, basestring):
etree = ElementTree.fromstring(etree)
self.__dict__['_etree'] = etree
def unwrap(self):
"""
Return the Element object wrapped by this wrapper.
"""
return self._etree
##////////////////////////////////////////////////////////////
#{ String Representation
##////////////////////////////////////////////////////////////
def __repr__(self):
s = ElementTree.tostring(self._etree)
if len(s) > 60:
e = s.rfind('<')
if (len(s)-e) > 30: e = -20
s = '%s...%s' % (s[:30], s[e:])
return '<Element %r>' % s
def __str__(self):
"""
:return: the result of applying ``ElementTree.tostring()`` to
the wrapped Element object.
"""
return ElementTree.tostring(self._etree).rstrip()
##////////////////////////////////////////////////////////////
#{ Element interface Delegation (pass-through)
##////////////////////////////////////////////////////////////
def __getattr__(self, attrib):
return getattr(self._etree, attrib)
def __setattr__(self, attr, value):
return setattr(self._etree, attr, value)
def __delattr__(self, attr):
return delattr(self._etree, attr)
def __setitem__(self, index, element):
self._etree[index] = element
def __delitem__(self, index):
del self._etree[index]
def __setslice__(self, start, stop, elements):
self._etree[start:stop] = elements
def __delslice__(self, start, stop):
del self._etree[start:stop]
def __len__(self):
return len(self._etree)
##////////////////////////////////////////////////////////////
#{ Element interface Delegation (wrap result)
##////////////////////////////////////////////////////////////
def __getitem__(self, index):
return ElementWrapper(self._etree[index])
def __getslice__(self, start, stop):
return [ElementWrapper(elt) for elt in self._etree[start:stop]]
def getchildren(self):
return [ElementWrapper(elt) for elt in self._etree]
def getiterator(self, tag=None):
return (ElementWrapper(elt)
for elt in self._etree.getiterator(tag))
def makeelement(self, tag, attrib):
return ElementWrapper(self._etree.makeelement(tag, attrib))
def find(self, path):
elt = self._etree.find(path)
if elt is None: return elt
else: return ElementWrapper(elt)
def findall(self, path):
return [ElementWrapper(elt) for elt in self._etree.findall(path)]
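# Illustrative usage (XML snippet invented for this sketch): lookups return
# wrapped elements, so interactive reprs stay readable.
#     >>> tree = ElementWrapper('<doc><title>Sample</title></doc>')
#     >>> tree.find('title')
#     <Element '<title>Sample</title>'>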
######################################################################
# Helper for Handling Slicing
######################################################################
def slice_bounds(sequence, slice_obj, allow_step=False):
"""
Given a slice, return the corresponding (start, stop) bounds,
taking into account None indices and negative indices. The
following guarantees are made for the returned start and stop values:
- 0 <= start <= len(sequence)
- 0 <= stop <= len(sequence)
- start <= stop
:raise ValueError: If ``slice_obj.step`` is not None.
:param allow_step: If true, then the slice object may have a
non-None step. If it does, then return a tuple
(start, stop, step).
"""
start, stop = (slice_obj.start, slice_obj.stop)
# If allow_step is true, then include the step in our return
# value tuple.
if allow_step:
step = slice_obj.step
if step is None: step = 1
# Use a recursive call without allow_step to find the slice
# bounds. If step is negative, then the roles of start and
# stop (in terms of default values, etc), are swapped.
if step < 0:
start, stop = slice_bounds(sequence, slice(stop, start))
else:
start, stop = slice_bounds(sequence, slice(start, stop))
return start, stop, step
# Otherwise, make sure that no non-default step value is used.
elif slice_obj.step not in (None, 1):
raise ValueError('slices with steps are not supported by %s' %
sequence.__class__.__name__)
# Supply default offsets.
if start is None: start = 0
if stop is None: stop = len(sequence)
# Handle negative indices.
if start < 0: start = max(0, len(sequence)+start)
if stop < 0: stop = max(0, len(sequence)+stop)
# Make sure stop doesn't go past the end of the list. Note that
# we avoid calculating len(sequence) if possible, because for lazy
# sequences, calculating the length of a sequence can be expensive.
if stop > 0:
try: sequence[stop-1]
except IndexError: stop = len(sequence)
# Make sure start isn't past stop.
start = min(start, stop)
# That's all folks!
return start, stop
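# Illustrative usage: negative and missing indices are normalised against the
# sequence length, so callers always get in-range, ordered bounds.
#     >>> slice_bounds(['a', 'b', 'c', 'd', 'e'], slice(1, -1))
#     (1, 4)
#     >>> slice_bounds(['a', 'b', 'c', 'd', 'e'], slice(None, 100))
#     (0, 5)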
######################################################################
# Permission Checking
######################################################################
def is_writable(path):
# Ensure that it exists.
if not os.path.exists(path):
return False
# If we're on a posix system, check its permissions.
if hasattr(os, 'getuid'):
statdata = os.stat(path)
perm = stat.S_IMODE(statdata.st_mode)
# is it world-writable?
if (perm & 0002):
return True
# do we own it?
elif statdata.st_uid == os.getuid() and (perm & 0200):
return True
# are we in a group that can write to it?
elif statdata.st_gid == os.getgid() and (perm & 0020):
return True
# otherwise, we can't write to it.
else:
return False
# Otherwise, we'll assume it's writable.
# [xx] should we do other checks on other platforms?
return True
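# Illustrative usage (paths are hypothetical): on POSIX systems the answer is
# derived from the mode bits and ownership; elsewhere any existing path is
# assumed writable.
#     is_writable('/tmp')            # typically True on POSIX
#     is_writable('/no/such/path')   # False -- the path does not exist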
| {
"content_hash": "908365eed4dea61793b2102fe2859d28",
"timestamp": "",
"source": "github",
"line_count": 856,
"max_line_length": 93,
"avg_line_length": 38.48364485981308,
"alnum_prop": 0.560135996600085,
"repo_name": "jjhuff/fcc-comments",
"id": "ed8a6b621c0b7dd79c0b1e26fc051cf0504bb2e8",
"size": "33258",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/nltk/internals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1509"
},
{
"name": "JavaScript",
"bytes": "29380"
},
{
"name": "Python",
"bytes": "4323274"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used all the way back to
# python-2.4.
try:
import ipaddress
except ImportError:
ipaddress = None
__version__ = '3.5.0.1'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
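# Illustrative checks (hostnames invented for this sketch): a single left-most
# wildcard matches exactly one label.
#     >>> bool(_dnsname_match('*.example.com', 'www.example.com'))
#     True
#     >>> bool(_dnsname_match('*.example.com', 'a.b.example.com'))
#     False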
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
return obj
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence from upstream: ipaddress can't handle byte str
host_ip = ipaddress.ip_address(_to_unicode(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence from upstream: Have to deal with ipaddress not taking
# byte strings. addresses should be all ascii, so we consider it not
# an ipaddress in this case
host_ip = None
except AttributeError:
# Divergence from upstream: Make ipaddress library optional
if ipaddress is None:
host_ip = None
else:
raise
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| {
"content_hash": "9930aa710a5ca8a5d7ad1525d03a9d3e",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 80,
"avg_line_length": 36.1948051948052,
"alnum_prop": 0.6194833153928956,
"repo_name": "amisrs/one-eighty",
"id": "06538ec6899adc761df2952cf9f2a01c3fe73aca",
"size": "5574",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "angular_flask/lib/python2.7/site-packages/backports/ssl_match_hostname/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1745293"
},
{
"name": "CSS",
"bytes": "23378"
},
{
"name": "HTML",
"bytes": "44161"
},
{
"name": "JavaScript",
"bytes": "53246"
},
{
"name": "Python",
"bytes": "18542714"
},
{
"name": "Shell",
"bytes": "6484"
}
],
"symlink_target": ""
} |
'text file write or read'
import os
#current path file
fname=''
ls=os.linesep
while True:
print '$$$$$ please select file operation\n'+'(1)read a text file'
print '(2)write to a text file\n'+'(x)exit the op'
op=raw_input('select op=')
if op=='x':
print 'closed'
break
#read a text file
elif op=='1':
print 'read op,please input file name'
iname=raw_input('filename=')
fname='./'+iname
if os.path.exists(fname)==False:
print "file not exist in",fname
try:
fobj=open(fname,'r')
except IOError,e:
print '** file open error:',e
else:
for eachline in fobj:
print eachline,
fobj.close()
print 'read file completed'
#write a text file
elif op=='2':
all=[]
print 'write op,please input file name'
iname=raw_input('filename=')
fname='./'+iname
print "Enter text ... when '.' quit"
while True:
en=raw_input('>')
if en=='.':
break
else:
all.append(en)
fobj=open(fname,'w')
fobj.writelines(['%s%s' % (x,ls) for x in all])
fobj.close()
print 'File Write complete'
else:
print 'select error,not 1,2 or x'
| {
"content_hash": "ea8dca40ae9abb707592d3bdfc74e1d5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 67,
"avg_line_length": 18.96551724137931,
"alnum_prop": 0.6181818181818182,
"repo_name": "shenhzou654321/TDDDemo",
"id": "33f19de0fea79da9239b13a7be8d661bf2754a04",
"size": "1119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythonNotes/textFileOp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "342544"
},
{
"name": "Python",
"bytes": "5562"
},
{
"name": "Shell",
"bytes": "5932"
}
],
"symlink_target": ""
} |
'''
he3.db.tools.mappers
contains mappers written to work with App Engine MapReduce
http://code.google.com/p/appengine-mapreduce/
Created on Jun 27, 2010
@author: Ben Davies, Helium 3 IT Solutions
'''
import google.appengine.ext.db as db
import mapreduce.operation as op
class Mapper (object):
'''A standard base class for Mappers defined here'''
@staticmethod
def process(data):
'''Standard method call to invoke mapper on mapped data. Intended to
        be overridden.
'''
pass
class ModelHygieneMapper (Mapper):
'''A datastore mapper for performing common maintenance tasks on
datastore model entities.'''
@staticmethod
def process(entity):
'''Checks and repairs model integrity of the passed entity
1. Removes dangling references
2. Sets undefined datastore values to the default
'''
props = [x for x in entity.__class__.__dict__.values()\
if isinstance(x, db.Property)]
changed = False
for prop in props:
if not prop.get_value_for_datastore(entity):
prop.__set__(entity, prop.default_value())
changed = True
elif isinstance(prop, db.ReferenceProperty):
if not db.get(prop.get_value_for_datastore(entity)):
prop.__set__(entity, None)
changed = True
if changed: yield op.db.Put(entity)
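# Illustrative sketch (the Book model and book_entity variable are hypothetical,
# not part of this module): when MapReduce feeds process() an entity whose
# ReferenceProperty points at a deleted record, the reference is cleared and a
# Put operation is yielded so the framework persists the repaired entity.
#     class Book(db.Model):
#         author = db.ReferenceProperty()
#     for operation in ModelHygieneMapper.process(book_entity):
#         pass  # each yielded operation is an op.db.Put(book_entity)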
| {
"content_hash": "d4909670dd91b6828e36d6eda7603fa4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 26.5,
"alnum_prop": 0.6777358490566038,
"repo_name": "chartjes/liesitoldmykids",
"id": "643436caea6e48bd2debb31e402a8239addacd08",
"size": "1325",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "he3/db/tools/mappers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "84537"
},
{
"name": "JavaScript",
"bytes": "6164"
},
{
"name": "Python",
"bytes": "810481"
}
],
"symlink_target": ""
} |
import pytz
from datetime import datetime, timedelta
from django.db.models.signals import post_save
from django.contrib.auth.models import Group, Permission
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.test.client import Client, RequestFactory
from django.utils.timezone import make_aware, now
import mock
from factory.django import mute_signals
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase, requires_login, requires_permission
from remo.profiles.tests import UserFactory
from remo.remozilla.models import Bug
from remo.remozilla.tests import BugFactory
from remo.voting.models import (Poll, PollComment, RadioPoll, RadioPollChoice,
RangePoll, RangePollChoice)
from remo.voting.tests import (PollCommentFactory, PollFactory, RadioPollFactory,
RadioPollChoiceFactory, RangePollFactory, RangePollChoiceFactory)
from remo.voting.views import view_voting
class ViewsTest(RemoTestCase):
"""Tests related to Voting Views."""
def setUp(self):
"""Initial data for the tests."""
self.nominee1, self.nominee2, self.nominee3 = UserFactory.create_batch(3, groups=['Rep'])
self.rep = UserFactory.create(groups=['Rep'])
self.admin = UserFactory.create(groups=['Admin', 'Rep'])
self.mozillian = UserFactory.create(groups=['Mozillians'])
poll_start = now() + timedelta(days=5)
self.admin_group = Group.objects.get(name='Admin')
self.rep_group = Group.objects.get(name='Rep')
self.poll = PollFactory.create(valid_groups=self.admin_group,
start=poll_start,
end=poll_start + timedelta(days=10),
comments_allowed=False)
self.range_poll = RangePollFactory(poll=self.poll)
self.range_poll_choice1 = RangePollChoiceFactory(range_poll=self.range_poll,
nominee=self.nominee1)
self.range_poll_choice2 = RangePollChoiceFactory(range_poll=self.range_poll,
nominee=self.nominee2)
self.radio_poll = RadioPollFactory(poll=self.poll)
self.radio_poll_choice1, self.radio_poll_choice2 = RadioPollChoiceFactory.create_batch(
2, radio_poll=self.radio_poll)
self.post_data = {'range_poll__1': 1,
'range_poll__2': 2,
'radio_poll__1': 2}
self.edit_future_data = {
'name': u'Test edit voting',
'description': u'This is a description.',
'created_by': self.poll.created_by.id,
'valid_groups': self.admin_group.id,
'start_form_0_year': now().year + 1,
'start_form_0_month': 10,
'start_form_0_day': 1,
'start_form_1_hour': 7,
'start_form_1_minute': 00,
'end_form_0_year': now().year + 1,
'end_form_0_month': 10,
'end_form_0_day': 4,
'end_form_1_hour': 7,
'end_form_1_minute': 00,
'range_polls-TOTAL_FORMS': u'1',
'range_polls-INITIAL_FORMS': u'1',
'range_polls-MAX_NUM_FORMS': u'1000',
'range_polls-0-id': self.range_poll.id,
'range_polls-0-name': u'Current Range Poll 1',
'{0}_range_choices-0-id'.format(self.range_poll.id): self.range_poll_choice1.id,
'{0}_range_choices-0-nominee'.format(self.range_poll.id): self.nominee1.id,
'{0}_range_choices-0-DELETE'.format(self.range_poll.id): False,
'{0}_range_choices-1-id'.format(self.range_poll.id): self.range_poll_choice2.id,
'{0}_range_choices-1-nominee'.format(self.range_poll.id): self.nominee2.id,
'{0}_range_choices-1-DELETE'.format(self.range_poll.id): False,
'{0}_range_choices-2-id'.format(self.range_poll.id): u'',
'{0}_range_choices-2-nominee'.format(self.range_poll.id): self.nominee3.id,
'{0}_range_choices-2-DELETE'.format(self.range_poll.id): False,
'{0}_range_choices-TOTAL_FORMS'.format(self.range_poll.id): u'3',
'{0}_range_choices-INITIAL_FORMS'.format(self.range_poll.id): u'2',
            '{0}_range_choices-MAX_NUM_FORMS'.format(self.range_poll.id): u'1000',
'radio_polls-0-id': self.radio_poll.id,
'radio_polls-0-question': u'Radio Poll - Question 1',
'radio_polls-TOTAL_FORMS': u'1',
'radio_polls-INITIAL_FORMS': u'1',
'radio_polls-MAX_NUM_FORMS': u'1000',
'{0}_radio_choices-TOTAL_FORMS'.format(self.radio_poll.id): u'2',
'{0}_radio_choices-INITIAL_FORMS'.format(self.radio_poll.id): u'2',
'{0}_radio_choices-MAX_NUM_FORMS'.format(self.radio_poll.id): u'1000',
'{0}_radio_choices-0-id'.format(self.radio_poll.id): self.radio_poll_choice1.id,
'{0}_radio_choices-0-answer'.format(self.radio_poll.id): u'Radio Poll - Answer 1',
'{0}_radio_choices-0-DELETE'.format(self.radio_poll.id): False,
'{0}_radio_choices-1-id'.format(self.radio_poll.id): self.radio_poll_choice2.id,
'{0}_radio_choices-1-answer'.format(self.radio_poll.id): u'Radio Poll - Answer 2',
'{0}_radio_choices-1-DELETE'.format(self.radio_poll.id): False}
self.edit_current_data = {
'name': u'Test edit voting',
'description': u'This is a description.',
'created_by': self.nominee1.id,
'valid_groups': self.admin_group.id,
'start_form_0_year': 2011,
'end_form_0_year': now().year,
'end_form_0_month': 10,
'end_form_0_day': 4,
'end_form_1_hour': 7,
'end_form_1_minute': 00}
# Give permissions to admin group
group = Group.objects.get(name='Admin')
permissions = Permission.objects.filter(name__icontains='poll')
for perm in permissions:
group.permissions.add(perm)
def test_view_list_votings(self):
"""Get list votings page."""
# Get as anonymous user.
client = Client()
response = client.get(reverse('voting_list_votings'), follow=True)
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'main.jinja')
# Get as logged in Rep.
with self.login(self.rep) as client:
response = client.get(reverse('voting_list_votings'))
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
@mock.patch('remo.voting.views.messages')
def test_view_current_voting(self, faked_message):
"""View a voting."""
rep_group = Group.objects.get(name='Rep')
poll_start = now() - timedelta(days=5)
poll = PollFactory.create(valid_groups=rep_group,
end=poll_start + timedelta(days=10))
# Anonymous user.
c = Client()
response = c.get(poll.get_absolute_url(), follow=True)
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'main.jinja')
# Logged in user.
with self.login(self.rep) as client:
response = client.get(poll.get_absolute_url())
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'vote_voting.jinja')
        # Logged in as a mozillian user - not valid voting group.
with self.login(self.mozillian) as client:
response = client.get(poll.get_absolute_url(), follow=True)
faked_message.error.assert_called_once_with(
mock.ANY, 'You do not have the permissions to vote on this voting.')
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
@mock.patch('remo.voting.views.messages')
def test_view_cast_a_vote(self, fake_messages):
"""Cast a vote on a voting."""
UserFactory.create(username='remobot')
poll_start = now() - timedelta(days=5)
poll = PollFactory.create(valid_groups=self.rep_group, start=poll_start,
end=poll_start + timedelta(days=10),
comments_allowed=False)
# Cast a vote as a valid user.
with self.login(self.rep) as client:
response = client.post(poll.get_absolute_url(), self.post_data)
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
fake_messages.success.assert_called_once_with(
mock.ANY, 'Your vote has been successfully registered.')
# Ensure that there is a vote for user 'rep'
poll = Poll.objects.get(id=poll.id)
eq_(poll.users_voted.filter(username=self.rep.username).count(), 1)
# Cast a vote as a valid user for a second time.
with self.login(self.rep) as client:
response = client.post(poll.get_absolute_url(), self.post_data, follow=True)
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
fake_messages.warning.assert_called_once_with(
mock.ANY, ('You have already cast your vote for this voting. '
'Come back to see the results on %s UTC.'
% poll.end.strftime('%Y %B %d, %H:%M')))
eq_(poll.users_voted.filter(username=self.rep.username).count(), 1)
# Cast a vote as an invalid user.
with self.login(self.mozillian) as client:
response = client.post(poll.get_absolute_url(), self.post_data, follow=True)
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
fake_messages.error.assert_called_once_with(
mock.ANY, ('You do not have the permissions to vote '
'on this voting.'))
eq_(poll.users_voted.filter(username=self.mozillian.username).count(), 0)
@mock.patch('remo.voting.views.messages')
def test_view_post_a_comment(self, fake_messages):
"""Post a comment on an automated poll."""
poll_start = now() - timedelta(days=5)
poll_user = UserFactory.create(groups=['Council'])
poll_group = Group.objects.get(name='Council')
bug = BugFactory.create()
swag_poll = PollFactory.create(name='swag poll', start=poll_start,
end=poll_start + timedelta(days=15),
created_by=poll_user,
valid_groups=poll_group,
bug=bug,
automated_poll=True,
description='Swag poll description.',
slug='swag-poll')
vote_url = reverse('voting_view_voting',
kwargs={'slug': 'swag-poll'})
factory = RequestFactory()
request = factory.post(vote_url, {'comment': 'This is a comment'},
follow=True)
request.user = poll_user
view_voting(request, slug=swag_poll.slug)
poll_comment = PollComment.objects.get(poll=swag_poll)
eq_(poll_comment.user, poll_user)
eq_(poll_comment.comment, 'This is a comment')
fake_messages.success.assert_called_once_with(
mock.ANY, 'Comment saved successfully.')
@mock.patch('remo.voting.views.messages')
def test_view_voting_results(self, faked_message):
"""View the results of a voting."""
poll_start = now() - timedelta(days=5)
poll = PollFactory.create(valid_groups=self.rep_group,
start=poll_start,
end=poll_start - timedelta(days=3),
comments_allowed=False)
# Anonymous user.
client = Client()
response = client.get(poll.get_absolute_url(), follow=True)
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'main.jinja')
# Logged in user.
with self.login(self.rep) as client:
response = client.get(poll.get_absolute_url())
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'view_voting.jinja')
# Logged in user, invalid voting group.
with self.login(self.mozillian) as client:
response = client.get(poll.get_absolute_url())
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
faked_message.error.assert_called_once_with(
mock.ANY, 'You do not have the permissions to vote on this voting.')
def test_view_future_voting(self):
"""View a voting planned to start in the future."""
poll_start = now() + timedelta(days=5)
poll = PollFactory.create(valid_groups=self.rep_group,
start=poll_start,
end=poll_start + timedelta(days=10),
comments_allowed=False)
with self.login(self.rep) as client:
response = client.get(poll.get_absolute_url())
self.assertJinja2TemplateUsed(response, 'vote_voting.jinja')
def test_view_edit_future_voting(self):
"""Edit future voting test."""
# logged in as a non-admin user.
with mock.patch('remo.base.decorators.messages.error') as faked_message:
with self.login(self.rep) as client:
response = client.post(reverse('voting_edit_voting',
kwargs={'slug': self.poll.slug}),
self.edit_future_data,
follow=True)
eq_(response.request['PATH_INFO'], '/')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Permission denied.')
# Logged in as administrator.
with mock.patch('remo.voting.views.messages.success') as faked_message:
with self.login(self.admin) as client:
response = client.post(reverse('voting_edit_voting',
kwargs={'slug': self.poll.slug}),
self.edit_future_data)
eq_(response.request['PATH_INFO'],
reverse('voting_edit_voting', kwargs={'slug': self.poll.slug}))
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Voting successfully edited.')
# Ensure voting data get saved.
poll = Poll.objects.get(name='Test edit voting')
# Test fields with the same name in POST data and models.
excluded = ['valid_groups', 'created_by']
for field in set(self.edit_future_data).difference(set(excluded)):
if getattr(poll, field, None):
eq_(getattr(poll, field), self.edit_future_data[field])
# Test excluded fields.
eq_(self.edit_future_data['valid_groups'], poll.valid_groups.id)
eq_(self.edit_future_data['created_by'], poll.created_by.id)
# Ensure Range/Radio Polls are saved.
range_poll = RangePoll.objects.get(poll_id=poll.id)
nominees = []
for choice in RangePollChoice.objects.filter(range_poll_id=range_poll.id):
nominees.append(choice.nominee.id)
        # Ensure that the nominees saved in the poll are the same as in the POST data
eq_(set([self.nominee1.id, self.nominee2.id, self.nominee3.id]), set(nominees))
name = self.edit_future_data['range_polls-0-name']
eq_(name, range_poll.name)
radio_poll = RadioPoll.objects.get(poll_id=poll.id)
answers = []
for choice in RadioPollChoice.objects.filter(radio_poll_id=radio_poll.id):
answers.append(choice.answer)
eq_(set(['Radio Poll - Answer 1', 'Radio Poll - Answer 2']), set(answers))
question = self.edit_future_data['radio_polls-0-question']
eq_(question, radio_poll.question)
# Ensure voting start/end is saved.
month = self.edit_future_data['start_form_0_month']
day = self.edit_future_data['start_form_0_day']
year = self.edit_future_data['start_form_0_year']
hour = self.edit_future_data['start_form_1_hour']
minute = self.edit_future_data['start_form_1_minute']
start = datetime(year, month, day, hour, minute)
eq_(make_aware(start, pytz.UTC), poll.start)
month = self.edit_future_data['end_form_0_month']
day = self.edit_future_data['end_form_0_day']
year = self.edit_future_data['end_form_0_year']
hour = self.edit_future_data['end_form_1_hour']
minute = self.edit_future_data['end_form_1_minute']
end = datetime(year, month, day, hour, minute)
eq_(make_aware(end, pytz.UTC), poll.end)
def test_view_edit_current_voting(self):
"""Test current voting test."""
poll_start = now() - timedelta(days=5)
poll = PollFactory.create(valid_groups=self.admin_group, start=poll_start,
end=poll_start + timedelta(days=10),
comments_allowed=False,
created_by=self.nominee1)
# Logged in as a non-admin user.
with mock.patch('remo.base.decorators.messages.error') as faked_message:
with self.login(self.rep) as client:
response = client.post(reverse('voting_edit_voting', kwargs={'slug': poll.slug}),
self.edit_current_data, follow=True)
eq_(response.request['PATH_INFO'], '/')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Permission denied.')
# Logged in as administrator.
with mock.patch('remo.voting.views.messages.success') as faked_message:
with self.login(self.admin) as client:
response = client.post(reverse('voting_edit_voting', kwargs={'slug': poll.slug}),
self.edit_current_data, follow=True)
eq_(response.request['PATH_INFO'],
reverse('voting_edit_voting', kwargs={'slug': poll.slug}))
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Voting successfully edited.')
# Ensure voting data get saved.
poll = Poll.objects.get(name='Test edit voting')
# Test fields with the same name in POST data and models.
excluded = ['valid_groups', 'created_by']
for field in set(self.edit_current_data).difference(set(excluded)):
if getattr(poll, field, None):
eq_(getattr(poll, field), self.edit_current_data[field])
# Test excluded fields.
eq_(self.edit_current_data['created_by'], poll.created_by.id)
eq_(self.edit_current_data['valid_groups'], poll.valid_groups.id)
# Ensure voting end is saved.
month = self.edit_current_data['end_form_0_month']
day = self.edit_current_data['end_form_0_day']
year = self.edit_current_data['end_form_0_year']
hour = self.edit_current_data['end_form_1_hour']
minute = self.edit_current_data['end_form_1_minute']
end = datetime(year, month, day, hour, minute)
eq_(make_aware(end, pytz.UTC), poll.end)
start_year = self.edit_current_data['start_form_0_year']
self.assertNotEqual(poll.start.year, start_year)
def test_view_edit_voting(self):
"""Test view edit voting."""
poll_start = now() - timedelta(days=5)
poll = PollFactory.create(valid_groups=self.admin_group, start=poll_start,
end=poll_start + timedelta(days=10),
comments_allowed=False,
created_by=self.nominee1)
# Anonymous user
c = Client()
response = c.get(reverse('voting_edit_voting', kwargs={'slug': poll.slug}), follow=True)
eq_(response.status_code, 200)
self.assertJinja2TemplateUsed(response, 'main.jinja')
# Logged in user.
with mock.patch('remo.base.decorators.messages.error') as faked_message:
with self.login(self.rep) as client:
response = client.get(reverse('voting_edit_voting', kwargs={'slug': poll.slug}),
follow=True)
self.assertJinja2TemplateUsed(response, 'main.jinja')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Permission denied.')
# Logged in as admin
with self.login(self.admin) as client:
response = client.get(reverse('voting_edit_voting', kwargs={'slug': poll.slug}))
self.assertJinja2TemplateUsed(response, 'edit_voting.jinja')
def test_view_delete_voting(self):
"""Test delete voting."""
# Anonymous user.
with mock.patch('remo.base.decorators.messages.warning') as faked_message:
c = Client()
response = c.get(reverse('voting_delete_voting',
kwargs={'slug': self.poll.slug}),
follow=True)
self.assertJinja2TemplateUsed(response, 'main.jinja')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Please login.')
# Valid user with no permissions.
with mock.patch('remo.base.decorators.messages.error') as faked_message:
with self.login(self.rep) as client:
response = client.get(reverse('voting_delete_voting',
kwargs={'slug': self.poll.slug}),
follow=True)
self.assertJinja2TemplateUsed(response, 'main.jinja')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Permission denied.')
# Login as administrator.
with mock.patch('remo.voting.views.messages.success') as faked_message:
with self.login(self.admin) as client:
response = client.post(reverse('voting_delete_voting',
kwargs={'slug': self.poll.slug}),
follow=True)
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Voting successfully deleted.')
def test_view_delete_automated_poll(self):
with mute_signals(post_save):
poll_start = now() - timedelta(days=5)
poll_user = UserFactory.create(groups=['Review'])
poll_group = Group.objects.get(name='Review')
bug = BugFactory.create()
swag_poll = PollFactory.create(name='swag poll', start=poll_start,
end=poll_start + timedelta(days=15),
created_by=poll_user,
valid_groups=poll_group,
bug=bug,
automated_poll=True,
description='Swag poll description.',
slug='swag-poll')
with mock.patch('remo.voting.views.messages.success') as faked_message:
with self.login(self.admin) as client:
response = client.post(reverse('voting_delete_voting',
kwargs={'slug': swag_poll.slug}),
follow=True)
self.assertJinja2TemplateUsed(response, 'list_votings.jinja')
ok_(faked_message.called)
eq_(faked_message.call_args_list[0][0][1], 'Voting successfully deleted.')
ok_(not Poll.objects.filter(id=swag_poll.id).exists())
ok_(not Bug.objects.filter(id=bug.id).exists())
class VotingCommentingSystem(RemoTestCase):
@mock.patch('remo.voting.views.messages.success')
@mock.patch('remo.voting.views.forms.PollCommentForm')
def test_post_a_comment(self, form_mock, messages_mock):
user = UserFactory.create(groups=['Rep'])
group = Group.objects.get(name='Rep')
poll = PollFactory.create(created_by=user, valid_groups=group)
form_mock.is_valid.return_value = True
with self.login(user) as client:
response = client.post(poll.get_absolute_url(),
user=user,
data={'comment': 'This is a comment'})
eq_(response.status_code, 200)
messages_mock.assert_called_with(
mock.ANY, 'Comment saved successfully.')
ok_(form_mock().save.called)
eq_(response.context['poll'], poll)
self.assertJinja2TemplateUsed(response, 'vote_voting.jinja')
@mock.patch('remo.voting.views.redirect', wraps=redirect)
def test_delete_as_owner(self, redirect_mock):
user = UserFactory.create(groups=['Rep'])
group = Group.objects.get(name='Rep')
poll = PollFactory.create(created_by=user, valid_groups=group)
comment = PollCommentFactory.create(poll=poll, user=user,
comment='This is a comment')
with self.login(user) as client:
client.post(comment.get_absolute_delete_url(), user=comment.user)
ok_(not PollComment.objects.filter(pk=comment.id).exists())
redirect_mock.assert_called_with(poll.get_absolute_url())
@requires_login()
def test_delete_as_anonymous(self):
comment = PollCommentFactory.create()
client = Client()
client.post(comment.get_absolute_delete_url(), data={})
ok_(PollComment.objects.filter(pk=comment.id).exists())
@requires_permission()
def test_delete_as_other_rep(self):
user = UserFactory.create(groups=['Rep'])
group = Group.objects.get(name='Rep')
poll = PollFactory.create(created_by=user, valid_groups=group)
comment = PollCommentFactory.create(poll=poll, user=user,
comment='This is a comment')
other_rep = UserFactory.create(groups=['Rep'])
with self.login(other_rep) as client:
client.post(comment.get_absolute_delete_url(), user=other_rep)
ok_(PollComment.objects.filter(pk=comment.id).exists())
@mock.patch('remo.reports.views.redirect', wraps=redirect)
def test_delete_as_admin(self, redirect_mock):
user = UserFactory.create(groups=['Admin'])
comment = PollCommentFactory.create()
with self.login(user) as client:
client.post(comment.get_absolute_delete_url(), user=user)
ok_(not PollComment.objects.filter(pk=comment.id).exists())
| {
"content_hash": "695ca9ed42aa73378ae5ad493ba90d3b",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 97,
"avg_line_length": 49.32664233576642,
"alnum_prop": 0.5802227072620325,
"repo_name": "mozilla/remo",
"id": "c73ca19739c95a2bfd1052ffc2221684c61d4673",
"size": "27031",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "remo/voting/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "228359"
},
{
"name": "HTML",
"bytes": "325696"
},
{
"name": "JavaScript",
"bytes": "288713"
},
{
"name": "Python",
"bytes": "763657"
},
{
"name": "Shell",
"bytes": "648"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that we have proper strings like Copyright notices on all the
right files in our distributions.
Note that this is a source file and packaging test, not a functional test,
so the name of this script doesn't end in *Tests.py.
"""
import fnmatch
import os
import os.path
import re
import TestCmd
import TestSCons
# Use TestCmd, not TestSCons, so we don't chdir to a temporary directory.
test = TestCmd.TestCmd()
scons_version = TestSCons.SConsVersion
def build_path(*args):
return os.path.join('build', *args)
build_scons = build_path('scons')
build_local = build_path('scons-local', 'scons-local-'+scons_version)
build_src = build_path('scons-src')
class Checker(object):
def __init__(self, directory,
search_list = [],
remove_list = [],
remove_patterns = []):
self.directory = directory
self.search_list = search_list
self.remove_dict = {}
for r in remove_list:
self.remove_dict[os.path.join(directory, r)] = 1
self.remove_patterns = remove_patterns
def directory_exists(self):
return os.path.exists(self.directory)
def remove_this(self, name, path):
if self.remove_dict.get(path):
return 1
else:
for pattern in self.remove_patterns:
if fnmatch.fnmatch(name, pattern):
return 1
return 0
def search_this(self, path):
if self.search_list:
for pattern in self.search_list:
if fnmatch.fnmatch(path, pattern):
return 1
return None
else:
return os.path.isfile(path)
def find_missing(self):
result = []
for dirpath, dirnames, filenames in os.walk(self.directory):
if '.svn' in dirnames:
dirnames.remove('.svn')
for dname in dirnames[:]:
dpath = os.path.join(dirpath, dname)
if self.remove_this(dname, dpath):
dirnames.remove(dname)
for fname in filenames:
fpath = os.path.join(dirpath, fname)
if self.search_this(fpath) and not self.remove_this(fname, fpath):
body = open(fpath, 'r').read()
for expr in self.expressions:
if not expr.search(body):
msg = '%s: missing %s' % (fpath, repr(expr.pattern))
result.append(msg)
return result
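# Illustrative sketch (CheckShebang is hypothetical, shown only to make the
# pattern explicit): concrete checkers supply compiled regular expressions and
# a must_be_built() policy, and are then instantiated with a directory plus
# file filters in check_list below.
#     class CheckShebang(Checker):
#         expressions = [
#             re.compile(r'#!.*python'),
#         ]
#         def must_be_built(self):
#             return None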
class CheckUnexpandedStrings(Checker):
expressions = [
re.compile('__COPYRIGHT__'),
re.compile('__FILE__ __REVISION__ __DATE__ __DEVELOPER__'),
]
def must_be_built(self):
return None
class CheckPassTest(Checker):
expressions = [
re.compile(r'\.pass_test()'),
]
def must_be_built(self):
return None
class CheckExpandedCopyright(Checker):
expressions = [
re.compile('Copyright.*The SCons Foundation'),
]
def must_be_built(self):
return 1
check_list = [
CheckUnexpandedStrings(
'src',
search_list = [ '*.py' ],
remove_list = [
'engine/SCons/compat/_scons_sets.py',
'engine/SCons/compat/_scons_subprocess.py',
'engine/SCons/Conftest.py',
'engine/SCons/dblite.py',
],
),
CheckUnexpandedStrings(
'test',
search_list = [ '*.py' ],
),
CheckPassTest(
'test',
search_list = [ '*.py' ],
remove_list = [
'Fortran/common.py',
],
),
CheckExpandedCopyright(
build_scons,
remove_list = [
'build',
'build-stamp',
'configure-stamp',
'debian',
'dist',
'gentoo',
'engine/SCons/compat/_scons_sets.py',
'engine/SCons/compat/_scons_subprocess.py',
'engine/SCons/Conftest.py',
'engine/SCons/dblite.py',
'MANIFEST',
'setup.cfg',
],
# We run epydoc on the *.py files, which generates *.pyc files.
remove_patterns = [
'*.pyc',
]
),
CheckExpandedCopyright(
build_local,
remove_list = [
'SCons/compat/_scons_sets.py',
'SCons/compat/_scons_subprocess.py',
'SCons/Conftest.py',
'SCons/dblite.py',
'scons-%s.egg-info' % scons_version,
],
),
CheckExpandedCopyright(
build_src,
remove_list = [
'bench/timeit.py',
'bin',
'config',
'debian',
'gentoo',
'doc/design',
'doc/MANIFEST',
'doc/python10',
'doc/reference',
'doc/developer/MANIFEST',
'doc/man/MANIFEST',
'doc/user/cons.pl',
'doc/user/MANIFEST',
'doc/user/SCons-win32-install-1.jpg',
'doc/user/SCons-win32-install-2.jpg',
'doc/user/SCons-win32-install-3.jpg',
'doc/user/SCons-win32-install-4.jpg',
'examples',
'gentoo',
'QMTest/classes.qmc',
'QMTest/configuration',
'QMTest/TestCmd.py',
'QMTest/TestCmdTests.py',
'QMTest/TestCommon.py',
'QMTest/TestCommonTests.py',
'src/MANIFEST.in',
'src/setup.cfg',
'src/engine/MANIFEST.in',
'src/engine/MANIFEST-xml.in',
'src/engine/setup.cfg',
'src/engine/SCons/compat/_scons_sets.py',
'src/engine/SCons/compat/_scons_subprocess.py',
'src/engine/SCons/Conftest.py',
'src/engine/SCons/dblite.py',
'src/script/MANIFEST.in',
'src/script/setup.cfg',
'test/Fortran/.exclude_tests',
'timings/changelog.html',
'timings/ElectricCloud/genscons.pl',
'timings/graph.html',
'timings/index.html',
'review.py',
],
remove_patterns = [
'*.js',
]
),
]
missing_strings = []
not_built = []
for collector in check_list:
if collector.directory_exists():
missing_strings.extend(collector.find_missing())
elif collector.must_be_built():
not_built.append(collector.directory)
if missing_strings:
print "Found the following files with missing strings:"
print "\t" + "\n\t".join(missing_strings)
test.fail_test(1)
if not_built:
print "Cannot check all strings, the following have apparently not been built:"
print "\t" + "\n\t".join(not_built)
test.no_result(1)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "3bb98bced34cd572121d17a16ddff1b1",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 83,
"avg_line_length": 28.59504132231405,
"alnum_prop": 0.5332369942196532,
"repo_name": "andrewyoung1991/scons",
"id": "3288d5f62b7b74d0b65ad04f3a318c57a6448cb2",
"size": "8022",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "746"
},
{
"name": "C++",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "44714"
},
{
"name": "Python",
"bytes": "7385906"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52194"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import gc
from ilastik.gui.ribbons.ilastikTabBase import IlastikTabBase, TabButton
from ilastik.core.dataMgr import PropertyMgr
from ilastik.gui.overlayWidget import OverlayWidget
from ilastik.gui.iconMgr import ilastikIcons
from ilastik.modules.classification.gui.guiThreads import *
from ilastik.modules.classification.gui.labelWidget import LabelListWidget
from ilastik.modules.classification.gui.featureDlg import FeatureDlg
from ilastik.modules.classification.gui.classifierSelectionDialog import ClassifierSelectionDlg
#*******************************************************************************
# C l a s s i f i c a t i o n T a b *
#*******************************************************************************
class ClassificationTab(IlastikTabBase, QtGui.QWidget):
name = 'Classification'
position = 1
moduleName = "Classification"
def __init__(self, parent=None):
IlastikTabBase.__init__(self, parent)
QtGui.QWidget.__init__(self, parent)
self._initContent()
self._initConnects()
def on_activation(self):
if self.ilastik.project is None:
return
if self.ilastik._activeImage.module[self.name] is None:
self.ilastik._activeImage.module[self.name] = PropertyMgr(self.ilastik._activeImage)
ovs = self.ilastik._activeImage.module[self.name].getOverlayRefs()
raw = self.ilastik._activeImage.overlayMgr["Raw Data"]
self.ilastik.labelWidget._history.volumeEditor = self.ilastik.labelWidget
overlayWidget = OverlayWidget(self.ilastik.labelWidget, self.ilastik.project.dataMgr)
self.ilastik.labelWidget.setOverlayWidget(overlayWidget)
ov = self.ilastik._activeImage.overlayMgr["Classification/Labels"]
overlayWidget.addOverlayRef(ov.getRef())
overlayWidget.addOverlayRef(raw.getRef())
self.ilastik.labelWidget.setLabelWidget(LabelListWidget(self.ilastik.project.dataMgr.module["Classification"].labelMgr, self.ilastik.project.dataMgr.module["Classification"]["labelDescriptions"], self.ilastik.labelWidget, ov))
def on_deActivation(self):
if self.ilastik.project is None:
return
if hasattr(self.parent, "classificationInteractive"):
self.btnStartLive.click()
if self.ilastik.labelWidget is not None and self.ilastik.labelWidget._history != self.ilastik._activeImage.module["Classification"]["labelHistory"]:
self.ilastik._activeImage.module["Classification"]["labelHistory"] = self.ilastik.labelWidget._history
def _initContent(self):
tl = QtGui.QHBoxLayout()
tl.setMargin(0)
self.btnSelectFeatures = TabButton('Select Features', ilastikIcons.Select)
self.btnStartLive = TabButton('Start Live Prediction', ilastikIcons.Play)
self.btnStartLive.setCheckable(True)
self.btnTrainPredict = TabButton('Train and Predict', ilastikIcons.System)
self.btnExportClassifier = TabButton('Export Classifier', ilastikIcons.Select)
self.btnClassifierOptions = TabButton('Classifier Options', ilastikIcons.Select)
self.btnSelectFeatures.setToolTip('Select and compute features')
self.btnStartLive.setToolTip('Toggle interactive prediction of the current image while labeling')
self.btnTrainPredict.setToolTip('Train and predict all images offline; this step is necessary for automation')
self.btnExportClassifier.setToolTip('Save current classifier and its feature settings')
self.btnClassifierOptions.setToolTip('Select a classifier and change its settings')
self.on_otherProject()
tl.addWidget(self.btnSelectFeatures)
tl.addWidget(self.btnStartLive)
tl.addWidget(self.btnTrainPredict)
tl.addStretch()
tl.addWidget(self.btnExportClassifier)
tl.addWidget(self.btnClassifierOptions)
self.setLayout(tl)
def _initConnects(self):
self.connect(self.btnSelectFeatures, QtCore.SIGNAL('clicked()'), self.on_btnSelectFeatures_clicked)
self.connect(self.btnStartLive, QtCore.SIGNAL('toggled(bool)'), self.on_btnStartLive_clicked)
self.connect(self.btnTrainPredict, QtCore.SIGNAL('clicked()'), self.on_btnTrainPredict_clicked)
self.connect(self.btnExportClassifier, QtCore.SIGNAL('clicked()'), self.on_btnExportClassifier_clicked)
self.connect(self.btnClassifierOptions, QtCore.SIGNAL('clicked()'), self.on_btnClassifierOptions_clicked)
def on_otherProject(self):
self.btnSelectFeatures.setEnabled(True)
self.btnStartLive.setEnabled(False)
self.btnTrainPredict.setEnabled(False)
self.btnExportClassifier.setEnabled(False)
self.btnClassifierOptions.setEnabled(True)
def on_btnSelectFeatures_clicked(self):
preview = self.parent.project.dataMgr[0]._dataVol._data[0,0,:,:,0:3]
newFeatureDlg = FeatureDlg(self.ilastik, preview)
answer = newFeatureDlg.exec_()
if answer == QtGui.QDialog.Accepted:
self.featureComputation = FeatureComputation(self.ilastik)
newFeatureDlg.close()
newFeatureDlg.deleteLater()
del newFeatureDlg
gc.collect()
def on_btnStartLive_clicked(self, state):
if state:
self.ilastik.ribbon.getTab('Classification').btnStartLive.setText('Stop Live Prediction')
self.classificationInteractive = ClassificationInteractive(self.ilastik)
else:
self.classificationInteractive.stop()
self.ilastik.ribbon.getTab('Classification').btnStartLive.setText('Start Live Prediction')
def on_btnTrainPredict_clicked(self):
self.classificationTrain = ClassificationTrain(self.ilastik)
self.connect(self.classificationTrain, QtCore.SIGNAL("trainingFinished()"), self.on_trainingFinished)
def on_trainingFinished(self):
print "Training finished"
self.classificationPredict = ClassificationPredict(self.ilastik)
def on_btnExportClassifier_clicked(self):
fileName = QtGui.QFileDialog.getSaveFileName(self, "Export Classifier", filter = "HDF5 Files (*.h5)")
try:
self.ilastik.project.dataMgr.Classification.exportClassifiers(fileName)
except (RuntimeError, AttributeError, IOError) as e:
QtGui.QMessageBox.warning(self, 'Error', str(e), QtGui.QMessageBox.Ok)
return
try:
self.ilastik.project.dataMgr.Classification.featureMgr.exportFeatureItems(fileName)
except RuntimeError as e:
QtGui.QMessageBox.warning(self, 'Error', str(e), QtGui.QMessageBox.Ok)
return
QtGui.QMessageBox.information(self, 'Success', "The classifier and the feature information have been saved successfully to:\n %s" % str(fileName), QtGui.QMessageBox.Ok)
def on_btnClassifierOptions_clicked(self):
dialog = ClassifierSelectionDlg(self.parent)
self.parent.project.dataMgr.module["Classification"].classifier = dialog.exec_() | {
"content_hash": "81b35b76b28d951ffd2edbae58544db5",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 237,
"avg_line_length": 48.06535947712418,
"alnum_prop": 0.6694316018493337,
"repo_name": "ilastik/ilastik-0.5",
"id": "7fed66f036770172b521879e8c6f0bd3782bbe1d",
"size": "7354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ilastik/modules/classification/gui/classificationRibbon.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "253122"
},
{
"name": "Python",
"bytes": "1053042"
},
{
"name": "Shell",
"bytes": "2694"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class ErrorType(object):
NoError = 0
Server = 1
Network = 2
Authentication = 3
Validation = 4
Other = 5
NotImplemented = 6
FileNotFound = 7
_VALUES_TO_NAMES = {
0: "NoError",
1: "Server",
2: "Network",
3: "Authentication",
4: "Validation",
5: "Other",
6: "NotImplemented",
7: "FileNotFound",
}
_NAMES_TO_VALUES = {
"NoError": 0,
"Server": 1,
"Network": 2,
"Authentication": 3,
"Validation": 4,
"Other": 5,
"NotImplemented": 6,
"FileNotFound": 7,
}
class ArchiveStatus(object):
Completed = 0
InProgress = 1
Paused = 2
Stopped = 3
Failed = 4
Local = 5
_VALUES_TO_NAMES = {
0: "Completed",
1: "InProgress",
2: "Paused",
3: "Stopped",
4: "Failed",
5: "Local",
}
_NAMES_TO_VALUES = {
"Completed": 0,
"InProgress": 1,
"Paused": 2,
"Stopped": 3,
"Failed": 4,
"Local": 5,
}
class CertExportFormat(object):
HTML = 0
YAML = 1
PDF = 2
_VALUES_TO_NAMES = {
0: "HTML",
1: "YAML",
2: "PDF",
}
_NAMES_TO_VALUES = {
"HTML": 0,
"YAML": 1,
"PDF": 2,
}
class InvalidOperation(TException):
"""
Attributes:
- what
- why
- filename
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'what', None, None, ), # 1
(2, TType.STRING, 'why', None, None, ), # 2
(3, TType.STRING, 'filename', None, None, ), # 3
)
def __init__(self, what=None, why=None, filename=None,):
self.what = what
self.why = why
self.filename = filename
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.what = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.why = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.filename = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidOperation')
if self.what is not None:
oprot.writeFieldBegin('what', TType.I32, 1)
oprot.writeI32(self.what)
oprot.writeFieldEnd()
if self.why is not None:
oprot.writeFieldBegin('why', TType.STRING, 2)
oprot.writeString(self.why)
oprot.writeFieldEnd()
if self.filename is not None:
oprot.writeFieldBegin('filename', TType.STRING, 3)
oprot.writeString(self.filename)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DateInfo(object):
"""
Attributes:
- Day
- Month
- Year
- Hour
- Minutes
- Seconds
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'Day', None, None, ), # 1
(2, TType.I32, 'Month', None, None, ), # 2
(3, TType.I32, 'Year', None, None, ), # 3
(4, TType.I32, 'Hour', None, None, ), # 4
(5, TType.I32, 'Minutes', None, None, ), # 5
(6, TType.I32, 'Seconds', None, None, ), # 6
)
def __init__(self, Day=None, Month=None, Year=None, Hour=None, Minutes=None, Seconds=None,):
self.Day = Day
self.Month = Month
self.Year = Year
self.Hour = Hour
self.Minutes = Minutes
self.Seconds = Seconds
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.Day = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.Month = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.Year = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.Hour = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.Minutes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.Seconds = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DateInfo')
if self.Day is not None:
oprot.writeFieldBegin('Day', TType.I32, 1)
oprot.writeI32(self.Day)
oprot.writeFieldEnd()
if self.Month is not None:
oprot.writeFieldBegin('Month', TType.I32, 2)
oprot.writeI32(self.Month)
oprot.writeFieldEnd()
if self.Year is not None:
oprot.writeFieldBegin('Year', TType.I32, 3)
oprot.writeI32(self.Year)
oprot.writeFieldEnd()
if self.Hour is not None:
oprot.writeFieldBegin('Hour', TType.I32, 4)
oprot.writeI32(self.Hour)
oprot.writeFieldEnd()
if self.Minutes is not None:
oprot.writeFieldBegin('Minutes', TType.I32, 5)
oprot.writeI32(self.Minutes)
oprot.writeFieldEnd()
if self.Seconds is not None:
oprot.writeFieldBegin('Seconds', TType.I32, 6)
oprot.writeI32(self.Seconds)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TransferStatus(object):
"""
Attributes:
- StatusDescription
- ETA
- RemainingBytes
- Progress
- Status
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'StatusDescription', None, None, ), # 1
(2, TType.STRING, 'ETA', None, None, ), # 2
(3, TType.I64, 'RemainingBytes', None, None, ), # 3
(4, TType.DOUBLE, 'Progress', None, None, ), # 4
(5, TType.I32, 'Status', None, None, ), # 5
)
def __init__(self, StatusDescription=None, ETA=None, RemainingBytes=None, Progress=None, Status=None,):
self.StatusDescription = StatusDescription
self.ETA = ETA
self.RemainingBytes = RemainingBytes
self.Progress = Progress
self.Status = Status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.StatusDescription = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ETA = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.RemainingBytes = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.DOUBLE:
self.Progress = iprot.readDouble();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.Status = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TransferStatus')
if self.StatusDescription is not None:
oprot.writeFieldBegin('StatusDescription', TType.STRING, 1)
oprot.writeString(self.StatusDescription)
oprot.writeFieldEnd()
if self.ETA is not None:
oprot.writeFieldBegin('ETA', TType.STRING, 2)
oprot.writeString(self.ETA)
oprot.writeFieldEnd()
if self.RemainingBytes is not None:
oprot.writeFieldBegin('RemainingBytes', TType.I64, 3)
oprot.writeI64(self.RemainingBytes)
oprot.writeFieldEnd()
if self.Progress is not None:
oprot.writeFieldBegin('Progress', TType.DOUBLE, 4)
oprot.writeDouble(self.Progress)
oprot.writeFieldEnd()
if self.Status is not None:
oprot.writeFieldBegin('Status', TType.I32, 5)
oprot.writeI32(self.Status)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ArchiveInfo(object):
"""
Attributes:
- Title
- Description
- SizeInBytes
- CreatedDate
- Md5HexDigits
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'Title', None, None, ), # 1
(2, TType.STRING, 'Description', None, None, ), # 2
(3, TType.I64, 'SizeInBytes', None, None, ), # 3
(4, TType.STRUCT, 'CreatedDate', (DateInfo, DateInfo.thrift_spec), None, ), # 4
(5, TType.STRING, 'Md5HexDigits', None, None, ), # 5
)
def __init__(self, Title=None, Description=None, SizeInBytes=None, CreatedDate=None, Md5HexDigits=None,):
self.Title = Title
self.Description = Description
self.SizeInBytes = SizeInBytes
self.CreatedDate = CreatedDate
self.Md5HexDigits = Md5HexDigits
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.Title = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.Description = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.SizeInBytes = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.CreatedDate = DateInfo()
self.CreatedDate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.Md5HexDigits = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ArchiveInfo')
if self.Title is not None:
oprot.writeFieldBegin('Title', TType.STRING, 1)
oprot.writeString(self.Title)
oprot.writeFieldEnd()
if self.Description is not None:
oprot.writeFieldBegin('Description', TType.STRING, 2)
oprot.writeString(self.Description)
oprot.writeFieldEnd()
if self.SizeInBytes is not None:
oprot.writeFieldBegin('SizeInBytes', TType.I64, 3)
oprot.writeI64(self.SizeInBytes)
oprot.writeFieldEnd()
if self.CreatedDate is not None:
oprot.writeFieldBegin('CreatedDate', TType.STRUCT, 4)
self.CreatedDate.write(oprot)
oprot.writeFieldEnd()
if self.Md5HexDigits is not None:
oprot.writeFieldBegin('Md5HexDigits', TType.STRING, 5)
oprot.writeString(self.Md5HexDigits)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Capsule(object):
"""
Attributes:
- Created
- ID
- Resource_URI
- Title
- User
- ExpirationDate
- TotalSizeInBytes
- AvailableSizeInBytes
- CapsuleContents
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'Created', None, None, ), # 1
(2, TType.STRING, 'ID', None, None, ), # 2
(3, TType.STRING, 'Resource_URI', None, None, ), # 3
(4, TType.STRING, 'Title', None, None, ), # 4
(5, TType.STRING, 'User', None, None, ), # 5
(6, TType.STRUCT, 'ExpirationDate', (DateInfo, DateInfo.thrift_spec), None, ), # 6
(7, TType.I64, 'TotalSizeInBytes', None, None, ), # 7
(8, TType.I64, 'AvailableSizeInBytes', None, None, ), # 8
(9, TType.LIST, 'CapsuleContents', (TType.STRUCT,(ArchiveInfo, ArchiveInfo.thrift_spec)), None, ), # 9
)
def __init__(self, Created=None, ID=None, Resource_URI=None, Title=None, User=None, ExpirationDate=None, TotalSizeInBytes=None, AvailableSizeInBytes=None, CapsuleContents=None,):
self.Created = Created
self.ID = ID
self.Resource_URI = Resource_URI
self.Title = Title
self.User = User
self.ExpirationDate = ExpirationDate
self.TotalSizeInBytes = TotalSizeInBytes
self.AvailableSizeInBytes = AvailableSizeInBytes
self.CapsuleContents = CapsuleContents
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.Created = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ID = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.Resource_URI = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.Title = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.User = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.ExpirationDate = DateInfo()
self.ExpirationDate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.TotalSizeInBytes = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I64:
self.AvailableSizeInBytes = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.CapsuleContents = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = ArchiveInfo()
_elem5.read(iprot)
self.CapsuleContents.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Capsule')
if self.Created is not None:
oprot.writeFieldBegin('Created', TType.STRING, 1)
oprot.writeString(self.Created)
oprot.writeFieldEnd()
if self.ID is not None:
oprot.writeFieldBegin('ID', TType.STRING, 2)
oprot.writeString(self.ID)
oprot.writeFieldEnd()
if self.Resource_URI is not None:
oprot.writeFieldBegin('Resource_URI', TType.STRING, 3)
oprot.writeString(self.Resource_URI)
oprot.writeFieldEnd()
if self.Title is not None:
oprot.writeFieldBegin('Title', TType.STRING, 4)
oprot.writeString(self.Title)
oprot.writeFieldEnd()
if self.User is not None:
oprot.writeFieldBegin('User', TType.STRING, 5)
oprot.writeString(self.User)
oprot.writeFieldEnd()
if self.ExpirationDate is not None:
oprot.writeFieldBegin('ExpirationDate', TType.STRUCT, 6)
self.ExpirationDate.write(oprot)
oprot.writeFieldEnd()
if self.TotalSizeInBytes is not None:
oprot.writeFieldBegin('TotalSizeInBytes', TType.I64, 7)
oprot.writeI64(self.TotalSizeInBytes)
oprot.writeFieldEnd()
if self.AvailableSizeInBytes is not None:
oprot.writeFieldBegin('AvailableSizeInBytes', TType.I64, 8)
oprot.writeI64(self.AvailableSizeInBytes)
oprot.writeFieldEnd()
if self.CapsuleContents is not None:
oprot.writeFieldBegin('CapsuleContents', TType.LIST, 9)
oprot.writeListBegin(TType.STRUCT, len(self.CapsuleContents))
for iter6 in self.CapsuleContents:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Archive(object):
"""
Attributes:
- LocalID
- Status
- Info
- Sandbox
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'LocalID', None, None, ), # 1
(2, TType.I32, 'Status', None, None, ), # 2
(3, TType.STRUCT, 'Info', (ArchiveInfo, ArchiveInfo.thrift_spec), None, ), # 3
(4, TType.BOOL, 'Sandbox', None, None, ), # 4
)
def __init__(self, LocalID=None, Status=None, Info=None, Sandbox=None,):
self.LocalID = LocalID
self.Status = Status
self.Info = Info
self.Sandbox = Sandbox
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.LocalID = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.Status = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.Info = ArchiveInfo()
self.Info.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.Sandbox = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Archive')
if self.LocalID is not None:
oprot.writeFieldBegin('LocalID', TType.STRING, 1)
oprot.writeString(self.LocalID)
oprot.writeFieldEnd()
if self.Status is not None:
oprot.writeFieldBegin('Status', TType.I32, 2)
oprot.writeI32(self.Status)
oprot.writeFieldEnd()
if self.Info is not None:
oprot.writeFieldBegin('Info', TType.STRUCT, 3)
self.Info.write(oprot)
oprot.writeFieldEnd()
if self.Sandbox is not None:
oprot.writeFieldBegin('Sandbox', TType.BOOL, 4)
oprot.writeBool(self.Sandbox)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Signature(object):
"""
Attributes:
- ArchiveID
- DateCreated
- UploaderName
- UploaderEmail
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'ArchiveID', None, None, ), # 1
(2, TType.STRUCT, 'DateCreated', (DateInfo, DateInfo.thrift_spec), None, ), # 2
(3, TType.STRING, 'UploaderName', None, None, ), # 3
(4, TType.STRING, 'UploaderEmail', None, None, ), # 4
)
def __init__(self, ArchiveID=None, DateCreated=None, UploaderName=None, UploaderEmail=None,):
self.ArchiveID = ArchiveID
self.DateCreated = DateCreated
self.UploaderName = UploaderName
self.UploaderEmail = UploaderEmail
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.ArchiveID = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.DateCreated = DateInfo()
self.DateCreated.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.UploaderName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.UploaderEmail = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Signature')
if self.ArchiveID is not None:
oprot.writeFieldBegin('ArchiveID', TType.STRING, 1)
oprot.writeString(self.ArchiveID)
oprot.writeFieldEnd()
if self.DateCreated is not None:
oprot.writeFieldBegin('DateCreated', TType.STRUCT, 2)
self.DateCreated.write(oprot)
oprot.writeFieldEnd()
if self.UploaderName is not None:
oprot.writeFieldBegin('UploaderName', TType.STRING, 3)
oprot.writeString(self.UploaderName)
oprot.writeFieldEnd()
if self.UploaderEmail is not None:
oprot.writeFieldBegin('UploaderEmail', TType.STRING, 4)
oprot.writeString(self.UploaderEmail)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Certificate(object):
"""
Attributes:
- HexDigitsKey
- Sig
- RelatedArchive
- LocalID
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'HexDigitsKey', None, None, ), # 1
(2, TType.STRUCT, 'Sig', (Signature, Signature.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'RelatedArchive', (ArchiveInfo, ArchiveInfo.thrift_spec), None, ), # 3
(4, TType.STRING, 'LocalID', None, None, ), # 4
)
def __init__(self, HexDigitsKey=None, Sig=None, RelatedArchive=None, LocalID=None,):
self.HexDigitsKey = HexDigitsKey
self.Sig = Sig
self.RelatedArchive = RelatedArchive
self.LocalID = LocalID
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.HexDigitsKey = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.Sig = Signature()
self.Sig.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.RelatedArchive = ArchiveInfo()
self.RelatedArchive.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.LocalID = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Certificate')
if self.HexDigitsKey is not None:
oprot.writeFieldBegin('HexDigitsKey', TType.STRING, 1)
oprot.writeString(self.HexDigitsKey)
oprot.writeFieldEnd()
if self.Sig is not None:
oprot.writeFieldBegin('Sig', TType.STRUCT, 2)
self.Sig.write(oprot)
oprot.writeFieldEnd()
if self.RelatedArchive is not None:
oprot.writeFieldBegin('RelatedArchive', TType.STRUCT, 3)
self.RelatedArchive.write(oprot)
oprot.writeFieldEnd()
if self.LocalID is not None:
oprot.writeFieldBegin('LocalID', TType.STRING, 4)
oprot.writeString(self.LocalID)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Settings(object):
"""
Attributes:
- StoredUserName
- StoredPassword
- RememberMe
- ArchivesFolder
- CertificatesFolder
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'StoredUserName', None, None, ), # 1
(2, TType.STRING, 'StoredPassword', None, None, ), # 2
(3, TType.BOOL, 'RememberMe', None, None, ), # 3
(4, TType.STRING, 'ArchivesFolder', None, None, ), # 4
(5, TType.STRING, 'CertificatesFolder', None, None, ), # 5
)
def __init__(self, StoredUserName=None, StoredPassword=None, RememberMe=None, ArchivesFolder=None, CertificatesFolder=None,):
self.StoredUserName = StoredUserName
self.StoredPassword = StoredPassword
self.RememberMe = RememberMe
self.ArchivesFolder = ArchivesFolder
self.CertificatesFolder = CertificatesFolder
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.StoredUserName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.StoredPassword = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.RememberMe = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.ArchivesFolder = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.CertificatesFolder = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Settings')
if self.StoredUserName is not None:
oprot.writeFieldBegin('StoredUserName', TType.STRING, 1)
oprot.writeString(self.StoredUserName)
oprot.writeFieldEnd()
if self.StoredPassword is not None:
oprot.writeFieldBegin('StoredPassword', TType.STRING, 2)
oprot.writeString(self.StoredPassword)
oprot.writeFieldEnd()
if self.RememberMe is not None:
oprot.writeFieldBegin('RememberMe', TType.BOOL, 3)
oprot.writeBool(self.RememberMe)
oprot.writeFieldEnd()
if self.ArchivesFolder is not None:
oprot.writeFieldBegin('ArchivesFolder', TType.STRING, 4)
oprot.writeString(self.ArchivesFolder)
oprot.writeFieldEnd()
if self.CertificatesFolder is not None:
oprot.writeFieldBegin('CertificatesFolder', TType.STRING, 5)
oprot.writeString(self.CertificatesFolder)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class VersionInfo(object):
"""
Attributes:
- version
- description
- uri
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'version', None, None, ), # 1
(2, TType.STRING, 'description', None, None, ), # 2
(3, TType.STRING, 'uri', None, None, ), # 3
)
def __init__(self, version=None, description=None, uri=None,):
self.version = version
self.description = description
self.uri = uri
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.version = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.description = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.uri = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('VersionInfo')
if self.version is not None:
oprot.writeFieldBegin('version', TType.STRING, 1)
oprot.writeString(self.version)
oprot.writeFieldEnd()
if self.description is not None:
oprot.writeFieldBegin('description', TType.STRING, 2)
oprot.writeString(self.description)
oprot.writeFieldEnd()
if self.uri is not None:
oprot.writeFieldBegin('uri', TType.STRING, 3)
oprot.writeString(self.uri)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
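# --- Editorial sketch (not part of the generated module) ---
# Minimal round-trip of one generated struct through the Thrift binary
# protocol, added here for illustration only; the TMemoryBuffer /
# TBinaryProtocol usage is standard Thrift, but nothing below comes from
# the code generator itself.
#
#   from thrift.transport import TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   write_buf = TTransport.TMemoryBuffer()
#   DateInfo(Day=1, Month=2, Year=2014).write(
#       TBinaryProtocol.TBinaryProtocol(write_buf))
#
#   read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
#   copy = DateInfo()
#   copy.read(TBinaryProtocol.TBinaryProtocol(read_buf))
#   assert (copy.Day, copy.Month, copy.Year) == (1, 2, 2014)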
| {
"content_hash": "d94bae7cf9ae4a78e2b7a5b75c1e1e97",
"timestamp": "",
"source": "github",
"line_count": 1159,
"max_line_length": 188,
"avg_line_length": 31.055220017256257,
"alnum_prop": 0.6163976328730586,
"repo_name": "longaccess/longaccess-client",
"id": "f6efefd88528bc58ae1549a966d87d998f5cf16d",
"size": "36152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lacli/server/interface/ClientInterface/ttypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "15176"
},
{
"name": "PowerShell",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "342473"
},
{
"name": "Shell",
"bytes": "4450"
}
],
"symlink_target": ""
} |
import optparse
import os
import sys
from util import build_utils
from util import proguard_util
def _ParseOptions(args):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--proguard-path',
help='Path to the proguard executable.')
parser.add_option('--input-paths',
help='Paths to the .jar files proguard should run on.')
parser.add_option('--output-path', help='Path to the generated .jar file.')
parser.add_option('--proguard-configs',
help='Paths to proguard configuration files.')
parser.add_option('--mapping', help='Path to proguard mapping to apply.')
parser.add_option('--is-test', action='store_true',
help='If true, extra proguard options for instrumentation tests will be '
'added.')
parser.add_option('--tested-apk-info', help='Path to the proguard .info file '
'for the tested apk')
parser.add_option('--classpath', action='append',
help='Classpath for proguard.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--verbose', '-v', action='store_true',
help='Print all proguard output')
options, _ = parser.parse_args(args)
classpath = []
for arg in options.classpath:
classpath += build_utils.ParseGypList(arg)
options.classpath = classpath
return options
def main(args):
args = build_utils.ExpandFileArgs(args)
options = _ParseOptions(args)
proguard = proguard_util.ProguardCmdBuilder(options.proguard_path)
proguard.injars(build_utils.ParseGypList(options.input_paths))
proguard.configs(build_utils.ParseGypList(options.proguard_configs))
proguard.outjar(options.output_path)
if options.mapping:
proguard.mapping(options.mapping)
if options.tested_apk_info:
proguard.tested_apk_info(options.tested_apk_info)
classpath = list(set(options.classpath))
proguard.libraryjars(classpath)
proguard.verbose(options.verbose)
input_paths = proguard.GetInputs()
build_utils.CallAndWriteDepfileIfStale(
proguard.CheckOutput,
options,
input_paths=input_paths,
input_strings=proguard.build(),
output_paths=[options.output_path])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
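# --- Editorial illustration (not part of the original script) ---
# Example invocation using only the flags defined by _ParseOptions above;
# the jar and config paths are placeholders, not taken from a real build:
#
#   python proguard.py \
#       --proguard-path third_party/proguard/bin/proguard.sh \
#       --input-paths obj/foo.pre.jar \
#       --output-path obj/foo.proguard.jar \
#       --proguard-configs proguard.flags \
#       --classpath path/to/android.jar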
| {
"content_hash": "10b3f9ce492e075178c018d68ff56a78",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 32.33802816901409,
"alnum_prop": 0.6890243902439024,
"repo_name": "wuhengzhi/chromium-crosswalk",
"id": "d019350a33ce56bd05c4d051f2757a99630e7c6f",
"size": "2483",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "build/android/gyp/proguard.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""The jewish_calendar component."""
import logging
from typing import Optional
import hdate
import voluptuous as vol
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)
DOMAIN = "jewish_calendar"
SENSOR_TYPES = {
"binary": {
"issur_melacha_in_effect": ["Issur Melacha in Effect", "mdi:power-plug-off"]
},
"data": {
"date": ["Date", "mdi:judaism"],
"weekly_portion": ["Parshat Hashavua", "mdi:book-open-variant"],
"holiday": ["Holiday", "mdi:calendar-star"],
"omer_count": ["Day of the Omer", "mdi:counter"],
"daf_yomi": ["Daf Yomi", "mdi:book-open-variant"],
},
"time": {
"first_light": ["Alot Hashachar", "mdi:weather-sunset-up"],
"talit": ["Talit and Tefillin", "mdi:calendar-clock"],
"gra_end_shma": ['Latest time for Shma Gr"a', "mdi:calendar-clock"],
"mga_end_shma": ['Latest time for Shma MG"A', "mdi:calendar-clock"],
"gra_end_tfila": ['Latest time for Tefilla MG"A', "mdi:calendar-clock"],
"mga_end_tfila": ['Latest time for Tefilla Gr"a', "mdi:calendar-clock"],
"big_mincha": ["Mincha Gedola", "mdi:calendar-clock"],
"small_mincha": ["Mincha Ketana", "mdi:calendar-clock"],
"plag_mincha": ["Plag Hamincha", "mdi:weather-sunset-down"],
"sunset": ["Shkia", "mdi:weather-sunset"],
"first_stars": ["T'set Hakochavim", "mdi:weather-night"],
"upcoming_shabbat_candle_lighting": [
"Upcoming Shabbat Candle Lighting",
"mdi:candle",
],
"upcoming_shabbat_havdalah": ["Upcoming Shabbat Havdalah", "mdi:weather-night"],
"upcoming_candle_lighting": ["Upcoming Candle Lighting", "mdi:candle"],
"upcoming_havdalah": ["Upcoming Havdalah", "mdi:weather-night"],
},
}
CONF_DIASPORA = "diaspora"
CONF_LANGUAGE = "language"
CONF_CANDLE_LIGHT_MINUTES = "candle_lighting_minutes_before_sunset"
CONF_HAVDALAH_OFFSET_MINUTES = "havdalah_minutes_after_sunset"
CANDLE_LIGHT_DEFAULT = 18
DEFAULT_NAME = "Jewish Calendar"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DIASPORA, default=False): cv.boolean,
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_LANGUAGE, default="english"): vol.In(
["hebrew", "english"]
),
vol.Optional(
CONF_CANDLE_LIGHT_MINUTES, default=CANDLE_LIGHT_DEFAULT
): int,
# Default of 0 means use 8.5 degrees / 'three_stars' time.
vol.Optional(CONF_HAVDALAH_OFFSET_MINUTES, default=0): int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def get_unique_prefix(
location: hdate.Location,
language: str,
candle_lighting_offset: Optional[int],
havdalah_offset: Optional[int],
) -> str:
"""Create a prefix for unique ids."""
config_properties = [
location.latitude,
location.longitude,
location.timezone,
location.altitude,
location.diaspora,
language,
candle_lighting_offset,
havdalah_offset,
]
prefix = "_".join(map(str, config_properties))
return f"{prefix}"
async def async_setup(hass, config):
"""Set up the Jewish Calendar component."""
name = config[DOMAIN][CONF_NAME]
language = config[DOMAIN][CONF_LANGUAGE]
latitude = config[DOMAIN].get(CONF_LATITUDE, hass.config.latitude)
longitude = config[DOMAIN].get(CONF_LONGITUDE, hass.config.longitude)
diaspora = config[DOMAIN][CONF_DIASPORA]
candle_lighting_offset = config[DOMAIN][CONF_CANDLE_LIGHT_MINUTES]
havdalah_offset = config[DOMAIN][CONF_HAVDALAH_OFFSET_MINUTES]
location = hdate.Location(
latitude=latitude,
longitude=longitude,
timezone=hass.config.time_zone,
diaspora=diaspora,
)
prefix = get_unique_prefix(
location, language, candle_lighting_offset, havdalah_offset
)
hass.data[DOMAIN] = {
"location": location,
"name": name,
"language": language,
"candle_lighting_offset": candle_lighting_offset,
"havdalah_offset": havdalah_offset,
"diaspora": diaspora,
"prefix": prefix,
}
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config))
hass.async_create_task(
async_load_platform(hass, "binary_sensor", DOMAIN, {}, config)
)
return True
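# --- Editorial sketch (not part of the original component) ---
# How CONFIG_SCHEMA above fills in defaults for a minimal user config; the
# sample values are assumptions chosen for illustration, not real settings.
#
#   sample = {DOMAIN: {"diaspora": True}}
#   validated = CONFIG_SCHEMA(sample)
#   validated[DOMAIN]["name"]      # -> "Jewish Calendar"
#   validated[DOMAIN]["language"]  # -> "english"
#   validated[DOMAIN]["candle_lighting_minutes_before_sunset"]  # -> 18
#   validated[DOMAIN]["havdalah_minutes_after_sunset"]          # -> 0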
| {
"content_hash": "4ad1a031e69b99e250397186b4c99c37",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 88,
"avg_line_length": 34.48571428571429,
"alnum_prop": 0.6174399337199669,
"repo_name": "tchellomello/home-assistant",
"id": "dfde274faa8c910d9c5631cadf398f90c958699b",
"size": "4828",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/jewish_calendar/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
"""
# test_fil2h5
"""
import pytest
import os
import blimpy as bl
from tests.data import voyager_fil
VOYA_DIR = os.path.dirname(voyager_fil) + "/"
def name_case(in_string, out_string):
infile = in_string
outfile = out_string
os.system("cp " + voyager_fil + " " + infile)
bl.fil2h5.make_h5_file(infile)
if not os.path.exists(outfile):
print("\n*** name_case: file {} does not exist. Input file {}\n".format(outfile, infile))
assert False
os.remove(infile)
os.remove(outfile)
def test_fil2h5_conversion():
""" Tests the conversion of fil files into h5 in both light and heavy modes.
"""
# Creating test file.
bl.fil2h5.make_h5_file(voyager_fil, new_filename='test.h5')
# Testing filename
bl.fil2h5.make_h5_file(voyager_fil, new_filename='test')
# Deleting test file
os.remove('test.h5')
def test_cmd_tool():
"""
This is the same test file, but now through the cmd tool.
"""
#with pytest.raises(SystemExit):
args = [voyager_fil, '-n', VOYA_DIR + 'cmd.h5']
bl.fil2h5.cmd_tool(args=args)
def test_fil2h5_input_names():
""" Make sure that the output name does not get mangled.
"""
name_case("abcd.filter.def.fil", "abcd.filter.def.h5")
name_case("abcd.efgh", "abcd.efgh.h5")
name_case("abcd", "abcd.h5")
def test_no_args():
"""
The cmd tool needs to exit, mandating a file name.
"""
with pytest.raises(SystemExit):
bl.fil2h5.cmd_tool("")
if __name__ == "__main__":
test_fil2h5_conversion()
test_cmd_tool()
test_fil2h5_input_names()
test_no_args()
| {
"content_hash": "4154f2bc9b4f2f04d26c9066e685752a",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 24.861538461538462,
"alnum_prop": 0.6262376237623762,
"repo_name": "UCBerkeleySETI/blimpy",
"id": "9a08710439a1966251fe6756ac42e8b1f6168954",
"size": "1616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fil2h5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "492"
},
{
"name": "Jupyter Notebook",
"bytes": "179482"
},
{
"name": "Python",
"bytes": "282017"
},
{
"name": "Shell",
"bytes": "1125"
},
{
"name": "TeX",
"bytes": "13936"
}
],
"symlink_target": ""
} |
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import sys
from bunch import *
import numpy as np
import random
from sklearn.linear_model import LogisticRegression
from nltk.stem import WordNetLemmatizer
import re
import cPickle
def load_unlabeled_docs_processed(data_path):
b = Bunch()
b.data = []
b.pmids = []
with open(data_path) as f:
ctr = -1
for line in f:
ctr += 1
if ctr % 100000 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = line.strip().split('\t')
pmid = item[0]
data = item[1]
b.pmids.append(pmid)
b.data.append(data)
return b
if __name__ == "__main__":
if len(sys.argv) != 3:
print >>sys.stderr, "need 2 args: path to all (pubmed) TSV, output path"
sys.exit(1)
all_path = sys.argv[1]
with open('clf.pkl', 'rb') as f:
print >>sys.stderr, "Loading classifier for %s" % all_path
clf = cPickle.load(f)
with open('count_vect.pkl', 'rb') as f:
print >>sys.stderr, "Loading count vectorizer for %s" % all_path
count_vect = cPickle.load(f)
with open('tfidf_transformer.pkl', 'rb') as f:
print >>sys.stderr, "Loading tfidf transformer for %s" % all_path
tfidf_transformer = cPickle.load(f)
print >>sys.stderr, "Loading all docs"
docs_new = load_unlabeled_docs_processed(all_path)
print >>sys.stderr, "Number of docs: %d" % len(docs_new.data)
print >>sys.stderr, "Transforming new docs through count vectorization"
X_new_counts = count_vect.transform(docs_new.data)
print >>sys.stderr, "Transforming new docs through tf-idf"
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
print >>sys.stderr, "Predicting over new docs"
predicted = clf.predict(X_new_tfidf)
print >>sys.stderr, "Printing to %s" % sys.argv[2]
with open(sys.argv[2], 'w') as f:
for i, value in enumerate(predicted):
if value == 1:
print >>f, docs_new.pmids[i]
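# --- Editorial note (not part of the original script) ---
# Sketch of how the pickled artifacts loaded above are typically produced
# at training time; the variable names mirror this script, but the training
# code itself is an assumption, not part of the repository excerpted here.
#
#   count_vect = CountVectorizer()
#   X_counts = count_vect.fit_transform(train_docs)
#   tfidf_transformer = TfidfTransformer()
#   X_tfidf = tfidf_transformer.fit_transform(X_counts)
#   clf = LogisticRegression().fit(X_tfidf, train_labels)
#   for name, obj in (('count_vect.pkl', count_vect),
#                     ('tfidf_transformer.pkl', tfidf_transformer),
#                     ('clf.pkl', clf)):
#       with open(name, 'wb') as f:
#           cPickle.dump(obj, f)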
| {
"content_hash": "0508602d96a3cdb2e15ee0aaba327560",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 76,
"avg_line_length": 34.05172413793103,
"alnum_prop": 0.6632911392405063,
"repo_name": "HazyResearch/dd-genomics",
"id": "83b4cf2c6964279ebd9501ca3dd77d7cf7a8cbd5",
"size": "2019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "document_classifier/classification/classify.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "22186"
},
{
"name": "Java",
"bytes": "25863"
},
{
"name": "JavaScript",
"bytes": "10928"
},
{
"name": "Jupyter Notebook",
"bytes": "19968"
},
{
"name": "Python",
"bytes": "510253"
},
{
"name": "Shell",
"bytes": "196808"
}
],
"symlink_target": ""
} |
"""
Zonomi DNS Driver
"""
from libcloud.common.zonomi import ZonomiConnection, ZonomiResponse
from libcloud.common.zonomi import ZonomiException
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import Provider, RecordType
__all__ = [
'ZonomiDNSDriver',
]
class ZonomiDNSResponse(ZonomiResponse):
pass
class ZonomiDNSConnection(ZonomiConnection):
responseCls = ZonomiDNSResponse
class ZonomiDNSDriver(DNSDriver):
type = Provider.ZONOMI
name = 'Zonomi DNS'
website = 'https://zonomi.com'
connectionCls = ZonomiDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.MX: 'MX',
RecordType.TXT: 'TXT'
}
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
action = '/app/dns/dyndns.jsp?'
params = {'action': 'QUERYZONES', 'api_key': self.key}
response = self.connection.request(action=action, params=params)
zones = self._to_zones(response.objects)
return zones
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
action = '/app/dns/dyndns.jsp?'
params = {'action': 'QUERY', 'name': '**.' + zone.id}
try:
response = self.connection.request(action=action, params=params)
except ZonomiException as e:
if e.code == '404':
raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
value=e.message)
raise e
records = self._to_records(response.objects, zone)
return records
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
zone = None
zones = self.list_zones()
for z in zones:
if z.id == zone_id:
zone = z
if zone is None:
raise ZoneDoesNotExistError(zone_id=zone_id, driver=self, value='')
return zone
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
record = None
zone = self.get_zone(zone_id=zone_id)
records = self.list_records(zone=zone)
for r in records:
if r.id == record_id:
record = r
if record is None:
raise RecordDoesNotExistError(record_id=record_id, driver=self,
value='')
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``
:rtype: :class:`Zone`
"""
action = '/app/dns/addzone.jsp?'
params = {'name': domain}
try:
self.connection.request(action=action, params=params)
except ZonomiException as e:
if e.message == 'ERROR: This zone is already in your zone list.':
raise ZoneAlreadyExistsError(zone_id=domain, driver=self,
value=e.message)
raise e
zone = Zone(id=domain, domain=domain, type='master', ttl=ttl,
driver=self, extra=extra)
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, MX, TXT).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific, e.g. 'prio' or 'ttl').
(optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
action = '/app/dns/dyndns.jsp?'
if name:
record_name = name + '.' + zone.domain
else:
record_name = zone.domain
params = {'action': 'SET', 'name': record_name, 'value': data,
'type': type}
if type == 'MX' and extra is not None:
params['prio'] = extra.get('prio')
try:
response = self.connection.request(action=action, params=params)
except ZonomiException as e:
if ('ERROR: No zone found for %s' % record_name) in e.message:
raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
value=e.message)
raise e
# we determine if an A or MX record already exists
        # by looking at the response. If the key 'skipped' is present in the
# response, it means record already exists. If this is True,
# then raise RecordAlreadyExistsError
if len(response.objects) != 0 and \
response.objects[0].get('skipped') == 'unchanged':
raise RecordAlreadyExistsError(record_id=name, driver=self,
value='')
if 'DELETED' in response.objects:
for el in response.objects[:2]:
if el.get('content') == data:
response.objects = [el]
records = self._to_records(response.objects, zone=zone)
return records[0]
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
action = '/app/dns/dyndns.jsp?'
params = {'action': 'DELETEZONE', 'name': zone.id}
try:
response = self.connection.request(action=action, params=params)
except ZonomiException as e:
if e.code == '404':
raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
value=e.message)
raise e
return 'DELETED' in response.objects
def delete_record(self, record):
"""
Use this method to delete a record.
:param record: record to delete
        :type record: :class:`Record`
        :rtype: ``bool``
"""
action = '/app/dns/dyndns.jsp?'
params = {'action': 'DELETE', 'name': record.name, 'type': record.type}
try:
response = self.connection.request(action=action, params=params)
except ZonomiException as e:
if e.message == 'Record not deleted.':
raise RecordDoesNotExistError(record_id=record.id, driver=self,
value=e.message)
raise e
return 'DELETED' in response.objects
def ex_convert_to_secondary(self, zone, master):
"""
        Convert an existing zone to a secondary (slave) zone.
:param zone: Zone to convert.
:type zone: :class:`Zone`
:param master: the specified master name server IP address.
:type master: ``str``
        :rtype: ``bool``
"""
action = '/app/dns/converttosecondary.jsp?'
params = {'name': zone.domain, 'master': master}
try:
self.connection.request(action=action, params=params)
except ZonomiException as e:
if 'ERROR: Could not find' in e.message:
raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
value=e.message)
return True
def ex_convert_to_master(self, zone):
"""
        Convert an existing zone to a master zone.
:param zone: Zone to convert.
:type zone: :class:`Zone`
        :rtype: ``bool``
"""
action = '/app/dns/converttomaster.jsp?'
params = {'name': zone.domain}
try:
self.connection.request(action=action, params=params)
except ZonomiException as e:
if 'ERROR: Could not find' in e.message:
raise ZoneDoesNotExistError(zone_id=zone.id, driver=self,
value=e.message)
return True
def _to_zone(self, item):
if item['type'] == 'NATIVE':
type = 'master'
elif item['type'] == 'SLAVE':
type = 'slave'
zone = Zone(id=item['name'], domain=item['name'], type=type,
driver=self, extra={}, ttl=None)
return zone
def _to_zones(self, items):
zones = []
for item in items:
zones.append(self._to_zone(item))
return zones
def _to_record(self, item, zone):
if len(item.get('ttl')) > 0:
ttl = item.get('ttl').split(' ')[0]
else:
ttl = None
extra = {'ttl': ttl,
'prio': item.get('prio')}
if len(item['name']) > len(zone.domain):
full_domain = item['name']
index = full_domain.index('.' + zone.domain)
record_name = full_domain[:index]
else:
record_name = zone.domain
record = Record(id=record_name, name=record_name,
data=item['content'], type=item['type'], zone=zone,
driver=self, ttl=ttl, extra=extra)
return record
def _to_records(self, items, zone):
records = []
for item in items:
records.append(self._to_record(item, zone))
return records
| {
"content_hash": "d3592449598b657028f78951fea92efd",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 79,
"avg_line_length": 31.51212121212121,
"alnum_prop": 0.5400519280700067,
"repo_name": "ByteInternet/libcloud",
"id": "4f0eea0751ea1d80ed28fd58560fbf8297cf08e5",
"size": "11172",
"binary": false,
"copies": "3",
"ref": "refs/heads/byte",
"path": "libcloud/dns/drivers/zonomi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "7579600"
},
{
"name": "Shell",
"bytes": "5936"
}
],
"symlink_target": ""
} |
import sys # for reading from argv
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id='xxIAINXYPLZEZUALDFYQ', aws_secret_access_key='xxfZms2LJR39mi/W3eWBSGs0rD6dgfC9Q8lcCPRV')
# if the queue name is given: proceed
if len(sys.argv) == 2:
    q = conn.get_queue(sys.argv[1])  # queue name in argv
    num = q.count()
    if num > 0:
        print("No. of messages: " + str(num))
    else:
        print("No messages on queue")
else:
    print("FAIL: A name is needed (as argument) for the Queue.")
| {
"content_hash": "f328ed31feb9b9ba502c5153c9c39707",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 154,
"avg_line_length": 35.388888888888886,
"alnum_prop": 0.7409733124018838,
"repo_name": "mcgettin/dockerLab",
"id": "2a6a302fa8bbe6f279568f22e9e156a997e26503",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awsPy/numQmsgs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13402"
},
{
"name": "Shell",
"bytes": "1699"
}
],
"symlink_target": ""
} |
"""
rdf tools package
"""
from .helpers import *
from .association import *
| {
"content_hash": "a938313a4d6701cf94919e5eadd00a17",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 26,
"avg_line_length": 12.833333333333334,
"alnum_prop": 0.6883116883116883,
"repo_name": "SemanticComputing/rdf_dm",
"id": "178ad0fd8f6a67450f8825aeb7857e206733d386",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdf_dm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10155"
}
],
"symlink_target": ""
} |
"""
The `Chain` class inherits from `Link` and allow chaining together several
applications into a single object.
"""
from __future__ import absolute_import, division, print_function
import sys
import os
from collections import OrderedDict
from fermipy.jobs.utils import is_null, is_not_null
from fermipy.jobs.link import Link, extract_arguments
from fermipy.jobs.file_archive import FileStageManager
from fermipy.jobs.job_archive import JobStatus, JobStatusVector,\
JobDetails, JOB_STATUS_STRINGS
def purge_dict(idict):
"""Remove null items from a dictionary """
odict = {}
for key, val in idict.items():
if is_null(val):
continue
odict[key] = val
return odict
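# Illustrative sketch (assumes is_null() treats None as null):
#   purge_dict({'a': 1, 'b': None})  ->  {'a': 1}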
class Chain(Link):
"""An object tying together a series of applications into a
single application.
This class keep track of the arguments to pass to the applications
as well as input and output files.
Note that this class is itself a `Link`. This allows you
to write a python module that implements a chain and also has a
__main__ function to allow it to be called from the shell.
"""
def __init__(self, **kwargs):
"""C'tor """
super(Chain, self).__init__(**kwargs)
self._links = OrderedDict()
@classmethod
def main(cls):
"""Hook to run this `Chain` from the command line """
chain = cls.create()
args = chain._run_argparser(sys.argv[1:])
chain._run_chain(sys.stdout, args.dry_run)
chain._finalize(args.dry_run)
@property
def links(self):
"""Return the `OrderedDict` of `Link` objects owned by this `Chain` """
return self._links
@property
def linknames(self):
"""Return the name of the `Link` objects owned by this `Chain` """
return self._links.keys()
def __getitem__(self, key):
"""Return the `Link` whose linkname is key"""
return self._links[key]
def _latch_file_info(self):
"""Internal function to update the dictionaries
keeping track of input and output files
"""
self._map_arguments(self.args)
self.files.latch_file_info(self.args)
self.sub_files.file_dict.clear()
self.sub_files.update(self.files.file_dict)
for link in self._links.values():
self.sub_files.update(link.files.file_dict)
self.sub_files.update(link.sub_files.file_dict)
def _map_arguments(self, args):
"""Map arguments from the top-level `Chain` options
to the options of `Link` object contained within.
Note in many cases this function will also
decide what set of `Link` objects to create.
Parameters
-------------
args : dict
The current values of the options of the top-level `Chain`
"""
raise NotImplementedError('Chain._map_arguments')
def _set_link(self, linkname, cls, **kwargs):
"""Transfer options kwargs to a `Link` object,
        optionally building the `Link` if needed.
Parameters
----------
linkname : str
Unique name of this particular link
cls : type
Type of `Link` being created or managed
"""
val_copy = purge_dict(kwargs.copy())
sub_link_prefix = val_copy.pop('link_prefix', '')
link_prefix = self.link_prefix + sub_link_prefix
create_args = dict(linkname=linkname,
link_prefix=link_prefix,
job_archive=val_copy.pop('job_archive', None),
file_stage=val_copy.pop('file_stage', None))
job_args = val_copy
if linkname in self._links:
link = self._links[linkname]
link.update_args(job_args)
else:
link = cls.create(**create_args)
self._links[link.linkname] = link
logfile_default = os.path.join('logs', '%s.log' % link.full_linkname)
logfile = kwargs.setdefault('logfile', logfile_default)
link._register_job(JobDetails.topkey, job_args,
logfile, status=JobStatus.unknown)
return link
def _set_links_job_archive(self):
"""Pass self._job_archive along to links"""
for link in self._links.values():
link._job_archive = self._job_archive
def _run_chain(self,
stream=sys.stdout,
dry_run=False,
stage_files=True,
force_run=False,
resubmit_failed=False):
"""Run all the links in the chain
Parameters
-----------
stream : `file`
Stream to print to,
Must have 'write' function
dry_run : bool
Print commands but do not run them
stage_files : bool
Stage files to and from the scratch area
force_run : bool
Run jobs, even if they are marked as done
resubmit_failed : bool
Resubmit failed jobs
"""
self._set_links_job_archive()
failed = False
if self._file_stage is not None:
input_file_mapping, output_file_mapping = self._map_scratch_files(
self.sub_files)
if stage_files:
self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)
self._file_stage.make_scratch_dirs(
output_file_mapping, dry_run)
self._stage_input_files(input_file_mapping, dry_run)
for link in self._links.values():
logfile = os.path.join('logs', "%s.log" % link.full_linkname)
link._archive_self(logfile, status=JobStatus.unknown)
key = JobDetails.make_fullkey(link.full_linkname)
if hasattr(link, 'check_status'):
link.check_status(stream, no_wait=True,
check_once=True, do_print=False)
else:
pass
link_status = link.check_job_status(key)
if link_status in [JobStatus.done]:
if not force_run:
print ("Skipping done link", link.full_linkname)
continue
elif link_status in [JobStatus.running]:
if not force_run and not resubmit_failed:
print ("Skipping running link", link.full_linkname)
continue
elif link_status in [JobStatus.failed,
JobStatus.partial_failed]:
if not resubmit_failed:
print ("Skipping failed link", link.full_linkname)
continue
print ("Running link ", link.full_linkname)
link.run_with_log(dry_run=dry_run, stage_files=False,
resubmit_failed=resubmit_failed)
link_status = link.check_jobs_status()
link._set_status_self(status=link_status)
if link_status in [JobStatus.failed, JobStatus.partial_failed]:
print ("Stoping chain execution at failed link %s" %
link.full_linkname)
failed = True
break
# elif link_status in [JobStatus.partial_failed]:
# print ("Resubmitting partially failed link %s" %
# link.full_linkname)
# link.run_with_log(dry_run=dry_run, stage_files=False,
# resubmit_failed=resubmit_failed)
# link_status = link.check_jobs_status()
# link._set_status_self(status=link_status)
# if link_status in [JobStatus.partial_failed]:
        #                print ("Stopping chain execution: resubmission failed %s" %
# link.full_linkname)
# failed = True
# break
if self._file_stage is not None and stage_files and not failed:
self._stage_output_files(output_file_mapping, dry_run)
chain_status = self.check_links_status()
print ("Chain status: %s" % (JOB_STATUS_STRINGS[chain_status]))
if chain_status == 5:
job_status = 0
else:
job_status = -1
self._write_status_to_log(job_status, stream)
self._set_status_self(status=chain_status)
if self._job_archive:
self._job_archive.file_archive.update_file_status()
self._job_archive.write_table_file()
def clear_jobs(self, recursive=True):
"""Clear a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
for link in self._links.values():
link.clear_jobs(recursive)
self.jobs.clear()
def get_jobs(self, recursive=True):
"""Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
ret_dict = self.jobs.copy()
for link in self._links.values():
ret_dict.update(link.get_jobs(recursive))
return ret_dict
return self.jobs
def missing_input_files(self):
"""Make and return a dictionary of the missing input files.
This returns a dictionary mapping
filepath to list of `Link` that use the file as input.
"""
ret_dict = OrderedDict()
for link in self._links.values():
link_dict = link.missing_input_files()
for key, value in link_dict.items():
try:
ret_dict[key] += value
except KeyError:
ret_dict[key] = value
return ret_dict
def missing_output_files(self):
"""Make and return a dictionary of the missing output files.
This returns a dictionary mapping
filepath to list of links that produce the file as output.
"""
ret_dict = OrderedDict()
for link in self._links.values():
link_dict = link.missing_output_files()
for key, value in link_dict.items():
try:
ret_dict[key] += value
except KeyError:
ret_dict[key] = value
return ret_dict
def check_links_status(self,
fail_running=False,
fail_pending=False):
""""Check the status of all the jobs run from the
`Link` objects in this `Chain` and return a status
flag that summarizes that.
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
status : `JobStatus`
Job status flag that summarizes the status of all the jobs,
"""
status_vector = JobStatusVector()
for link in self._links.values():
key = JobDetails.make_fullkey(link.full_linkname)
link_status = link.check_job_status(key,
fail_running=fail_running,
fail_pending=fail_pending)
status_vector[link_status] += 1
return status_vector.get_status()
def run(self, stream=sys.stdout, dry_run=False,
stage_files=True, resubmit_failed=False):
"""Runs this `Chain`.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
Must have 'write' function
dry_run : bool
Print command but do not run it.
stage_files : bool
Copy files to and from scratch staging area.
resubmit_failed : bool
Flag for sub-classes to resubmit failed jobs.
"""
self._run_chain(stream, dry_run, stage_files,
resubmit_failed=resubmit_failed)
def update_args(self, override_args):
"""Update the argument used to invoke the application
Note that this will also update the dictionary of input
and output files.
Parameters
-----------
override_args : dict
dictionary passed to the links
"""
self.args = extract_arguments(override_args, self.args)
self._map_arguments(self.args)
scratch_dir = self.args.get('scratch', None)
if is_not_null(scratch_dir):
self._file_stage = FileStageManager(scratch_dir, '.')
for link in self._links.values():
link._set_file_stage(self._file_stage)
self._latch_file_info()
def print_status(self, indent="", recurse=False):
"""Print a summary of the job status for each `Link` in this `Chain`"""
print ("%s%30s : %15s : %20s" %
(indent, "Linkname", "Link Status", "Jobs Status"))
for link in self._links.values():
if hasattr(link, 'check_status'):
status_vect = link.check_status(
stream=sys.stdout, no_wait=True, do_print=False)
else:
status_vect = None
key = JobDetails.make_fullkey(link.full_linkname)
link_status = JOB_STATUS_STRINGS[link.check_job_status(key)]
if status_vect is None:
jobs_status = JOB_STATUS_STRINGS[link.check_jobs_status()]
else:
jobs_status = status_vect
print ("%s%30s : %15s : %20s" %
(indent, link.linkname, link_status, jobs_status))
if hasattr(link, 'print_status') and recurse:
print ("---------- %30s -----------" % link.linkname)
link.print_status(indent + " ", recurse=True)
print ("------------------------------------------------")
def print_summary(self, stream=sys.stdout, indent="", recurse_level=2):
"""Print a summary of the activity done by this `Chain`.
Parameters
-----------
stream : `file`
Stream to print to, must have 'write' method.
indent : str
Indentation at start of line
recurse_level : int
Number of recursion levels to print
"""
Link.print_summary(self, stream, indent, recurse_level)
if recurse_level > 0:
recurse_level -= 1
indent += " "
for link in self._links.values():
stream.write("\n")
link.print_summary(stream, indent, recurse_level)
def run_analysis(self, argv):
"""Implemented by sub-classes to run a particular analysis"""
raise RuntimeError("run_analysis called for Chain type object")
| {
"content_hash": "fe142a0dd7c0e910961271ee0790294a",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 81,
"avg_line_length": 35.93253012048193,
"alnum_prop": 0.5519045064377682,
"repo_name": "jefemagril/fermipy",
"id": "bacccdfafeddbcd8f05fcf23dac9880acb6930d0",
"size": "14976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fermipy/jobs/chain.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "17025"
},
{
"name": "Python",
"bytes": "1703049"
},
{
"name": "Ruby",
"bytes": "10647"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "treemap"
_path_str = "treemap.domain"
_valid_props = {"column", "row", "x", "y"}
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
        the grid for this treemap trace.
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
        grid for this treemap trace.
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this treemap trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this treemap trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
            column in the grid for this treemap trace.
        row
            If there is a layout grid, use the domain for this row
            in the grid for this treemap trace.
x
Sets the horizontal domain of this treemap trace (in
plot fraction).
y
Sets the vertical domain of this treemap trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.treemap.Domain`
column
If there is a layout grid, use the domain for this
            column in the grid for this treemap trace.
        row
            If there is a layout grid, use the domain for this row
            in the grid for this treemap trace.
x
Sets the horizontal domain of this treemap trace (in
plot fraction).
y
Sets the vertical domain of this treemap trace (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
_v = column if column is not None else _v
if _v is not None:
self["column"] = _v
_v = arg.pop("row", None)
_v = row if row is not None else _v
if _v is not None:
self["row"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
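# Usage sketch (values are illustrative): a treemap trace can be confined to the
# upper-left quadrant of a figure with
#   go.Treemap(..., domain=go.treemap.Domain(x=[0, 0.5], y=[0.5, 1]))
# assuming `plotly.graph_objects` is imported as `go`.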
| {
"content_hash": "d6ae1d5cb6f434314b26b9c6efd66b7c",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 82,
"avg_line_length": 27.806763285024154,
"alnum_prop": 0.5062543432939541,
"repo_name": "plotly/python-api",
"id": "7a49e2974d5f54e8b95ab51a8f075900aa3376b4",
"size": "5756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/treemap/_domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""\
Test against issue #25.
<https://github.com/heuer/cablemap/issues/25>
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD license
"""
from nose.tools import eq_
from cablemap.core import cable_by_id
def test_issue25():
cable = cable_by_id('09NAIROBI1938')
refs = [ref.value for ref in cable.references if ref.is_cable()]
eq_([u'08STATE81854', u'09NAIROBI1830', u'09NAIROBI1859', u'09NAIROBI1831'], refs)
if __name__ == '__main__':
import nose
nose.core.runmodule()
| {
"content_hash": "aa390a4e64fd7d89fbc946b0b93f0a63",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 86,
"avg_line_length": 28.05,
"alnum_prop": 0.6702317290552585,
"repo_name": "heuer/cablemap",
"id": "4f3ee6fd4f663981c8e2f8a7fbad5dade4da78c0",
"size": "729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cablemap.core/tests/test_issue25.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "358392"
},
{
"name": "Python",
"bytes": "443537"
}
],
"symlink_target": ""
} |
from frappe import _
from frappe.utils import get_fullname, now
from frappe.model.document import Document
from frappe.core.utils import set_timeline_doc
import frappe
from frappe.query_builder import DocType, Interval
from frappe.query_builder.functions import Now
from pypika.terms import PseudoColumn
class ActivityLog(Document):
def before_insert(self):
self.full_name = get_fullname(self.user)
self.date = now()
def validate(self):
self.set_status()
set_timeline_doc(self)
def set_status(self):
if not self.is_new():
return
if self.reference_doctype and self.reference_name:
self.status = "Linked"
def on_doctype_update():
"""Add indexes in `tabActivity Log`"""
frappe.db.add_index("Activity Log", ["reference_doctype", "reference_name"])
frappe.db.add_index("Activity Log", ["timeline_doctype", "timeline_name"])
frappe.db.add_index("Activity Log", ["link_doctype", "link_name"])
def add_authentication_log(subject, user, operation="Login", status="Success"):
frappe.get_doc({
"doctype": "Activity Log",
"user": user,
"status": status,
"subject": subject,
"operation": operation,
}).insert(ignore_permissions=True, ignore_links=True)
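# Usage sketch (subject and user values are placeholders):
#   add_authentication_log("Login successful", "user@example.com")
# inserts an Activity Log entry while bypassing permission and link checks.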
def clear_activity_logs(days=None):
"""clear 90 day old authentication logs or configured in log settings"""
if not days:
days = 90
doctype = DocType("Activity Log")
frappe.db.delete(doctype, filters=(
doctype.creation < PseudoColumn(f"({Now() - Interval(days=days)})")
)) | {
"content_hash": "26f105ccb50b16d26414e1d71e41d159",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 29.918367346938776,
"alnum_prop": 0.7257844474761255,
"repo_name": "almeidapaulopt/frappe",
"id": "69565a2c2ad59b832b9e95170d65b8544a2bd54b",
"size": "1578",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/activity_log/activity_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "245760"
},
{
"name": "JavaScript",
"bytes": "2345089"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3436599"
},
{
"name": "SCSS",
"bytes": "248606"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import warnings
import py4j
from contextlib import contextmanager
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.util import _exception_message
_pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
_pandas_requirement_message = _exception_message(e)
_pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
_pyarrow_requirement_message = _exception_message(e)
_test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
_test_not_compiled_message = _exception_message(e)
_have_pandas = _pandas_requirement_message is None
_have_pyarrow = _pyarrow_requirement_message is None
_test_compiled = _test_not_compiled_message is None
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.sql.types import _merge_type
from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
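# Illustrative note: UTCOffsetTimezone(2) is a fixed tzinfo whose utcoffset() and
# dst() both report timedelta(hours=2); it can be attached to a datetime via
# datetime.datetime(2020, 1, 1, tzinfo=UTCOffsetTimezone(2)).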
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
    User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
    This util assumes the instance of this class has a 'spark' attribute, holding a spark
    session. It is usually used with the 'ReusedSQLTestCase' class, but can be used on its
    own if you are sure the implementation of the class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
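    # Usage sketch (config key and value are illustrative):
    #   with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
    #       ...  # code under test runs with the temporary setting
    # The previous value (or unset state) is restored on exit.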
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
def assertPandasEqual(self, expected, result):
msg = ("DataFrames are not equal: " +
"\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
"\n\nResult:\n%s\n%s" % (result, result.dtypes))
self.assertTrue(expected.equals(result), msg=msg)
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
def test_invalid_create_row(self):
row_class = Row("c1", "c2")
self.assertRaises(ValueError, lambda: row_class(1, 2, 3))
class SQLTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedSQLTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
# This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
sqlContext = self.spark._wrapped
sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
[row] = sqlContext.sql("SELECT oneArg('test')").collect()
self.assertEqual(row[0], 4)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf3(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], u'5')
def test_udf_registration_return_type_none(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf_registration_return_type_not_none(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
self.spark.catalog.registerFunction(
"f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
def test_nondeterministic_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf
import random
udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
self.assertEqual(udf_random_col.deterministic, False)
df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
[row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
self.assertEqual(row[0] + 10, row[1])
def test_nondeterministic_udf2(self):
import random
from pyspark.sql.functions import udf
random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
self.assertEqual(random_udf.deterministic, False)
random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
self.assertEqual(random_udf1.deterministic, False)
[row] = self.spark.sql("SELECT randInt()").collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf1()).collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf()).collect()
self.assertEqual(row[0], 6)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
pydoc.render_doc(random_udf)
pydoc.render_doc(random_udf1)
pydoc.render_doc(udf(lambda x: x).asNondeterministic)
def test_nondeterministic_udf3(self):
# regression test for SPARK-23233
from pyspark.sql.functions import udf
f = udf(lambda x: x)
# Here we cache the JVM UDF instance.
self.spark.range(1).select(f("id"))
# This should reset the cache to set the deterministic status correctly.
f = f.asNondeterministic()
# Check the deterministic status of udf.
df = self.spark.range(1).select(f("id"))
deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
self.assertFalse(deterministic)
def test_nondeterministic_udf_in_aggregate(self):
from pyspark.sql.functions import udf, sum
import random
udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.groupby('id').agg(sum(udf_random_col())).collect()
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
# This is to check if a 'SQLContext.udf' can call its alias.
sqlContext = self.spark._wrapped
add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
self.assertListEqual(
df.selectExpr("add_four(id) AS plus_four").collect(),
df.select(add_four("id").alias("plus_four")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
# This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
sqlContext = spark._wrapped
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_linesep_text(self):
df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df.write.text(tpath, lineSep="!")
expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
Row(value=u'Tom!30!"My name is Tom"'),
Row(value=u'Hyukjin!25!"I am Hyukjin'),
Row(value=u''), Row(value=u'I love Spark!"'),
Row(value=u'!')]
readback = self.spark.read.text(tpath)
self.assertEqual(readback.collect(), expected)
finally:
shutil.rmtree(tpath)
def test_multiline_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
people_array = self.spark.read\
.json("python/test_support/sql/people_array_utf16le.json",
multiLine=True, encoding="UTF-16LE")
expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
expected = [Row(_corrupt_record=None, name=u'Michael'),
Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
Row(_corrupt_record=u' "age":19}\n', name=None)]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df = self.spark.read.json("python/test_support/sql/people.json")
df.write.json(tpath, lineSep="!!")
readback = self.spark.read.json(tpath, lineSep="!!")
self.assertEqual(readback.collect(), df.collect())
finally:
shutil.rmtree(tpath)
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initialization(self):
        # This test is kept separate from UDFInitializationTests
        # to avoid context initialization when the udf is called.
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_validate_column_types(self):
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: _to_java_column(1))
class A():
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: udf(lambda x: x)(None))
self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
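        # schema inference for nested containers (arrays, maps) and namedtuple rows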
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
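        # round-trip UDTs through pickle and the JVM DataType JSON parser, and check
        # type inference/verification for both Scala-backed and Python-only UDTs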
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
        struct_udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
        gd.select(struct_udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_nonparam_udf_with_aggregate(self):
import pyspark.sql.functions as f
df = self.spark.createDataFrame([(1, 2), (1, 2)])
f_udf = f.udf(lambda: "const_str")
rows = df.distinct().withColumn("a", f_udf()).collect()
self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_column_operators(self):
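        # arithmetic, comparison, boolean and string operators on Column should all
        # return Column instances; using 'in' against a Column raises ValueError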
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
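        # approxQuantile accepts a single column name, a list or a tuple of names and
        # returns floats; non-string column arguments raise ValueError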
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 3)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
        def assert_close(a, b):
            c = get_values(b)
            diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
            assert sum(diff) == len(a), "values were not all close: %s vs %s" % (a, c)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql.functions import col, lit
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument can be implicitly cast to the array's element type.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
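        # StructType built via add() must equal the constructor form; also covers
        # iteration, len() and item access by name, index and slice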
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
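        # _parse_datatype_string accepts plain type names as well as DDL-formatted
        # strings, ignoring extra whitespace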
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
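        # round-trip a DataFrame through DataFrameWriter/DataFrameReader with explicit
        # schemas, overwrite mode, unknown options and a temporarily changed default source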
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        df.write.save(format="json", mode="overwrite", path=tmpPath,
                      noUse="this option will not be used in save.")
        actual = self.spark.read.load(format="json", path=tmpPath,
                                      noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        df.write.mode("overwrite").options(noUse="this option will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
            .load(path=tmpPath, noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
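        # trigger() must be given exactly one of its keyword options; positional
        # arguments are rejected with TypeError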
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
        try:
            df.writeStream.trigger()
            self.fail("Should have thrown an exception")
        except ValueError:
            pass
        # Should not take multiple args
        try:
            df.writeStream.trigger(once=True, processingTime='5 seconds')
            self.fail("Should have thrown an exception")
        except ValueError:
            pass
        # Should not take multiple args
        try:
            df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
            self.fail("Should have thrown an exception")
        except ValueError:
            pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
            # is likely to throw an error and make this test flaky, so calling it here should still
            # be enough to detect broken code.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
                self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
                self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
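    # Helper for the streaming foreach tests below: runs a streaming query with a given
    # writer and records every open/process/close call as a file on disk so the tests
    # can assert on the events afterwards.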
class ForeachWriterTester:
def __init__(self, spark):
self.spark = spark
def write_open_event(self, partitionId, epochId):
self._write_event(
self.open_events_dir,
{'partition': partitionId, 'epoch': epochId})
def write_process_event(self, row):
self._write_event(self.process_events_dir, {'value': 'text'})
def write_close_event(self, error):
self._write_event(self.close_events_dir, {'error': str(error)})
def write_input_file(self):
self._write_event(self.input_dir, "text")
def open_events(self):
return self._read_events(self.open_events_dir, 'partition INT, epoch INT')
def process_events(self):
return self._read_events(self.process_events_dir, 'value STRING')
def close_events(self):
return self._read_events(self.close_events_dir, 'error STRING')
def run_streaming_query_on_writer(self, writer, num_files):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
for i in range(num_files):
self.write_input_file()
sq.processAllAvailable()
finally:
self.stop_all()
def assert_invalid_writer(self, writer, msg=None):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
self.write_input_file()
sq.processAllAvailable()
self.fail("invalid writer %s did not fail the query" % str(writer)) # not expected
except Exception as e:
if msg:
assert msg in str(e), "%s not in %s" % (msg, str(e))
finally:
self.stop_all()
def stop_all(self):
for q in self.spark._wrapped.streams.active:
q.stop()
def _reset(self):
self.input_dir = tempfile.mkdtemp()
self.open_events_dir = tempfile.mkdtemp()
self.process_events_dir = tempfile.mkdtemp()
self.close_events_dir = tempfile.mkdtemp()
def _read_events(self, dir, json):
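            # `json` is a DDL schema string such as 'partition INT, epoch INT'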
rows = self.spark.read.schema(json).json(dir).collect()
dicts = [row.asDict() for row in rows]
return dicts
def _write_event(self, dir, event):
import uuid
with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
f.write("%s\n" % str(event))
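        # only the event directories are pickled, so the tester can be shipped to
        # executors without dragging the SparkSession along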
def __getstate__(self):
return (self.open_events_dir, self.process_events_dir, self.close_events_dir)
def __setstate__(self, state):
self.open_events_dir, self.process_events_dir, self.close_events_dir = state
def test_streaming_foreach_with_simple_function(self):
tester = self.ForeachWriterTester(self.spark)
def foreach_func(row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(foreach_func, 2)
self.assertEqual(len(tester.process_events()), 2)
def test_streaming_foreach_with_basic_open_process_close(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partitionId, epochId):
tester.write_open_event(partitionId, epochId)
return True
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
open_events = tester.open_events()
self.assertEqual(len(open_events), 2)
self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})
self.assertEqual(len(tester.process_events()), 2)
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_with_open_returning_false(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return False
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_without_open_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return True
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
        self.assertEqual(len(tester.open_events()), 2)  # open() was called for both batches
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_without_open_and_close_methods(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_with_process_throwing_error(self):
from pyspark.sql.utils import StreamingQueryException
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
raise Exception("test error")
def close(self, error):
tester.write_close_event(error)
try:
tester.run_streaming_query_on_writer(ForeachWriter(), 1)
self.fail("bad writer did not fail the query") # this is not expected
except StreamingQueryException as e:
# TODO: Verify whether original error message is inside the exception
pass
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 1)
# TODO: Verify whether original error message is inside the exception
def test_streaming_foreach_with_invalid_writers(self):
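        # each writer below is invalid in a different way and should fail the query,
        # with an error message naming the problem where one is given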
tester = self.ForeachWriterTester(self.spark)
def func_with_iterator_input(iter):
for x in iter:
print(x)
tester.assert_invalid_writer(func_with_iterator_input)
class WriterWithoutProcess:
def open(self, partition):
pass
tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")
class WriterWithNonCallableProcess():
process = True
tester.assert_invalid_writer(WriterWithNonCallableProcess(),
"'process' in provided object is not callable")
class WriterWithNoParamProcess():
def process(self):
pass
tester.assert_invalid_writer(WriterWithNoParamProcess())
# Abstract class for tests below
class WithProcess():
def process(self, row):
pass
class WriterWithNonCallableOpen(WithProcess):
open = True
tester.assert_invalid_writer(WriterWithNonCallableOpen(),
"'open' in provided object is not callable")
class WriterWithNoParamOpen(WithProcess):
def open(self):
pass
tester.assert_invalid_writer(WriterWithNoParamOpen())
class WriterWithNonCallableClose(WithProcess):
close = True
tester.assert_invalid_writer(WriterWithNonCallableClose(),
"'close' in provided object is not callable")
def test_streaming_foreachBatch(self):
q = None
collected = dict()
def collectBatch(batch_df, batch_id):
collected[batch_id] = batch_df.collect()
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.assertTrue(0 in collected)
            self.assertEqual(len(collected[0]), 2)
finally:
if q:
q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
from pyspark.sql.utils import StreamingQueryException
q = None
def collectBatch(df, id):
raise Exception("this should fail the query")
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.fail("Expected a failure")
except StreamingQueryException as e:
self.assertTrue("this should fail" in str(e))
finally:
if q:
q.stop()
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # saving this as Parquet caused issues as well, so round-trip through Parquet too.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_merge_type(self):
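        # _merge_type should widen NullType and raise a TypeError naming the offending
        # path when element/key/value/field types conflict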
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
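        # a timezone-aware timestamp should be converted on the way in and read back
        # equal to the equivalent naive local datetime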
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
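        # dropna() semantics: how='any' vs how='all', subset restriction, and thresh
        # (which takes precedence over how)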
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
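        # fillna() only touches nulls in columns whose type matches the fill value,
        # optionally restricted to a subset of columns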
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_repartitionByRange_dataframe(self):
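        # after repartitionByRange the rows should come back in range order, matching
        # df2, which holds the same rows pre-sorted by (name, age)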
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
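        # replace() accepts scalars, lists/tuples and dicts; mismatched lengths, mixed
        # replacement types and malformed subsets raise ValueError/TypeError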
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not a list, a tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
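# To summarize the call forms exercised above (a sketch, not an exhaustive spec):
#   df.replace(10, 20)                   # scalar to_replace and value
#   df.replace([10, 80], [20, 90])       # parallel lists of equal length
#   df.replace({10: 20, 80.1: 90.5})     # dict mapping; the value argument is omitted
#   df.replace(10, 20, subset=['age'])   # restrict the replacement to the given columns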
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
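# For contrast with the failure cases above, a few call forms that are expected to be
# accepted (a sketch; assumes an active `spark` session):
#   self.spark.range(10).sample(0.5)                    # fraction only
#   self.spark.range(10).sample(fraction=0.5, seed=42)  # fraction with a seed
#   self.spark.range(10).sample(True, 0.5, 42)          # withReplacement, fraction, seed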
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
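# The same DDL-style schema strings are also accepted by createDataFrame directly
# (a sketch; assumes an active `spark` session):
#   self.spark.createDataFrame([(1, "1")], "key: int, value: string")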
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for SPARK-14761: invalid join methods when on is None
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
self.assertEqual(spark.conf.get("hyukjin", None), None)
# This returns 'STATIC' because it's the default value of
# 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
# `spark.conf.get` is unset.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
# This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
# `defaultValue` in `spark.conf.get` is set to None.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808).
# The empty bytearray is a test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as the corresponding python type. This is necessary because python's
# array types depend on the C implementation on the machine. Therefore there
# is no machine-independent correspondence between python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types changes with the implementation, so we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# The keys in _array_type_mappings are a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
# `array.typecodes` is not supported in python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
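# A minimal, standalone sketch of the width check that motivates the mappings above
# (illustrative only; the names below are hypothetical and not used by this test):
#   import array, ctypes
#   bits_from_ctypes = ctypes.sizeof(ctypes.c_long) * 8  # platform-dependent width of C long
#   bits_from_array = array.array('l').itemsize * 8      # width actually used by typecode 'l'
#   # both are platform dependent, which is why the supported sets are computed at runtime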
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test that createDataFrame with an explicit schema accepts a pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dateframe_from_pandas_with_dst(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEquals(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)]))
def test_checking_csv_header(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.createDataFrame([[1, 1000], [2000, 2]])\
.toDF('f1', 'f2').write.option("header", "true").csv(path)
schema = StructType([
StructField('f2', IntegerType(), nullable=True),
StructField('f1', IntegerType(), nullable=True)])
df = self.spark.read.option('header', 'true').schema(schema)\
.csv(path, enforceSchema=False)
self.assertRaisesRegexp(
Exception,
"CSV header does not conform to the schema",
lambda: df.collect())
finally:
shutil.rmtree(path)
def test_ignore_column_of_all_nulls(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
df = self.spark.createDataFrame([["""{"a":null, "b":1, "c":3.0}"""],
["""{"a":null, "b":null, "c":"string"}"""],
["""{"a":null, "b":null, "c":null}"""]])
df.write.text(path)
schema = StructType([
StructField('b', LongType(), nullable=True),
StructField('c', StringType(), nullable=True)])
readback = self.spark.read.json(path, dropFieldIfAllNull=True)
self.assertEquals(readback.schema, schema)
finally:
shutil.rmtree(path)
# SPARK-24721
@unittest.skipIf(not _test_compiled, _test_not_compiled_message)
def test_datasource_with_udf(self):
from pyspark.sql.functions import udf, lit, col
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = udf(lambda x: x + 1, 'int')(lit(1))
c2 = udf(lambda x: x + 1, 'int')(col('i'))
f1 = udf(lambda x: False, 'boolean')(lit(1))
f2 = udf(lambda x: False, 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
class HiveSparkSubmitTests(SparkSubmitTests):
@classmethod
def setUpClass(cls):
# get a SparkContext to check for availability of Hive
sc = SparkContext('local[4]', cls.__name__)
cls.hive_available = True
try:
sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
finally:
# we don't need this SparkContext for the test
sc.stop()
def setUp(self):
super(HiveSparkSubmitTests, self).setUp()
if not self.hive_available:
self.skipTest("Hive is not available.")
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore URL and checks whether a Derby dir is created by the
# Hive metastore. If this Derby dir exists, HiveContext is using the
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedSQLTestCase):
# We can't include this test in SQLTests because it stops the class's SparkContext, which would
# cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. It can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
class SparkSessionTests(PySparkTestCase):
# This test is separate because it's closely related to the session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume the default session is already set in the JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
# The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
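# For reference, the canonical way to express the cumulative-sum frame without relying
# on boundary clamping (a sketch using the same df and F as above):
#   w = Window.rowsBetween(Window.unboundedPreceding, Window.currentRow)
#   df.select(df.key, F.sum(df.value).over(w))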
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
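# The mock-based checks above verify that +/- sys.maxsize render as UNBOUNDED boundaries;
# in ordinary code the explicit constants express the same frame (a sketch, assuming
# pyspark.sql.window and functions as F are available, as above):
#   w = window.Window.rowsBetween(window.Window.unboundedPreceding,
#                                 window.Window.unboundedFollowing)
#   df.select(F.count("*").over(w))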
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
from distutils.version import LooseVersion
import pyarrow as pa
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion("0.10.0") <= LooseVersion(pa.__version__):
cls.schema.add(StructField("9_binary_t", BinaryType(), True))
cls.data[0] = cls.data[0] + (bytearray(b"a"),)
cls.data[1] = cls.data[1] + (bytearray(b"bb"),)
cls.data[2] = cls.data[2] + (bytearray(b"ccc"),)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with warnings.catch_warnings(record=True) as warns:
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
from distutils.version import LooseVersion
import pyarrow as pa
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
schema = StructType([StructField("binary", BinaryType(), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type.*BinaryType'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
        # Correct result_la by adjusting for the 3-hour Los Angeles/New York difference
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[7] = fields[7], fields[0] # swap str with timestamp
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
        # Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
        pdf.loc[0, '8_timestamp_t'] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be cast
        pdf.loc[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type.*BinaryType'):
self.spark.createDataFrame(
pd.DataFrame([[{'a': b'aaa'}]]), "a: binary")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
        # Daylight saving time for Los Angeles ends on Sun, Nov 1, 2015 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType, StructField, DoubleType
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from py4j.protocol import Py4JJavaError
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
from pyspark.sql.functions import pandas_udf
@pandas_udf('double')
def random_udf(v):
import pandas as pd
import numpy as np
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_pandas_udf_tokenize(self):
from pyspark.sql.functions import pandas_udf
tokenize = pandas_udf(lambda s: s.apply(lambda str: str.split(' ')),
ArrayType(StringType()))
self.assertEqual(tokenize.returnType, ArrayType(StringType()))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[u'hi', u'boo']), Row(hi=[u'bye', u'boo'])], result.collect())
def test_pandas_udf_nested_arrays(self):
from pyspark.sql.functions import pandas_udf
tokenize = pandas_udf(lambda s: s.apply(lambda str: [str.split(' ')]),
ArrayType(ArrayType(StringType())))
self.assertEqual(tokenize.returnType, ArrayType(ArrayType(StringType())))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[[u'hi', u'boo']]), Row(hi=[[u'bye', u'boo']])], result.collect())
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col, array
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf
from pyspark.rdd import PythonEvalType
import random
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
from decimal import Decimal
from pyspark.sql.functions import pandas_udf, col
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.functions import pandas_udf, col
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
pandas_udf(lambda x: x, BinaryType())
else:
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, BinaryType())
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_varargs(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
from pyspark.sql.functions import pandas_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
def test_vectorized_udf_dates(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import date
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
import pandas as pd
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
import pandas as pd
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
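            # The UDF below reports the size of each Arrow batch it receives; with
            # maxRecordsPerBatch set to 3, no batch should exceed 3 records.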
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
import pandas as pd
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
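        # internal_value exposes the raw epoch value (nanoseconds) of each timestamp as seen
        # inside the UDF, so the two timezone configurations can be compared numerically.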
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
            # Correct result_la by adjusting for the 3-hour Los Angeles/New York difference
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf, pandas_udf, col
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
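        # If random_udf were re-evaluated for the chained column, its random values would
        # differ between 'rand' and 'plus_ten(rand)', and the equality check below would fail.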
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
from pyspark.sql.functions import pandas_udf, sum
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf
        # Daylight saving time for Los Angeles ends on Sun, Nov 1, 2015 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
from pyspark.sql.functions import pandas_udf
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
        # Hyukjin: I think it's an ugly way to test issues about syntax specific to
        # higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
import pandas as pd
from pyspark.sql.functions import col, udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
import pandas as pd
from pyspark.sql import Column
from pyspark.sql.functions import udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
# SPARK-24721
@unittest.skipIf(not _test_compiled, _test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
        # This needs to be a separate test because the Arrow dependency is optional
import pandas as pd
import numpy as np
from pyspark.sql.functions import pandas_udf, lit, col
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
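        # Ten ids, each exploded into the integer values 20..29, giving a 100-row (id, v) frame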
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id")))
        # Different forms of grouped map pandas UDF; the results of these are the same
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType())),
StructField('v1', DoubleType()),
StructField('v2', LongType())])
udf1 = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_array_type_correct(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_register_grouped_map_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or '
'SQL_SCALAR_PANDAS_UDF'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_coerce(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
self.assertPandasEqual(expected, result)
def test_complex_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_empty_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_datatype_string(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
schema = StructType(
[StructField("id", LongType(), True),
StructField("map", MapType(StringType(), IntegerType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
schema = StructType(
[StructField("id", LongType(), True),
StructField("arr_ts", ArrayType(TimestampType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
        # Daylight saving time for Los Angeles ends on Sun, Nov 1, 2015 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
self.assertPandasEqual(df.toPandas(), result.toPandas())
def test_udf_with_key(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
pdf = df.toPandas()
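        # A grouped map UDF taking two arguments receives the grouping key as a tuple of
        # numpy scalars in addition to the group's pandas DataFrame, as the asserts below check.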
def foo1(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id')\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected1, result1)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected2, result2)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected3, result3)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
self.assertPandasEqual(expected4, result4)
def test_column_order(self):
from collections import OrderedDict
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Helper function to set column names from a list
def rename_pdf(pdf, names):
            pdf.rename(columns={old: new for old, new in
                                zip(pdf.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id')
        # Function returns a pdf with the required column names, but the column order
        # can be arbitrary because it is built from a dict
def change_col_order(pdf):
            # Constructing a DataFrame from a dict should result in the same order,
            # but use from_items to ensure the pdf column order is different from the schema
return pd.DataFrame.from_items([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)])
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "No cast implemented"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
with self.sql_conf({"spark.sql.execution.pandas.groupedMap.assignColumnsByPosition": True}):
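            # With positional assignment enabled, the returned 'x'/'y' columns are expected to
            # be mapped onto the schema fields 'a' and 'b' by position rather than by name.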
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
import pyspark.sql.functions as F
@F.pandas_udf('key long, col string', F.PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
F.col('temp0.key') == F.col('temp1.key'))
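        # Two rows share key=1 and one has key=2, so the join on key yields 2*2 + 1*1 = 5 rows.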
self.assertEquals(res.count(), 5)
    def test_mixed_scalar_udfs_followed_by_groupby_apply(self):
import pandas as pd
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
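        # v1 sums to 45, v2 (v1 + 1) to 55 and v3 (v1 + 2) to 65, hence the expected total of 165.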
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
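        # Each id gets ten v values (20.0 + id) .. (29.0 + id) and a constant weight w of 1.0,
        # so plain and weighted group means should coincide.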
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
from pyspark.sql.functions import pandas_udf, array
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
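        # For id i the group holds v = 20.0 + i .. 29.0 + i, so sum(v) = 245 + 10 * i and
        # avg(v) = 24.5 + i, matching expected1 below.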
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_basic(self):
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
from pyspark.sql.types import DoubleType, MapType
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
from pyspark.sql.functions import mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
from pyspark.sql.functions import sum, mean
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
from pyspark.sql.functions import sum, mean
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas UDF in
# groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
def test_complex_groupby(self):
from pyspark.sql.functions import lit, sum
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
self.assertPandasEqual(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
from pyspark.sql.functions import col, sum
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_retain_group_columns(self):
from pyspark.sql.functions import sum, lit, col
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
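        # Only unbounded window frames are supported for group aggregate pandas UDFs.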
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
def test_simple(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, percent_rank, mean, max
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
from pyspark.sql.functions import max, min, mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
from pyspark.sql.functions import max, min, rank, col
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean, pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
ow = self.ordered_window
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*Only unbounded window frame is supported.*'):
df.withColumn('mean_v', mean_udf(df['v']).over(ow))
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
| {
"content_hash": "4ce96bed8cb6a687ff911d3c454407c2",
"timestamp": "",
"source": "github",
"line_count": 6501,
"max_line_length": 100,
"avg_line_length": 42.64851561298262,
"alnum_prop": 0.5775162484040136,
"repo_name": "rekhajoshm/spark",
"id": "8e5bc6729dfa44be94a978b9e8d0025d0a011ff0",
"size": "278093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "35042"
},
{
"name": "Batchfile",
"bytes": "30285"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23956"
},
{
"name": "Dockerfile",
"bytes": "8266"
},
{
"name": "HTML",
"bytes": "65141"
},
{
"name": "HiveQL",
"bytes": "1823426"
},
{
"name": "Java",
"bytes": "3367711"
},
{
"name": "JavaScript",
"bytes": "144886"
},
{
"name": "Makefile",
"bytes": "9395"
},
{
"name": "PLpgSQL",
"bytes": "163419"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2742712"
},
{
"name": "R",
"bytes": "1138726"
},
{
"name": "Roff",
"bytes": "20534"
},
{
"name": "SQLPL",
"bytes": "30039"
},
{
"name": "Scala",
"bytes": "27391370"
},
{
"name": "Shell",
"bytes": "191511"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
} |
"""Common classes and functions for zones."""
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.core.util import console_io
class ZoneResourceFetcher(object):
"""Mixin class for working with zones."""
def GetZones(self, resource_refs):
"""Fetches zone resources."""
errors = []
requests = []
zone_names = set()
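    # Only issue one Get request per distinct zone.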
for resource_ref in resource_refs:
if resource_ref.zone not in zone_names:
zone_names.add(resource_ref.zone)
requests.append((
self.compute.zones,
'Get',
self.messages.ComputeZonesGetRequest(
project=self.project,
zone=resource_ref.zone)))
res = list(request_helper.MakeRequests(
requests=requests,
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
if errors:
return None
else:
return res
  def WarnForZonalCreation(self, resource_refs):
    """Warns the user if a zone has upcoming maintenance or deprecation."""
zones = self.GetZones(resource_refs)
if not zones:
return
prompts = []
zones_with_upcoming_maintenance = []
zones_with_deprecated = []
for zone in zones:
if zone.maintenanceWindows:
zones_with_upcoming_maintenance.append(zone)
if zone.deprecated:
zones_with_deprecated.append(zone)
if not zones_with_upcoming_maintenance and not zones_with_deprecated:
return
if zones_with_upcoming_maintenance:
phrases = []
if len(zones_with_upcoming_maintenance) == 1:
phrases = ('a zone', 'window is')
else:
phrases = ('zones', 'windows are')
title = ('You have selected {0} with upcoming '
'maintenance. During maintenance, resources are '
               'temporarily unavailable. The next scheduled '
'{1} as follows:'.format(phrases[0], phrases[1]))
printable_maintenance_zones = []
for zone in zones_with_upcoming_maintenance:
next_event = min(zone.maintenanceWindows, key=lambda x: x.beginTime)
window = '[{0}]: {1} -- {2}'.format(zone.name,
next_event.beginTime,
next_event.endTime)
printable_maintenance_zones.append(window)
prompts.append(utils.ConstructList(title, printable_maintenance_zones))
if zones_with_deprecated:
phrases = []
if len(zones_with_deprecated) == 1:
phrases = ('zone is', 'this zone', 'the')
else:
phrases = ('zones are', 'these zones', 'their')
title = ('\n'
'WARNING: The following selected {0} deprecated.'
' All resources in {1} will be deleted after'
' {2} turndown date.'.format(phrases[0], phrases[1], phrases[2]))
printable_deprecated_zones = []
for zone in zones_with_deprecated:
if zone.deprecated.deleted:
printable_deprecated_zones.append(('[{0}] {1}').format(zone.name,
zone.deprecated
.deleted))
else:
printable_deprecated_zones.append('[{0}]'.format(zone.name))
prompts.append(utils.ConstructList(title, printable_deprecated_zones))
final_message = ' '.join(prompts)
if not console_io.PromptContinue(message=final_message):
raise calliope_exceptions.ToolException('Creation aborted by user.')
| {
"content_hash": "634bb0700b6aaf245d177311c4e03781",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 80,
"avg_line_length": 37.824742268041234,
"alnum_prop": 0.6009811937857726,
"repo_name": "ychen820/microblog",
"id": "0143a807af7c28e2e5abdcfe78f8b34e8b1b34b7",
"size": "3719",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/lib/googlecloudsdk/compute/lib/zone_utils.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from bitstream import *
import types_python_primitives
import types_numpy
| {
"content_hash": "21e21693a959ad1b213f472aca151927",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.8378378378378378,
"repo_name": "devast8a/bitstream_python",
"id": "04cc1c4ea7eddf281db797b317f9a62bf677dac7",
"size": "74",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bitstream_python/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21739"
}
],
"symlink_target": ""
} |
"""The test for light device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.light import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a light."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": trigger,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
for trigger in ["changed_states", "turned_off", "turned_on"]
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers)
@pytest.mark.parametrize(
"hidden_by,entity_category",
(
(RegistryEntryHider.INTEGRATION, None),
(RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_triggers_hidden_auxiliary(
hass,
device_reg,
entity_reg,
hidden_by,
entity_category,
):
"""Test we get the expected triggers from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": trigger,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for trigger in ["changed_states", "turned_off", "turned_on"]
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert_lists_same(triggers, expected_triggers)
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a light trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.TRIGGER, trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls, enable_custom_integrations):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
await hass.async_block_till_done()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_on",
},
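                    # The template below records the trigger's attributes in one string.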
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "changed_states",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on_or_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 2
assert {calls[0].data["some"], calls[1].data["some"]} == {
f"turn_off device - {ent1.entity_id} - on - off - None",
f"turn_on_or_off device - {ent1.entity_id} - on - off - None",
}
hass.states.async_set(ent1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 4
assert {calls[2].data["some"], calls[3].data["some"]} == {
f"turn_on device - {ent1.entity_id} - off - on - None",
f"turn_on_or_off device - {ent1.entity_id} - off - on - None",
}
async def test_if_fires_on_state_change_with_for(
hass, calls, enable_custom_integrations
):
"""Test for triggers firing with delay."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
await hass.async_block_till_done()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
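    # Advance time past the 5 second 'for' period so the trigger fires.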
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
ent1.entity_id
)
| {
"content_hash": "c5f088a25b25e88d8794e15053fbfcff",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 87,
"avg_line_length": 35.204402515723274,
"alnum_prop": 0.4884323358642251,
"repo_name": "mezz64/home-assistant",
"id": "6cedb51f661261673277b4024156808833359d16",
"size": "11195",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/light/test_device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import json
import sys
class TwOrder:
# Default Values
conf = {
'user': '',
'query': '',
'since': '',
'until': '',
'max_tweets': 0,
'max_comments': 0,
'bufferlength': 0,
'near': '',
'within': '',
'lang': 'en'
}
@staticmethod
def order(*args, **kwargs):
# Using configuration filename parameter
if len(args) == 1:
filename = args[0]
with open(filename, 'r') as f:
TwOrder.conf = json.load(f)
# Using KV parameters
else:
if 'user' in kwargs:
TwOrder.conf['user'] = kwargs['user']
if 'query' in kwargs:
TwOrder.conf['query'] = kwargs['query']
if 'since' in kwargs:
TwOrder.conf['since'] = kwargs['since']
if 'until' in kwargs:
TwOrder.conf['until'] = kwargs['until']
if 'max_tweets' in kwargs:
TwOrder.conf['max_tweets'] = kwargs['max_tweets']
if 'max_comments' in kwargs:
TwOrder.conf['max_comments'] = kwargs['max_comments']
if 'bufferlength' in kwargs:
TwOrder.conf['bufferlength'] = kwargs['bufferlength']
if 'near' in kwargs:
TwOrder.conf['near'] = kwargs['near']
if 'within' in kwargs:
TwOrder.conf['within'] = kwargs['within']
if 'lang' in kwargs:
TwOrder.conf['lang'] = kwargs['lang']
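        # At least one of query or user must be provided; otherwise there is nothing to crawl.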
if len(TwOrder.conf['query']) == 0 and len(TwOrder.conf['user']) == 0:
            print "Parameter query and user cannot be empty simultaneously!\nUsage: TwOrder(query=\"Father's Day\")"
sys.exit(1)
| {
"content_hash": "ad00f121529c268dc9a4ff4011f7bd00",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 115,
"avg_line_length": 34.09615384615385,
"alnum_prop": 0.4873096446700508,
"repo_name": "mutux/TwEater",
"id": "3124715f9a5ae0d0c660b4edea2bf42b07ca7ee5",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweater/tworder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13186"
}
],
"symlink_target": ""
} |