repo_name: stringlengths 5-100 | path: stringlengths 4-299 | copies: stringclasses 990 values | size: stringlengths 4-7 | content: stringlengths 666-1.03M | license: stringclasses 15 values | hash: int64 -9,223,351,895,964,839,000 to 9,223,297,778B | line_mean: float64 3.17-100 | line_max: int64 7-1k | alpha_frac: float64 0.25-0.98 | autogenerated: bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
rantonmattei/garcon | tests/test_decider.py | 1 | 1045 | from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
| mit | -4,788,066,268,114,450,000 | 28.857143 | 78 | 0.722488 | false |
jferreir/mbed | workspace_tools/host_tests/mbedrpc.py | 111 | 7047 | # mbedRPC.py - mbed RPC interface for Python
#
##Copyright (c) 2010 ARM Ltd
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in
##all copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
##THE SOFTWARE.
#
# Example:
# >from mbedRPC import*
# >mbed = SerialRPC("COM5",9600)
# >myled = DigitalOut(mbed,"myled") <--- Where the text in quotations matches your RPC pin definition's second parameter, in this case it could be RpcDigitalOut myled(LED1,"myled");
# >myled.write(1)
# >
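#
# The same session can run over the HTTP transport instead of serial (the IP
# address below is only a placeholder for whatever address your board's RPC
# server answers on):
# >from mbedRPC import*
# >mbed = HTTPRPC("192.168.0.10")
# >myled = DigitalOut(mbed,"myled")
# >myled.write(1)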
import re, serial, urllib2, time  # re is needed by the Timer read helpers below
# mbed super class
class mbed:
def __init__(self):
print("This will work as a demo but no transport mechanism has been selected")
def rpc(self, name, method, args):
print("Superclass method not overridden")
# Transport mechanisms, derived from mbed
class SerialRPC(mbed):
def __init__(self, port, baud):
self.ser = serial.Serial(port)
self.ser.setBaudrate(baud)
def rpc(self, name, method, args):
# creates the command to be sent serially - /name/method arg1 arg2 arg3 ... argN
str = "/" + name + "/" + method + " " + " ".join(args) + "\n"
# prints the command being executed
print str
# writes the command to serial
self.ser.write(str)
# strips trailing characters from the line just written
ret_val = self.ser.readline().strip()
return ret_val
class HTTPRPC(mbed):
def __init__(self, ip):
self.host = "http://" + ip
def rpc(self, name, method, args):
response = urllib2.urlopen(self.host + "/rpc/" + name + "/" + method + "%20" + "%20".join(args))
return response.read().strip()
# generic mbed interface super class
class mbed_interface():
# initialize an mbed interface with a transport mechanism and pin name
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def new(self, class_name, name, pin1, pin2 = "", pin3 = ""):
args = [arg for arg in [pin1,pin2,pin3,name] if arg != ""]
r = self.mbed.rpc(class_name, "new", args)
# generic read
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return int(r)
# for classes that need write functionality - inherits from the generic reading interface
class mbed_interface_write(mbed_interface):
def __init__(self, this_mbed, mpin):
mbed_interface.__init__(self, this_mbed, mpin)
# generic write
def write(self, value):
r = self.mbed.rpc(self.name, "write", [str(value)])
# mbed interfaces
class DigitalOut(mbed_interface_write):
def __init__(self, this_mbed, mpin):
mbed_interface_write.__init__(self, this_mbed, mpin)
class AnalogIn(mbed_interface):
def __init__(self, this_mbed, mpin):
mbed_interface.__init__(self, this_mbed, mpin)
def read_u16(self):
r = self.mbed.rpc(self.name, "read_u16", [])
return int(r)
class AnalogOut(mbed_interface_write):
def __init__(self, this_mbed, mpin):
mbed_interface_write.__init__(self, this_mbed, mpin)
def write_u16(self, value):
self.mbed.rpc(self.name, "write_u16", [str(value)])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(r)
class DigitalIn(mbed_interface):
def __init__(self, this_mbed, mpin):
mbed_interface.__init__(self, this_mbed, mpin)
class PwmOut(mbed_interface_write):
def __init__(self, this_mbed, mpin):
mbed_interface_write.__init__(self, this_mbed, mpin)
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return r
def period(self, value):
self.mbed.rpc(self.name, "period", [str(value)])
def period_ms(self, value):
self.mbed.rpc(self.name, "period_ms", [str(value)])
def period_us(self, value):
self.mbed.rpc(self.name, "period_us", [str(value)])
def pulsewidth(self, value):
self.mbed.rpc(self.name, "pulsewidth", [str(value)])
def pulsewidth_ms(self, value):
self.mbed.rpc(self.name, "pulsewidth_ms", [str(value)])
def pulsewidth_us(self, value):
self.mbed.rpc(self.name, "pulsewidth_us", [str(value)])
class RPCFunction(mbed_interface):
def __init__(self, this_mbed, name):
mbed_interface.__init__(self, this_mbed, name)
def run(self, input):
r = self.mbed.rpc(self.name, "run", [input])
return r
class RPCVariable(mbed_interface_write):
def __init__(self, this_mbed, name):
mbed_interface_write.__init__(self, this_mbed, name)
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return r
class Timer(mbed_interface):
def __init__(self, this_mbed, name):
mbed_interface.__init__(self, this_mbed, name)
def start(self):
r = self.mbed.rpc(self.name, "start", [])
def stop(self):
r = self.mbed.rpc(self.name, "stop", [])
def reset(self):
r = self.mbed.rpc(self.name, "reset", [])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(re.search('\d+\.*\d*', r).group(0))
def read_ms(self):
r = self.mbed.rpc(self.name, "read_ms", [])
return float(re.search('\d+\.*\d*', r).group(0))
def read_us(self):
r = self.mbed.rpc(self.name, "read_us", [])
return float(re.search('\d+\.*\d*', r).group(0))
# Serial
class Serial():
def __init__(self, this_mbed, tx, rx=""):
self.mbed = this_mbed
if isinstance(tx, str):
self.name = tx
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def baud(self, value):
r = self.mbed.rpc(self.name, "baud", [str(value)])
def putc(self, value):
r = self.mbed.rpc(self.name, "putc", [str(value)])
def puts(self, value):
r = self.mbed.rpc(self.name, "puts", ["\"" + str(value) + "\""])
def getc(self):
r = self.mbed.rpc(self.name, "getc", [])
return int(r)
def wait(s):
time.sleep(s)
| apache-2.0 | 6,615,524,994,291,949,000 | 30.32 | 181 | 0.617426 | false |
moio/spacewalk | backend/upload_server/handlers/package_push/package_push.py | 1 | 5519 | #
# Code that drops files on the filesystem (/PKG-UPLOAD)
#
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import os
import base64
from rhn import rpclib
from spacewalk.common import apache, rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server import rhnPackageUpload, rhnSQL, basePackageUpload
class PackagePush(basePackageUpload.BasePackageUpload):
def __init__(self, req):
basePackageUpload.BasePackageUpload.__init__(self, req)
self.required_fields.extend([
'Auth',
'Force',
])
self.null_org = None
# Default packaging is rpm
self.packaging = 'rpm'
self.username = None
self.password = None
self.force = None
self.rel_package_path = None
self.org_id = None
self.package_path = None
def headerParserHandler(self, req):
ret = basePackageUpload.BasePackageUpload.headerParserHandler(self, req)
# Optional headers
maps = [['Null-Org', 'null_org'], ['Packaging', 'packaging']]
for hn, sn in maps:
header_name = "%s-%s" % (self.header_prefix, hn)
if req.headers_in.has_key(header_name):
setattr(self, sn, req.headers_in[header_name])
if ret != apache.OK:
return ret
if CFG.SEND_MESSAGE_TO_ALL:
rhnSQL.closeDB()
log_debug(1, "send_message_to_all is set")
rhnFlags.set("apache-return-code", apache.HTTP_NOT_FOUND)
try:
outage_message = open(CFG.MESSAGE_TO_ALL).read()
except IOError:
log_error("Missing outage message file")
outage_message = "Outage mode"
raise rhnFault(20001, outage_message, explain=0)
# Init the database connection
rhnSQL.initDB()
use_session = 0
if self.field_data.has_key('Auth-Session'):
session_token = self.field_data['Auth-Session']
use_session = 1
else:
encoded_auth_token = self.field_data['Auth']
if not use_session:
auth_token = self.get_auth_token(encoded_auth_token)
if len(auth_token) < 2:
log_debug(3, auth_token)
                raise rhnFault(105, "Unable to authenticate")
self.username, self.password = auth_token[:2]
force = self.field_data['Force']
force = int(force)
log_debug(1, "Username", self.username, "Force", force)
if use_session:
self.org_id, self.force = rhnPackageUpload.authenticate_session(session_token,
force=force, null_org=self.null_org)
else:
# We don't push to any channels
self.org_id, self.force = rhnPackageUpload.authenticate(self.username,
self.password, force=force, null_org=self.null_org)
return apache.OK
def handler(self, req):
ret = basePackageUpload.BasePackageUpload.handler(self, req)
if ret != apache.OK:
return ret
a_pkg = rhnPackageUpload.save_uploaded_package(req,
(self.package_name, None, self.package_version,
self.package_release, self.package_arch),
str(self.org_id),
self.packaging,
self.file_checksum_type, self.file_checksum)
self.rel_package_path = rhnPackageUpload.relative_path_from_header(
a_pkg.header, org_id=self.org_id,
checksum_type=a_pkg.checksum_type, checksum=a_pkg.checksum)
self.package_path = os.path.join(CFG.MOUNT_POINT,
self.rel_package_path)
package_dict, diff_level = rhnPackageUpload.push_package(a_pkg,
force=self.force,
relative_path=self.rel_package_path, org_id=self.org_id)
if diff_level:
return self._send_package_diff(req, diff_level, package_dict)
# Everything went fine
rhnSQL.commit()
reply = "All OK"
req.headers_out['Content-Length'] = str(len(reply))
req.send_http_header()
req.write(reply)
log_debug(2, "Returning with OK")
return apache.OK
@staticmethod
def _send_package_diff(req, diff_level, diff):
args = {
'level' : diff_level,
'diff' : diff,
}
reply = rpclib.xmlrpclib.dumps((args, ))
ret_stat = apache.HTTP_BAD_REQUEST
req.status = ret_stat
req.err_headers_out['Content-Length'] = str(len(reply))
req.send_http_header()
req.write(reply)
return apache.OK
@staticmethod
def get_auth_token(value):
s = ''.join(map(lambda x: x.strip(), value.split(',')))
arr = map(base64.decodestring, s.split(':'))
return arr
| gpl-2.0 | -5,673,835,696,014,753,000 | 33.93038 | 90 | 0.603914 | false |
bert9bert/statsmodels | statsmodels/sandbox/distributions/tests/__init__.py | 219 | 6354 | '''
Econometrics for a Datarich Environment
=======================================
Introduction
------------
In many cases we are performing statistical analysis when many observed variables are
available, when we are in a data rich environment. Machine learning has a wide variety
of tools for dimension reduction and penalization when there are many variables compared
to the number of observations. Chemometrics has a long tradition of using Partial Least
Squares, NIPALS and similar in these cases. In econometrics the same problem shows up
when there are either many possible regressors, many (weak) instruments or when there are
a large number of moment conditions in GMM.
This section is intended to collect some models and tools in this area that are relevant
for statistical analysis and econometrics.
Covariance Matrices
===================
Several methods are available to reduce the small sample noise in estimated covariance
matrices with many variable.
Some applications:
weighting matrix with many moments,
covariance matrix for portfolio choice
Dimension Reduction
===================
Principal Component and Partial Least Squares try to extract the important low dimensional
factors from the data with many variables.
Regression with many regressors
===============================
Factor models, regressor selection, and shrinkage or penalization are used to improve
the statistical properties when the presence of too many regressors leads to over-fitting
and to overly noisy small-sample estimators and statistics.
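For illustration only (a sketch, not the final interface), principal component
regression with a fixed number of factors can be written directly with numpy::

    import numpy as np

    def pcr(y, x, nfact):
        # regress y on the first nfact principal components of the demeaned x
        xs = x - x.mean(0)
        u, s, vt = np.linalg.svd(xs, full_matrices=False)
        factors = u[:, :nfact] * s[:nfact]      # principal component scores
        exog = np.column_stack([np.ones(len(y)), factors])
        params = np.linalg.lstsq(exog, y)[0]
        return params, factors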
Regression with many moments or many instruments
================================================
The same tools apply and can be used in these two cases,
e.g. Tychonov regularization of the weighting matrix in GMM: similar to Ridge regression, the
weighting matrix can be shrunk towards the identity matrix.
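A minimal sketch of this idea, with an illustrative fixed penalty `lam` rather than a
data-driven choice, shrinking the moment covariance towards the identity before
inverting::

    import numpy as np

    def regularized_weighting(moment_cov, lam=0.05):
        # invert (1 - lam) * S + lam * I instead of the noisy S itself
        k = moment_cov.shape[0]
        return np.linalg.inv((1 - lam) * moment_cov + lam * np.eye(k))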
Simplest case will be part of GMM. I don't know how much will be standalone
functions.
Intended Content
================
PLS
---
what should be available in class?
Factormodel and supporting helper functions
-------------------------------------------
PCA based
~~~~~~~~~
First version based on PCA, following Stock/Watson and Bai/Ng, and recent papers on the
selection of the number of factors. Not sure about the approach of Forni et al.
Basic support of this needs additional results for PCA, error covariance matrix
of data on reduced factors, required for criteria in Bai/Ng.
Selection criteria based on eigenvalue cutoffs.
Paper on PCA and structural breaks. Could add additional results during
find_nfact to test for parameter stability. I haven't read the paper yet.
Idea: for forecasting, use up to h-step ahead endogenous variables to directly
get the forecasts.
Asymptotic results and distribution: not too much idea yet.
Standard OLS results are conditional on factors, paper by Haerdle (abstract
seems to suggest that this is ok, Park 2009).
Simulation: add function to simulate DGP of Bai/Ng and recent extension.
Sensitivity of selection criteria to heteroscedasticity and autocorrelation.
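A rough sketch of the intended factor-number search, using a Bai/Ng style IC_p2
criterion; the function name and interface are placeholders, not the implemented API::

    import numpy as np

    def select_nfact(x, maxfact=8):
        # x is (nobs, nvar); return the k in 1..maxfact minimizing the criterion
        nobs, nvar = x.shape
        xs = x - x.mean(0)
        u, s, vt = np.linalg.svd(xs, full_matrices=False)
        best_k, best_ic = 1, np.inf
        for k in range(1, maxfact + 1):
            fitted = (u[:, :k] * s[:k]).dot(vt[:k])
            sigma2 = ((xs - fitted) ** 2).mean()
            penalty = k * (nobs + nvar) / float(nobs * nvar) * np.log(min(nobs, nvar))
            ic = np.log(sigma2) + penalty
            if ic < best_ic:
                best_k, best_ic = k, ic
        return best_k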
Bai, J. & Ng, S., 2002. Determining the Number of Factors in
Approximate Factor Models. Econometrica, 70(1), pp.191-221.
Kapetanios, G., 2010. A Testing Procedure for Determining the Number
of Factors in Approximate Factor Models With Large Datasets. Journal
of Business and Economic Statistics, 28(3), pp.397-409.
Onatski, A., 2010. Determining the Number of Factors from Empirical
Distribution of Eigenvalues. Review of Economics and Statistics,
92(4), pp.1004-1016.
Alessi, L., Barigozzi, M. & Capasso, M., 2010. Improved penalization
for determining the number of factors in approximate factor models.
Statistics & Probability Letters, 80(23-24), pp.1806-1813.
Breitung, J. & Eickmeier, S., Testing for structural breaks in dynamic
factor models. Journal of Econometrics, In Press, Accepted Manuscript.
Available at:
http://www.sciencedirect.com/science/article/B6VC0-51G3W92-1/2/f45ce2332443374fd770e42e5a68ddb4
[Accessed November 15, 2010].
Croux, C., Renault, E. & Werker, B., 2004. Dynamic factor models.
Journal of Econometrics, 119(2), pp.223-230.
Forni, M. et al., 2009. Opening the Black Box: Structural Factor
Models with Large Cross Sections. Econometric Theory, 25(05),
pp.1319-1347.
Forni, M. et al., 2000. The Generalized Dynamic-Factor Model:
Identification and Estimation. Review of Economics and Statistics,
82(4), pp.540-554.
Forni, M. & Lippi, M., The general dynamic factor model: One-sided
representation results. Journal of Econometrics, In Press, Accepted
Manuscript. Available at:
http://www.sciencedirect.com/science/article/B6VC0-51FNPJN-1/2/4fcdd0cfb66e3050ff5d19bf2752ed19
[Accessed November 15, 2010].
Park, B.U. et al., 2009. Time Series Modelling With Semiparametric
Factor Dynamics. Journal of the American Statistical Association,
104(485), pp.284-298.
other factor algorithm
~~~~~~~~~~~~~~~~~~~~~~
PLS should fit in reasonably well.
Bai/Ng have a recent paper, where they compare LASSO, PCA, and similar methods, individually
and in combination.
Check how much we can use scikits.learn for this.
miscellaneous
~~~~~~~~~~~~~
Time series modeling of factors for prediction, ARMA, VARMA.
SUR and correlation structure
What about sandwich estimation, robust covariance matrices?
Similarity to Factor-Garch and Go-Garch
Updating: incremental PCA, ...?
TODO next
=========
MVOLS : OLS with multivariate endogenous and identical exogenous variables.
rewrite and expand current varma_process.VAR
PCA : write a class after all, and/or adjust the current donated class
and keep adding required statistics, e.g.
residual variance, projection of X on k-factors, ... updating ?
FactorModelUnivariate : started, does basic principal component regression,
based on standard information criteria, not Bai/Ng adjusted
FactorModelMultivariate : follow pattern for univariate version and use
MVOLS
'''
| bsd-3-clause | -1,135,308,256,399,465,300 | 37.509091 | 99 | 0.748033 | false |
crayzeewulf/android-quill | jni/libhpdf-2.3.0RC2/if/python/demo/outline_demo_jp.py | 32 | 3955 | ###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <[email protected]>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
import sys
path=os.path.normpath(os.path.split(os.path.realpath(__file__))[0]+'\..'*up)
if path not in sys.path:
sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
for i in dir():
if 'CreateOutLine' in i:
print i
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
def print_page (page, page_num):
HPDF_Page_SetWidth (page, 200)
HPDF_Page_SetHeight (page, 300)
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, 50, 250)
buf="Page:%d" % page_num
HPDF_Page_ShowText (page, buf)
HPDF_Page_EndText (page)
def main():
global pdf
page=[None for i in range(4)]
outline=[None for i in range(4)]
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
try:
f = open ("mbtext/sjis.txt", "rb")
except:
printf ("error: cannot open 'mbtext/sjis.txt'\n")
return 1
SAMP_TXT=f.read(2048)
f.close ()
pdf = HPDF_New (error_handler, NULL)
if (not pdf):
printf ("error: cannot create PdfDoc object\n")
return 1
# declaration for using Japanese encoding.
HPDF_UseJPEncodings (pdf)
# create default-font
font = HPDF_GetFont (pdf, "Helvetica", NULL)
# Set page mode to use outlines.
HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
# Add 3 pages to the document.
page[0] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[0], font, 20)
print_page(page[0], 1)
page[1] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[1], font, 20)
print_page(page[1], 2)
page[2] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[2], font, 20)
print_page(page[2], 3)
# create outline root.
root = HPDF_CreateOutLine (pdf, NULL, "OutlineRoot", NULL)
HPDF_Outline_SetOpened (root, HPDF_TRUE)
outline[0] = HPDF_CreateOutLine (pdf, root, "page1", NULL)
outline[1] = HPDF_CreateOutLine (pdf, root, "page2", NULL)
# create outline with test which is encoding
outline[2] = HPDF_CreateOutLine (pdf, root, SAMP_TXT,
HPDF_GetEncoder (pdf, "90ms-RKSJ-H"))
# create destination objects on each pages
# and link it to outline items.
dst = HPDF_Page_CreateDestination (page[0])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
HPDF_Outline_SetDestination(outline[0], dst)
# HPDF_Catalog_SetOpenAction(dst)
dst = HPDF_Page_CreateDestination (page[1])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
HPDF_Outline_SetDestination(outline[1], dst)
dst = HPDF_Page_CreateDestination (page[2])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
HPDF_Outline_SetDestination(outline[2], dst)
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
return 0
main() | gpl-3.0 | -1,698,629,336,440,422,400 | 25.866197 | 80 | 0.623009 | false |
mjfarmer/scada_py | env/lib/python2.7/site-packages/pip/download.py | 45 | 22491 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
import requests, six
from requests.adapters import BaseAdapter
from requests.auth import AuthBase, HTTPBasicAuth
from requests.compat import IncompleteRead
from requests.exceptions import InvalidURL, ChunkedEncodingError
from requests.models import Response
from requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([
_implementation_version,
sys.pypy_version_info.releaselevel,
])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.split("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simple return the response
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
# Prompt the user for a new username and password
username = raw_input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
class LocalFSResponse(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def __getattr__(self, name):
return getattr(self.fileobj, name)
def read(self, amt=None, decode_content=None, cache_content=False):
return self.fileobj.read(amt)
# Insert Hacks to Make Cookie Jar work w/ Requests
@property
def _original_response(self):
class FakeMessage(object):
def getheaders(self, header):
return []
def get_all(self, header, default):
return []
class FakeResponse(object):
@property
def msg(self):
return FakeMessage()
return FakeResponse()
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
parsed_url = urlparse.urlparse(request.url)
# We only work for requests with a host of localhost
if parsed_url.netloc.lower() != "localhost":
raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
request.url)
real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
pathname = url_to_path(real_url)
resp = Response()
resp.status_code = 200
resp.url = real_url
stats = os.stat(pathname)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
resp.headers = CaseInsensitiveDict({
"Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = LocalFSResponse(open(pathname, "rb"))
resp.close = resp.raw.close
return resp
def close(self):
pass
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
def request(self, method, url, *args, **kwargs):
# Make file:// urls not fail due to lack of a hostname
parsed = urlparse.urlparse(url)
if parsed.scheme == "file":
url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
'.whl')
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
% (download_hash.digest_size, link, link.hash_name))
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash.hexdigest(), link.hash))
raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
def resp_read(chunk_size):
try:
# Special case for urllib3.
try:
for chunk in resp.raw.stream(
chunk_size, decode_content=False):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
for chunk in resp_read(4096):
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded)))
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn('Backing up %s to %s'
% (display_path(download_location), display_path(dest_file)))
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
session=None):
if session is None:
session = PipSession()
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
# If a download cache is specified, is the file cached there?
if download_cache:
cache_file = os.path.join(download_cache,
urllib.quote(target_url, ''))
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
# If a download dir is specified, is the file already downloaded there?
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
    # If already downloaded, does its hash match?
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# If not a valid download, let's confirm the cached file is valid
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
# let's download to a tmp dir
if not temp_location:
try:
resp = session.get(target_url, stream=True)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.fatal("HTTP error %s while getting %s" %
(exc.response.status_code, link))
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(temp_location, location, content_type, link)
# if using a download cache, cache it, if needed
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
link_path = url_to_path(link.url_without_fragment)
already_downloaded = False
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
if download_dir:
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
content_type = mimetypes.guess_type(download_path)[0]
logger.notify('File was already downloaded %s' % download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
already_downloaded = True
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % link_path
)
os.unlink(download_path)
else:
already_downloaded = True
if already_downloaded:
from_path = download_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded:
_copy_file(from_path, download_dir, content_type, link)
| gpl-3.0 | 5,409,750,584,593,155,000 | 33.923913 | 114 | 0.586813 | false |
fjbatresv/odoo | addons/account/wizard/account_fiscalyear_close.py | 222 | 15660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
    Closes Account Fiscalyear and Generates Opening entries for New Fiscalyear
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
        This function closes the account fiscal year and creates opening entries in the new fiscal year
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
            This private function manually does the reconciliation on the account_move_line given as `ids`, and directly
            through psql. It's necessary to do it this way because the usual `reconcile()` function on account.move.line
            objects is really resource greedy (not supposed to work on reconciliation between thousands of records) and
            it does a lot of different computations that are useless in this particular case.
"""
            #check that the reconciliation concerns journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
# reconcile_ref deptends from reconcile_id but was not recomputed
obj_acc_move_line._store_set_values(cr, uid, ids, ['reconcile_ref'], context=context)
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = [x['id'] for x in result]
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
#1. report of the accounts with defferal method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
#We have also to consider all move_lines that were reconciled
#on another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
#2. report of the accounts with defferal method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
#3. report of the accounts with defferal method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
            #set the creation date of the reconciliation at the first day of the new fiscalyear, in order to have good figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,104,959,318,338,358,800 | 54.907143 | 373 | 0.570333 | false |
AgrAlert/AgrAlert_Backend | lib/python2.7/site-packages/pip/_vendor/requests/models.py | 151 | 28156 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata,
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
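    # Illustrative sketch, not part of the original module: shapes the ``files``
    # argument may take before it reaches ``_encode_files`` (all names below are
    # hypothetical).
    #   {'report': fileobj}                                              # filename guessed
    #   {'report': ('report.xls', fileobj)}                              # explicit filename
    #   {'report': ('report.xls', fileobj, 'application/vnd.ms-excel')}  # + content type
    #   {'report': ('report.xls', fileobj, 'application/vnd.ms-excel',
    #               {'X-Extra': 'header'})}                              # + per-part headers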
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
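    # Illustrative sketch, not part of the original module: typical use of
    # ``iter_lines`` against a streamed response (``url`` is a placeholder).
    #   >>> r = requests.get(url, stream=True)
    #   >>> for line in r.iter_lines():
    #   ...     if line:              # skip keep-alive newlines
    #   ...         print(line)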
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| mit | 8,626,519,625,544,754,000 | 32.43943 | 119 | 0.568689 | false |
tiagoantao/virtual-core | docker/file_router/copy/usr/bin/change_password.py | 1 | 1624 | #!/usr/bin/python2
import getpass
import sys
import pexpect
boot = False
if len(sys.argv) > 1 and sys.argv[1] == 'boot':
print "Boot mode"
boot = True
def get_passes():
    """Prompt for the old password and a new password (entered twice)."""
old = getpass.getpass('Old password: ')
new1 = 'a'
new2 = 'b'
while new1 != new2:
new1 = getpass.getpass('New password: ')
new2 = getpass.getpass('New password (repeat): ')
return old, new1
def change_password(old=None, new=None):
    """Drive `passwd` via pexpect; return (old, new) on success, False otherwise."""
if old is None:
old, new = get_passes()
p = pexpect.spawn('passwd')
p.expect('password')
p.sendline(old)
outcome = p.expect(['New', 'incorrect', 'error'])
p.sendline(new)
try:
outcome = p.expect('ew password:', timeout=1)
if p.match is None:
print p.buffer, 'new password'
else:
p.sendline(new)
outcome = p.expect(['success'] , timeout=1)
if p.match is not None:
return old, new
except:
print p.buffer, 'top level'
return False
def change_samba_password(old, new):
    """Drive `smbpasswd` via pexpect; return True on success, False otherwise."""
p = pexpect.spawn('smbpasswd')
p.expect('Old SMB password:')
p.sendline(old)
p.expect('New SMB password:')
p.sendline(new)
p.expect('Retype new SMB password:')
p.sendline(new)
p.expect('Password changed', timeout=2)
if p.match is None:
return False
else:
return True
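# Minimal sketch of the flow below (comments added for clarity, not part of the
# original script): keep prompting until the UNIX password change succeeds,
# then reuse the same credentials for Samba. In 'boot' mode the old SMB
# password is assumed to be the literal string 'boot'.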
pwds = change_password()
while not pwds:
pwds = change_password()
old, new = pwds
if not change_samba_password('boot' if boot else old, new):
print 'Samba password change failed, reverting ldap password'
change_password(new, old)
| agpl-3.0 | -7,338,901,903,040,035,000 | 23.606061 | 65 | 0.598522 | false |
scivey/mockthink | mockthink/test/functional/test_logic.py | 2 | 2727 | import rethinkdb as r
from mockthink.test.common import as_db_and_table, assertEqUnordered, assertEqual
from mockthink.test.functional.common import MockTest
from pprint import pprint
class TestLogic1(MockTest):
@staticmethod
def get_data():
data = [
{'id': 'joe', 'has_eyes': True, 'age': 22, 'hair_color': 'brown'},
{'id': 'sam', 'has_eyes': True, 'age': 17, 'hair_color': 'bald'},
{'id': 'angela', 'has_eyes': False, 'age': 26, 'hair_color': 'black'},
{'id': 'johnson', 'has_eyes': False, 'age': 16, 'hair_color': 'blonde'}
]
return as_db_and_table('pdb', 'p', data)
def test_not(self, conn):
expected = [
{'id': 'johnson'},
{'id': 'angela'}
]
result = r.db('pdb').table('p').filter(
lambda doc: ~doc['has_eyes']
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_and(self, conn):
expected = [
{'id': 'sam'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['has_eyes'].and_(doc['age'].lt(20))
).pluck('id').run(conn)
assertEqual(expected, list(result))
def test_or(self, conn):
expected = [
{'id': 'sam'},
{'id': 'angela'},
{'id': 'joe'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['has_eyes'].or_(doc['age'].gt(20))
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_gt(self, conn):
expected = [
{'id': 'joe'},
{'id': 'angela'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['age'] > 20
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_lt(self, conn):
expected = [
{'id': 'sam'},
{'id': 'johnson'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['age'].lt(20)
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_eq(self, conn):
expected = [
{'id': 'sam'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['hair_color'] == 'bald'
).pluck('id').run(conn)
assertEqual(expected, list(result))
def test_neq(self, conn):
expected = [
{'id': 'sam'},
{'id': 'angela'},
{'id': 'joe'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['hair_color'] != 'blonde'
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
| mit | 6,086,577,349,711,847,000 | 30.709302 | 83 | 0.482948 | false |
fenglu-g/incubator-airflow | airflow/contrib/sensors/weekday_sensor.py | 4 | 4291 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import six
from airflow.contrib.utils.weekday import WeekDay
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class DayOfWeekSensor(BaseSensorOperator):
"""
Waits until the first specified day of the week. For example, if the execution
day of the task is '2018-12-22' (Saturday) and you pass 'FRIDAY', the task will wait
until next Friday.
**Example** (with single day): ::
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day='Saturday',
use_task_execution_day=True,
dag=dag)
**Example** (with multiple day using set): ::
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day={'Saturday', 'Sunday'},
use_task_execution_day=True,
dag=dag)
**Example** (with :class:`~airflow.contrib.utils.weekday.WeekDay` enum): ::
# import WeekDay Enum
from airflow.contrib.utils.weekday import WeekDay
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day={WeekDay.SATURDAY, WeekDay.SUNDAY},
use_task_execution_day=True,
dag=dag)
:param week_day: Day of the week to check (full name). Optionally, a set
of days can also be provided using a set.
Example values:
* ``"MONDAY"``,
* ``{"Saturday", "Sunday"}``
* ``{WeekDay.TUESDAY}``
* ``{WeekDay.SATURDAY, WeekDay.SUNDAY}``
:type week_day: set or str or airflow.contrib.utils.weekday.WeekDay
:param use_task_execution_day: If ``True``, uses task's execution day to compare
        with week_day. The execution date is useful for backfilling.
If ``False``, uses system's day of the week. Useful when you
don't want to run anything on weekdays on the system.
:type use_task_execution_day: bool
"""
@apply_defaults
def __init__(self, week_day,
use_task_execution_day=False,
*args, **kwargs):
super(DayOfWeekSensor, self).__init__(*args, **kwargs)
self.week_day = week_day
self.use_task_execution_day = use_task_execution_day
if isinstance(self.week_day, six.string_types):
self._week_day_num = {WeekDay.get_weekday_number(week_day_str=self.week_day)}
elif isinstance(self.week_day, WeekDay):
self._week_day_num = {self.week_day}
elif isinstance(self.week_day, set):
if all(isinstance(day, six.string_types) for day in self.week_day):
self._week_day_num = {WeekDay.get_weekday_number(day) for day in week_day}
elif all(isinstance(day, WeekDay) for day in self.week_day):
self._week_day_num = self.week_day
else:
raise TypeError(
'Unsupported Type for week_day parameter: {}. It should be one of str'
', set or Weekday enum type'.format(type(week_day)))
def poke(self, context):
self.log.info('Poking until weekday is in %s, Today is %s',
self.week_day,
WeekDay(timezone.utcnow().isoweekday()).name)
if self.use_task_execution_day:
return context['execution_date'].isoweekday() in self._week_day_num
else:
return timezone.utcnow().isoweekday() in self._week_day_num
| apache-2.0 | -6,501,315,078,532,189,000 | 39.866667 | 90 | 0.635982 | false |
SRabbelier/Melange | thirdparty/google_appengine/lib/django_1_2/django/utils/encoding.py | 44 | 7087 | import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and is_protected_type(s):
return s
try:
        if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError, e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if isinstance(s, Promise):
return unicode(s).encode(encoding, errors)
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
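# Illustrative sketch, not part of the original module, of the conversion
# described above (the input string is hypothetical):
#   >>> iri_to_uri(u'/I \u2665 Django/')
#   '/I%20%E2%99%A5%20Django/'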
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return urllib.quote(smart_str(path).replace("\\", "/"), safe="/~!*()'")
# The encoding of the default system locale; falls back to 'ascii' if the
# locale encoding is unsupported by Python or could not be determined.
# See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except:
DEFAULT_LOCALE_ENCODING = 'ascii'
| apache-2.0 | -109,524,060,624,590,860 | 38.372222 | 79 | 0.61126 | false |
mgit-at/ansible | test/units/modules/network/onyx/test_onyx_config.py | 16 | 4592 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
super(TestOnyxConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 0)
args = self.run_commands.call_args[0][1]
self.assertIn('configuration write', args)
def test_onyx_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True)
| gpl-3.0 | -1,624,468,403,671,203,800 | 39.637168 | 95 | 0.668118 | false |
monopole/test-infra | gubernator/view_logs.py | 22 | 11302 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import gcs_async
import log_parser
import kubelet_parser
import regex
import view_base
@view_base.memcache_memoize('log-file-junit://', expires=60*60*4)
def find_log_junit(build_dir, junit, log_file):
"""
Looks in build_dir for log_file in a folder that
also includes the junit file.
"""
tmps = [f.filename for f in view_base.gcs_ls('%s/artifacts' % build_dir)
if '/tmp-node' in f.filename]
for folder in tmps:
filenames = [f.filename for f in view_base.gcs_ls(folder)]
if folder + junit in filenames:
path = folder + log_file
if path in filenames:
return path
def find_log_files(all_logs, log_file):
"""
Returns list of files named log_file from values in all_logs
"""
log_files = []
for folder in all_logs.itervalues():
for log in folder:
if log_file in log:
log_files.append(log)
return log_files
@view_base.memcache_memoize('all-logs://', expires=60*60*4)
def get_all_logs(directory, artifacts):
"""
returns dictionary given the artifacts folder with the keys being the
folders, and the values being the log files within the corresponding folder
"""
log_files = {}
if artifacts:
dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory)
if f.is_dir]
else:
dirs = [directory]
for d in dirs:
log_files[d] = []
for f in view_base.gcs_ls(d):
log_name = regex.log_re.search(f.filename)
if log_name:
log_files[d].append(f.filename)
return log_files
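# Illustrative sketch, not part of the original module: shape of the mapping
# returned by get_all_logs for a hypothetical build directory.
#   {
#       '<build_dir>/artifacts/tmp-node-1/': ['.../kubelet.log', '.../kube-apiserver.log'],
#       '<build_dir>/artifacts/tmp-node-2/': ['.../kubelet.log'],
#   }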
def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
"""Based on make_dict, either returns the objref_dict or the parsed log file"""
log = gcs_async.read(log_filename).get_result()
if log is None:
return {}, False if make_dict else None
if pod:
bold_re = regex.wordRE(pod)
else:
bold_re = regex.error_re
if objref_dict is None:
objref_dict = {}
if make_dict and pod:
return kubelet_parser.make_dict(log.decode('utf8', 'replace'), bold_re, objref_dict)
else:
return log_parser.digest(log.decode('utf8', 'replace'),
error_re=bold_re, filters=filters, objref_dict=objref_dict)
def get_logs_junit((log_files, pod_name, filters, objref_dict, apiserver_filename)):
# Get the logs in the case where the junit file with the failure is in a specific folder
all_logs = {}
results = {}
# default to filtering kube-apiserver log if user unchecks both checkboxes
if log_files == []:
log_files = [apiserver_filename]
artifact_filename = os.path.dirname(apiserver_filename)
all_logs = get_all_logs(artifact_filename, False)
parsed_dict, _ = parse_log_file(os.path.join(artifact_filename, "kubelet.log"),
pod_name, make_dict=True, objref_dict=objref_dict)
objref_dict.update(parsed_dict)
if log_files:
for log_file in log_files:
parsed_file = parse_log_file(log_file, pod_name, filters, objref_dict=objref_dict)
if parsed_file:
results[log_file] = parsed_file
return all_logs, results, objref_dict, log_files
def get_logs_no_pod(apiserver_filename, kubelet_filenames, filters, objref_dict, all_logs):
# Get results of parsing logs when no pod name is given
results = {}
if apiserver_filename:
for apiserver_log in apiserver_filename:
parsed_file = parse_log_file(apiserver_log, "", filters,
objref_dict=objref_dict)
if parsed_file:
results[apiserver_log] = parsed_file
return all_logs, results, objref_dict, apiserver_filename
else:
for kubelet_log in kubelet_filenames:
parsed_file = parse_log_file(kubelet_log, "", filters,
objref_dict=objref_dict)
if parsed_file:
results[kubelet_log] = parsed_file
return all_logs, results, objref_dict, kubelet_filenames
def get_logs(build_dir, log_files, pod_name, filters, objref_dict):
"""
Get the logs in the case where all logs in artifacts folder may be relevant
Returns:
all_logs: dictionary of all logs that can be filtered
results: dictionary of log file to the parsed text
obref_dict: dictionary of name of filter to the string to be filtered
log_files: list of files that are being displayed/filtered
"""
all_logs = {}
results = {}
old_dict_len = len(objref_dict)
all_logs = get_all_logs(build_dir, True)
apiserver_filename = find_log_files(all_logs, "kube-apiserver.log")
kubelet_filenames = find_log_files(all_logs, "kubelet.log")
if not pod_name and not objref_dict:
return get_logs_no_pod(apiserver_filename, kubelet_filenames, filters,
objref_dict, all_logs)
for kubelet_log in kubelet_filenames:
if pod_name:
parsed_dict, pod_in_file = parse_log_file(kubelet_log, pod_name, make_dict=True,
objref_dict=objref_dict)
objref_dict.update(parsed_dict)
if len(objref_dict) > old_dict_len or not pod_name or pod_in_file or not objref_dict:
if log_files == []:
log_files = [kubelet_log]
if apiserver_filename:
log_files.extend(apiserver_filename)
for log_file in log_files:
parsed_file = parse_log_file(log_file, pod_name, filters,
objref_dict=objref_dict)
if parsed_file:
results[log_file] = parsed_file
break
return all_logs, results, objref_dict, log_files
def get_woven_logs(log_files, pod, filters, objref_dict):
lines = []
combined_lines = []
first_combined = ""
pod_re = regex.wordRE(pod)
# Produce a list of lines of all the selected logs
for log_file in log_files:
log = gcs_async.read(log_file).get_result()
log = log.decode('utf8', 'replace')
lines.extend(log.split('\n'))
# Combine lines without timestamp into previous line, except if it comes at the
# beginning of the file, in which case add it to the line with the first timestamp
for line in lines:
timestamp_re = regex.timestamp(line)
if timestamp_re and timestamp_re.group(0):
if not combined_lines:
# add beginning of file to first timestamp line
line = first_combined + line
combined_lines.append(line)
else:
if not combined_lines:
first_combined = first_combined + line
else:
combined_lines[-1] = combined_lines[-1] + line
lines = sorted(combined_lines, key=regex.sub_timestamp)
data = '\n'.join(lines)
woven_logs = log_parser.digest(data, error_re=pod_re,
filters=filters, objref_dict=objref_dict)
return woven_logs
def parse_by_timestamp((build_dir, junit, log_files, pod, filters, objref_dict)):
"""
Returns:
woven_logs: HTML code of chosen logs woven together by timestamp
all_logs: Dictionary of logs relevant for filtering
"""
woven_logs = get_woven_logs(log_files, pod, filters, objref_dict)
apiserver_filename = find_log_junit(build_dir, junit, "kube-apiserver.log")
if apiserver_filename:
artifact_filename = re.sub("/kube-apiserver.log", "", apiserver_filename)
all_logs = get_all_logs(artifact_filename, False)
if not apiserver_filename:
all_logs = get_all_logs(build_dir, True)
return woven_logs, all_logs
class NodeLogHandler(view_base.BaseHandler):
def get(self, prefix, job, build):
"""
Examples of variables
log_files: ["kubelet.log", "kube-apiserver.log"]
pod_name: "pod-abcdef123"
junit: "junit_01.xml"
uid, namespace, wrap: "on"
cID, poduid, ns: strings entered into textboxes
results, logs: {"kubelet.log":"parsed kubelet log for html"}
all_logs: {"folder_name":["a.log", "b.log"]}
"""
# pylint: disable=too-many-locals
job_dir = '/%s/%s/' % (prefix, job)
build_dir = job_dir + build
log_files = self.request.get_all("logfiles")
others = self.request.get_all("others")
pod_name = self.request.get("pod")
junit = self.request.get("junit")
cID = self.request.get("cID")
poduid = self.request.get("poduid")
ns = self.request.get("ns")
uid = bool(self.request.get("UID"))
namespace = bool(self.request.get("Namespace"))
containerID = bool(self.request.get("ContainerID"))
wrap = bool(self.request.get("wrap"))
weave = bool(self.request.get("weave"))
filters = {"UID":uid, "pod":pod_name, "Namespace":namespace, "ContainerID":containerID}
objref_dict = {}
results = {}
woven_logs = ""
for idx, filter_term in enumerate(others):
filters["other%d" % idx] = filter_term
objref_dict["other%d" % idx] = filter_term
if cID:
objref_dict["ContainerID"] = cID
if poduid:
objref_dict["UID"] = poduid
if ns:
objref_dict["Namespace"] = ns
apiserver_filename = find_log_junit(build_dir, junit, "kube-apiserver.log")
if not weave or len(log_files) == 1:
weave = False
if apiserver_filename and pod_name:
all_logs, results, objref_dict, log_files = get_logs_junit((log_files,
pod_name, filters, objref_dict, apiserver_filename))
if not apiserver_filename:
all_logs, results, objref_dict, log_files = get_logs(build_dir, log_files,
pod_name, filters, objref_dict)
else:
woven_logs, all_logs = parse_by_timestamp((build_dir, junit, log_files, pod_name,
filters, objref_dict))
if (not weave and results == {}) or (weave and woven_logs == ""):
self.render('node_404.html', {"build_dir": build_dir, "log_files": log_files,
"pod_name":pod_name, "junit":junit})
self.response.set_status(404)
return
self.render('filtered_log.html', dict(
job_dir=job_dir, build_dir=build_dir, logs=results, job=job,
build=build, log_files=log_files, containerID=containerID, others=others,
pod=pod_name, junit=junit, uid=uid, namespace=namespace, weave=weave,
wrap=wrap, objref_dict=objref_dict, all_logs=all_logs, woven_logs=woven_logs))
| apache-2.0 | 7,584,143,211,763,042,000 | 37.972414 | 95 | 0.617501 | false |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.5/django/http/multipartparser.py | 82 | 22856 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils import six
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handler:
An UploadHandler instance that performs operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
        # Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
try:
charset = meta_data.get('content-type', (0, {}))[1].get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always read base64 streams by multiple of 4
over_bytes = len(chunk) % 4
if over_bytes:
over_chunk = field_stream.read(4 - over_bytes)
chunk += over_chunk
try:
chunk = base64.b64decode(chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_text(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're mostly likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
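# Illustrative sketch, not part of the original module: how LazyStream's
# read()/unget() pair behaves with a hypothetical producer.
#   >>> stream = LazyStream(iter([b'abcdef']))
#   >>> stream.read(4)
#   'abcd'
#   >>> stream.unget(b'cd')      # push the last two bytes back
#   >>> stream.read()
#   'cdef'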
class ChunkIter(six.Iterator):
"""
    An iterable that will yield chunks of data. Given a file-like object passed
    to the constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
    Future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
            # make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
    def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
        If no boundary exists in the data, None is returned. Otherwise, a tuple
        containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end-1)
if data[last:last+1] == b'\n':
end -= 1
last = max(0, end-1)
if data[last:last+1] == b'\r':
end -= 1
return end, next
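# Illustrative sketch (not part of the original module): BoundaryIter yields the
# bytes that precede the boundary (dropping the CRLF just before it) and ungets
# whatever follows the boundary back onto the wrapped stream. The boundary and
# the sample data are made up for the example.
def _example_boundaryiter_usage():
    source = LazyStream(iter([b'first part\r\n--split--trailing data']))
    part = b''.join(BoundaryIter(source, b'--split--'))
    # part           -> b'first part'
    # source.read()  -> b'trailing data'
    return part, source.read()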
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
        except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
        except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
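# Illustrative sketch (not part of the original module): feeding a tiny
# multipart/form-data body through Parser. The boundary and field name are
# invented for the example; RAW/FIELD/FILE are the constants defined earlier
# in this module, and each yielded stream holds that part's payload.
def _example_multipart_parsing():
    from io import BytesIO
    body = (b'--exampleboundary\r\n'
            b'Content-Disposition: form-data; name="greeting"\r\n'
            b'\r\n'
            b'hello world\r\n'
            b'--exampleboundary--\r\n')
    stream = LazyStream(ChunkIter(BytesIO(body)))
    parts = []
    for item_type, meta_data, field_stream in Parser(stream, b'exampleboundary'):
        # item_type is RAW, FIELD or FILE; meta_data maps header names to
        # (value, params); field_stream.read() returns the part's payload.
        parts.append((item_type, meta_data, field_stream.read()))
    return parts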
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
name = p[:i].strip().lower().decode('ascii')
value = p[i+1:].strip()
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
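# Illustrative sketch (not part of the original module): what parse_header
# produces for a typical Content-Disposition line. The field and file names
# are made up for the example.
def _example_parse_header():
    key, params = parse_header(
        b'Content-Disposition: form-data; name="avatar"; filename="me.png"')
    # key    -> 'content-disposition: form-data'   (lower-cased, ascii-decoded)
    # params -> {'name': b'avatar', 'filename': b'me.png'}
    return key, params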
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| lgpl-3.0 | 757,544,916,095,764,500 | 34.993701 | 109 | 0.545459 | false |
KohlsTechnology/ansible | lib/ansible/modules/inventory/group_by.py | 67 | 1457 | # -*- mode: python -*-
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups
required: true
parents:
description:
- The list of the parent groups
required: false
default: "all"
version_added: "2.4"
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
- This module is also supported for Windows targets.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by:
key: machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by:
key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
# Create nested groups
- group_by:
key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
parents:
- el{{ ansible_distribution_major_version }}
'''
| gpl-3.0 | 1,556,366,349,473,486,800 | 25.490909 | 92 | 0.668497 | false |
DavidHerzogTU-Berlin/cassandraToRun | test/system/test_thrift_server.py | 1 | 103505 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to run a single test, run from trunk/:
# PYTHONPATH=test nosetests --tests=system.test_thrift_server:TestMutations.test_empty_range
import os, sys, time, struct, uuid, re
from . import root, ThriftTester
from . import thrift_client as client
from thrift.Thrift import TApplicationException
from ttypes import *
from constants import VERSION
def _i64(n):
return struct.pack('>q', n) # big endian = network order
_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0)]
_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
Column(_i64(6), 'value6', 0)])]
def _assert_column(column_family, key, column, value, ts = 0):
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value) )
def _assert_columnpath_exists(key, column_path):
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path) )
def _assert_no_columnpath(key, column_path):
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple(block=True):
return _insert_multi(['key1'])
def _insert_batch(block):
return _insert_multi_batch(['key1'], block)
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
def _insert_multi_batch(keys, block):
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
for key in keys:
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_range():
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_range():
client.add('key1', ColumnParent('Counter1'), CounterColumn('c1', 1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c2', 2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c3', 3), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c3'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _verify_counter_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c1'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c3'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_super_range():
client.add('key1', ColumnParent('SuperCounter1', 'sc1'), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc3'), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc2'
assert result[1].super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc3'
assert result[1].super_column.name == 'sc2'
def _verify_counter_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc2'
assert result[1].counter_super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc3'
assert result[1].counter_super_column.name == 'sc2'
def _verify_super(supercf='Super1', key='key1'):
assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
slice = [result.super_column
for result in _big_slice(key, ColumnParent('Super1'))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_, t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
class TestMutations(ThriftTester):
def test_insert(self):
_set_keyspace('Keyspace1')
_insert_simple(False)
time.sleep(0.1)
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
assert _big_slice('key1', ColumnParent('Standard2')) == []
assert _big_slice('key1', ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
def cas(expected, updates):
return client.cas('key1', 'Standard1', expected, updates, ConsistencyLevel.ONE)
cas_result = cas(_SIMPLE_COLUMNS, _SIMPLE_COLUMNS)
assert not cas_result.success
assert len(cas_result.current_values) == 0, cas_result
assert cas([], _SIMPLE_COLUMNS).success
result = [cosc.column for cosc in _big_slice('key1', ColumnParent('Standard1'))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
cas_result = cas([], _SIMPLE_COLUMNS)
assert not cas_result.success
# When we CAS for non-existence, current_values is the first live column of the row
assert dict((c.name, c.value) for c in cas_result.current_values) == { _SIMPLE_COLUMNS[0].name : _SIMPLE_COLUMNS[0].value }, cas_result
# CL.SERIAL for reads
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.SERIAL).column.value == 'value1'
def test_missing_super(self):
_set_keyspace('Keyspace1')
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
super_column_parent = ColumnParent('Super1', 'sc3')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({'key1' : cfmap }, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
# test get_count() to work correctly with 'count' settings around page size (CASSANDRA-4833)
def test_count_around_page_size(self):
def slice_predicate(count):
return SlicePredicate(slice_range=SliceRange('', '', False, count))
_set_keyspace('Keyspace1')
key = 'key1'
parent = ColumnParent('Standard1')
cl = ConsistencyLevel.ONE
for i in xrange(0, 3050):
client.insert(key, parent, Column(str(i), '', 0), cl)
# same as page size
assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
# 1 above page size
assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
        # above number of columns
assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
# same as number of columns
assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
# 1 above number of columns
assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
def test_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_simple()
_verify_simple()
def test_super_insert(self):
_set_keyspace('Keyspace1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
_insert_super()
result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
import uuid
L = []
_set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i].column
assert u.value == 'value%s' % i
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_long_remove(self):
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardLong1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_integer_remove(self):
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardInteger1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
_insert_batch(False)
time.sleep(0.1)
_verify_batch()
def test_batch_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_batch(True)
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(27,32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_standard_columns_blocking(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(38,46)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
first_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
second_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
first_deletion = {'super_column': "sc1",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': "sc2",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = ['key_30', 'key_31']
for key in keys:
sc = SuperColumn('sc1',[Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1' : [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2' : [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_yet_accept_slice_ranges
def send_range():
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange(start='0', finish="", count=10))
d = Deletion(2, predicate=sp)
client.batch_mutate({'key_35': {'Standard1':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(send_range, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_37': {'Undefined':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert('key1', ColumnParent('Standard1'), Column('x'*1, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*127, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*128, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*129, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*255, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*256, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*257, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16 - 1), 'value', 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get('x' * 2**16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be positive
column = Column('cttl1', 'value1', 0, 0)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard ColumnFamily
deletion = Deletion(1, 'supercolumn', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Standard1' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, 'x', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Super5' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_batch_insert_super_blocking(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
== Column('c2', 'value2', 0)
assert _big_slice('key1', ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
result = _big_slice('key1', ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
# check removing the entire super cf, too.
client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {'key3': []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name='sc1',
columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
        assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
Column(_i64(6), 'value6', 0),
Column(_i64(7), 'value7', 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column='sc2')
e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
columns = [result.column
for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), 'value5', 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
key = 'vijay'
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27','28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', 'b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
predicate = SlicePredicate(column_names=['col1', 'col3'])
range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == 'key1'
assert result[1].key == 'key2'
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == 'col2'
assert result[0].columns[2].column.name == 'col4'
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == 'col4'
assert result[0].columns[2].column.name == 'col2'
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
assert result[1].columns[0].column.name == 'col1'
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
for key in ['a', 'b', 'c', 'd', 'e']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
_insert_range()
p = SlicePredicate(column_names=['c1', 'c2'])
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = ['key'+str(i) for i in range(1, num_keys+1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
        keys1 = sorted(rows.keys())
        keys2 = sorted(keys)
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
assert rows.has_key(key) == True
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
        # Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys+1):
key = 'key'+str(i)
for j in range(1, i+1):
client.insert(key, ColumnParent('Standard1'), Column('c'+str(j), 'value'+str(j), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = ['key'+str(i) for i in range(1, num_keys+1)]
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys+1):
key = 'key'+str(i)
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
for n in xrange(1, 4):
p = SlicePredicate(slice_range=SliceRange('', '', False, n))
slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
def test_describe_keyspace(self):
kspaces = client.describe_keyspaces()
assert len(kspaces) == 5, kspaces # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
                break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
server_version = client.describe_version()
assert server_version == VERSION, (server_version, VERSION)
assert client.describe_cluster_name() == 'Test Cluster'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = client.describe_token_map().items()
assert len(ring) == 1
token, node = ring[0]
assert re.match("[0-9A-Fa-f]{32}", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == 'colA' and col1.value == 'colA-value'
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
            assert len(cosc_list) == 0, 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor':'1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef('col', 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column('col', _i64(42), 0)
col1 = Column('col', "ceci n'est pas 64bit", 0)
client.insert('key0', cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily','sc1')
client.insert('key0', scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert('key1', dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a longtype into column 'fcol' should fail at the cfdef level
col2 = Column('fcol', _i64(4224), 0)
e = _expect_exception(lambda: client.insert('key1', dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column('fcol', "Stringin' it up in the Stringtel Stringifornia", 0)
client.insert('key0', dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='BlankCF'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef('age', 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
name_coldef = ColumnDef('name', 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
age_coldef = ColumnDef('age', 'BytesType', None, None)
name_coldef = ColumnDef('name', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef('birthdate', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert('key1', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(0.5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
""" Test simple insertion of a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl1', 'value1', 0, 5)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
assert client.get('key1', ColumnPath('Standard1', column='cttl1'), ConsistencyLevel.ONE).column == column
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
column = Column('cttl3', 'value1', 0, 2)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE))
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
column = Column('cttl4', 'value1', 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column
time.sleep(2)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
column1 = Column('cttl4', 'value1', 0, 1)
client.insert('key1', ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column('cttl4', 'value1', 1)
client.insert('key1', ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl5', 'value1', 0, 10)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='cttl5'), 1, ConsistencyLevel.ONE)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl5'), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_decr_super_add(self):
_set_keyspace('Keyspace1')
d1 = -234
d2 = 52345
d3 = 3123
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c2', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath('SuperCounter1', 'sc1', 'c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_batch_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
def test_incr_decr_standard_batch_remove(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
# remove the previous column and check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=['c1']))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1+d2
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_standard_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
        # check the resulting counts via get_slice
counters = client.get_slice('key1', ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1+d2
assert counters[1].counter_column.value == d1
    def test_incr_decr_standard_multiget_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
        # check the resulting counts via multiget_slice
counters = client.multiget_slice(['key1', 'key2'], ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters['key1'][0].counter_column.value == d1+d2
assert counters['key1'][1].counter_column.value == d1
assert counters['key2'][0].counter_column.value == d1+d2
assert counters['key2'][1].counter_column.value == d1
def test_counter_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_counter_range()
_verify_counter_range()
def test_counter_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_counter_super_range()
_verify_counter_super_range()
def test_index_scan(self):
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
        # an expression on an unindexed column alone matches nothing
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(3)), IndexExpression('birthdate', IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key3'
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert('key1', ColumnParent('Indexed3'), Column(u, 'a', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Indexed3'), Column(u2, 'b', 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange('', '', None, None, [IndexExpression(u, IndexOperator.EQ, 'a'), IndexExpression(u2, IndexOperator.EQ, 'b')], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression('foo', IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, "foo")], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0, 1), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(2)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
key = 'doesntexist'
column_path = ColumnPath(column_family="Standard1", column="idontexist")
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
key = 'key1'
        # three supercolumns, each with a "col1" subcolumn
for i in range(1,4):
client.insert(key, ColumnParent('Super3', 'sc%d' % i), Column('col1', 'val1', 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange('sc1', 'sc3', False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', 'sc1'), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', 'sc1'), Column('col1', 'val1', 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == 'sc1'
class TestTruncate(ThriftTester):
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
client.truncate('Standard1')
assert _big_slice('key1', ColumnParent('Standard1')) == []
# truncate Super1
client.truncate('Super1')
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
| apache-2.0 | -9,103,701,712,682,797,000 | 50.189416 | 623 | 0.622211 | false |
EliAndrewC/ensconce | ensconce/util/cpjsonrpc.py | 1 | 11059 | # coding: utf-8
"""
From http://code.google.com/p/cherrypy-jsonrpc (LGPL)
Some modifications:
- Content-Type (application/json)
"""
import sys
import httplib
import cherrypy
import traceback
try:
import jsonlib2 as json
_ParseError = json.ReadError
except ImportError:
import json
_ParseError = ValueError
def _raw_body_reader():
"""
    Reads the raw request body before CherryPy gets a chance to parse it incorrectly.
"""
if cherrypy.request.method in cherrypy.request.methods_with_bodies:
cherrypy.request.raw_body = cherrypy.request.rfile.read()
cherrypy.tools.raw_body_reader = cherrypy.Tool("before_request_body", _raw_body_reader)
def set_content_type_json():
"""
    Sets the Content-Type of the response to "application/json".
"""
cherrypy.response.headers["Content-Type"] = "application/json"
class SuccessfulResponse(object):
"""
Represents a successful response.
"""
def __init__(self, jsonrpc = None, id = None, result = None):
"""
:param jsonrpc: JSON-RPC version string
:param id: JSON-RPC transaction id
:param result: Result data
"""
self.jsonrpc = jsonrpc
self.id = id
self.result = result
def to_dict(self):
"""
Returns the response object as dictionary.
"""
retdict = {}
if self.jsonrpc:
retdict["jsonrpc"] = self.jsonrpc
if not self.id is None:
retdict["id"] = self.id
if not self.result is None:
retdict["result"] = self.result
return retdict
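# A quick illustration of the shape produced by to_dict() (values made up):
# SuccessfulResponse(jsonrpc="2.0", id=1, result=42).to_dict()
# == {"jsonrpc": "2.0", "id": 1, "result": 42}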
class ErrorResponse(object):
"""
Represents an error response object
"""
code = None
message = None
def __init__(self, jsonrpc = None, id = None, data = None):
"""
:param jsonrpc: JSON-RPC version string
:param id: JSON-RPC transaction id
        :param data: Additional error information. Can be any data structure
                     that is translatable to JSON.
"""
self.jsonrpc = jsonrpc
self.id = id
self.data = data
def to_dict(self):
"""
Returns the response object as dictionary.
"""
retdict = {"error": {}}
if self.jsonrpc:
retdict["jsonrpc"] = self.jsonrpc
retdict["id"] = self.id
retdict["error"]["code"] = self.code
retdict["error"]["message"] = self.message
if self.data:
retdict["error"]["data"] = self.data
if isinstance(self.data, basestring):
if self.message:
retdict["error"]["message"] = \
self.message + u" " + self.data.capitalize()
else:
retdict["error"]["message"] = self.data.capitalize()
return retdict
class ParseErrorResponse(ErrorResponse):
code = -32700
message = u"Invalid JSON was received by the server."
class InvalidRequestResponse(ErrorResponse):
code = -32600
message = u"The JSON sent is not a valid Request object."
class MethodNotFoundResponse(ErrorResponse):
code = -32601
message = u"The method does not exist / is not available."
class InvalidParamsResponse(ErrorResponse):
code = -32602
message = u"Invalid method parameter(s)."
class InternalErrorResponse(ErrorResponse):
code = -32603
message = u"Internal JSON-RPC error."
class JsonRpcMethods(object):
"""
    Subclasses of this class automatically expose every instance method marked
    with *exposed* as a JSON-RPC method.
"""
_cp_config = {
"tools.encode.on": True,
"tools.encode.encoding": "utf-8",
"tools.decode.on": True,
"tools.raw_body_reader.on": True,
}
def __init__(self, debug = False):
self.debug = debug
        # All attributes/methods of this class marked with *exposed* (except
        # the *default* method) are registered as JSON-RPC methods.
        # Their *exposed* flag is removed afterwards.
rpc_methods = {}
for attribute_name in dir(self):
if (
not attribute_name.startswith("_") and
attribute_name != "default"
):
item = getattr(self, attribute_name)
if hasattr(item, "exposed") and item.exposed:
                    # This is a function marked as exposed
rpc_methods[attribute_name] = item
del item.__dict__["exposed"]
self.rpc_methods = rpc_methods
def default(self, *args, **kwargs):
"""
        Receives the JSON-RPC request and dispatches it to the matching
        JSON-RPC method.
"""
responses = []
# Response content type -> JSON
set_content_type_json()
# Get data
if cherrypy.request.method == "GET":
data = kwargs
if "params" in data:
if self.debug:
cherrypy.log("")
cherrypy.log(u"params (raw): " + repr(data["params"]))
cherrypy.log("")
try:
data["params"] = json.loads(data["params"])
except _ParseError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
return json.dumps(
ParseErrorResponse(
data = unicode(err)
).to_dict()
)
requests = [data]
elif cherrypy.request.method == "POST":
if self.debug:
cherrypy.log("")
cherrypy.log(u"cherrypy.request.raw_body:")
cherrypy.log(repr(cherrypy.request.raw_body))
cherrypy.log("")
try:
data = json.loads(cherrypy.request.raw_body)
except _ParseError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
return json.dumps(
ParseErrorResponse(
data = unicode(err)
).to_dict()
)
if isinstance(data, list):
requests = data
else:
requests = [data]
else:
raise cherrypy.HTTPError(
status = httplib.BAD_REQUEST,
message = "Only GET or POST allowed"
)
# Every JSON-RPC request in a batch of requests
for request in requests:
# jsonrpc
jsonrpc = request.get("jsonrpc")
# method
method = str(request.get("method", ""))
# id
id = request.get("id")
# split positional and named params
positional_params = []
named_params = {}
params = request.get("params", [])
if isinstance(params, list):
positional_params = params
elif isinstance(params, dict):
positional_params = params.get("__args", [])
if positional_params:
del params["__args"]
named_params = params
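            # Illustrative example of the "__args" convention handled above:
            # params == {"__args": [1, 2], "verbose": True} yields
            # positional_params == [1, 2] and named_params == {"verbose": True}.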
# Debug
if self.debug:
cherrypy.log("")
cherrypy.log(u"jsonrpc: " + repr(jsonrpc))
cherrypy.log(u"request: " + repr(request))
cherrypy.log(u"positional_params: " + repr(positional_params))
cherrypy.log(u"named_params: " + repr(named_params))
cherrypy.log(u"method: " + repr(method))
cherrypy.log(u"id: " + repr(id))
cherrypy.log("")
# Do we know the method name?
if not method in self.rpc_methods:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log("JSON-RPC method '%s' not found" % method)
responses.append(
MethodNotFoundResponse(jsonrpc = jsonrpc, id = id).to_dict()
)
continue
# Call the method with parameters
try:
rpc_function = self.rpc_methods[method]
result = rpc_function(*positional_params, **named_params)
                # No return value is OK if we don't have an ID (= notification)
if result is None:
if id:
cherrypy.log("No result from JSON-RPC method '%s'" % method)
responses.append(
InternalErrorResponse(
jsonrpc = jsonrpc,
id = id,
data = u"No result from JSON-RPC method."
).to_dict()
)
else:
# Successful response
responses.append(
SuccessfulResponse(
jsonrpc = jsonrpc, id = id, result = result
).to_dict()
)
except TypeError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
if "takes exactly" in unicode(err) and "arguments" in unicode(err):
responses.append(
InvalidParamsResponse(jsonrpc = jsonrpc, id = id).to_dict()
)
else:
responses.append(
InternalErrorResponse(
jsonrpc = jsonrpc,
id = id,
data = unicode(err)
).to_dict()
)
except BaseException, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
if hasattr(err, "data"):
error_data = err.data
else:
error_data = None
responses.append(
InternalErrorResponse(
jsonrpc = jsonrpc,
id = id,
data = error_data or unicode(err)
).to_dict()
)
        # Return as a JSON string (batch or single request)
if len(requests) == 1:
return json.dumps(responses[0])
elif len(requests) > 1:
return json.dumps(responses)
else:
return None
default.exposed = True
| bsd-3-clause | -1,376,914,994,594,325,800 | 31.422287 | 90 | 0.498643 | false |
guewen/odoo | addons/portal_project_issue/tests/__init__.py | 167 | 1124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,277,419,654,075,290,000 | 39.142857 | 78 | 0.618327 | false |
github-account-because-they-want-it/django | tests/db_functions/models.py | 245 | 1332 | """
Tests for built in Function expressions.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=50)
alias = models.CharField(max_length=50, null=True, blank=True)
goes_by = models.CharField(max_length=50, null=True, blank=True)
age = models.PositiveSmallIntegerField(default=30)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
authors = models.ManyToManyField(Author, related_name='articles')
title = models.CharField(max_length=50)
summary = models.CharField(max_length=200, null=True, blank=True)
text = models.TextField()
written = models.DateTimeField()
published = models.DateTimeField(null=True, blank=True)
updated = models.DateTimeField(null=True, blank=True)
views = models.PositiveIntegerField(default=0)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Fan(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveSmallIntegerField(default=30)
author = models.ForeignKey(Author, models.CASCADE, related_name='fans')
def __str__(self):
return self.name
| bsd-3-clause | -7,803,506,424,035,411,000 | 29.976744 | 75 | 0.718468 | false |
suyashdb/hcp2bids | setup.py | 1 | 2182 | from setuptools import setup
import os  # runtime dependencies (numpy, pandas, nibabel) are declared in install_requires below
here = os.path.abspath(os.path.dirname(__file__))
setup(
name="hcp2bids",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description="Command line tool to convert HCP dataset to a Brain Imaging Data Structure "
"compatible dataset.",
long_description="Command line tool to convert HCP dataset to a Brain Imaging Data Structure "
"compatible dataset.",
# The project URL.
url='https://github.com/suyashdb/hcp2bids',
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='BIDS HCP NIH',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=["hcp2bids"],
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ["numpy",
"pandas",
'nibabel'],
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'hcp2bids=hcp2bids.main:main',
],
},
)
| mit | -5,058,204,562,386,402,000 | 32.060606 | 98 | 0.636114 | false |
Endika/mitmproxy | libmproxy/contrib/jsbeautifier/__init__.py | 50 | 41216 | import sys
import getopt
import re
import string
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, [email protected],
# MIT licence, enjoy.
#
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
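#
# A short sketch of these options in use (made-up input; equivalent to the CLI
# flags shown in usage() below, e.g. --brace-style=expand):
#
# opts = default_options()
# opts.brace_style = 'expand'
# opts.keep_array_indentation = True
# print(beautify('if(x){y()}else{z()}', opts))
#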
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
self.max_preserve_newlines = 10.
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
)
class BeautifierFlags:
def __init__(self, mode):
self.previous_mode = 'BLOCK'
self.mode = mode
self.var_line = False
self.var_line_tainted = False
self.var_line_reindented = False
self.in_html_comment = False
self.if_line = False
self.in_case = False
self.eat_next_space = False
self.indentation_baseline = -1
self.indentation_level = 0
self.ternary_depth = 0
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-': # stdin
f = sys.stdin
else:
try:
f = open(file_name)
except Exception as ex:
return 'The file could not be opened'
b = Beautifier()
return b.beautify(''.join(f.readlines()), opts)
def usage():
print("""Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
""")
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
def blank_state(self):
# internal flags
self.flags = BeautifierFlags('BLOCK')
self.flag_store = []
self.wanted_newline = False
self.just_added_newline = False
self.do_block_just_closed = False
if self.opts.indent_with_tabs:
self.indent_string = "\t"
else:
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_word = '' # last TK_WORD seen
self.last_type = 'TK_START_EXPR' # last token type
self.last_text = '' # last token text
self.last_last_text = '' # pre-last token text
self.input = None
self.output = [] # formatted javascript gets built here
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! !! , : ? ^ ^= |= ::'
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
# Words which always should start on a new line
self.line_starters = 'continue,try,throw,return,var,if,switch,case,default,for,while,break,function'.split(',')
self.set_mode('BLOCK')
global parser_pos
parser_pos = 0
def beautify(self, s, opts = None ):
if opts != None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
#self.input = self.unpack(s, opts.eval_code)
# CORTESI
self.input = s
parser_pos = 0
while True:
token_text, token_type = self.get_next_token()
#print (token_text, token_type, self.flags.mode)
if token_type == 'TK_EOF':
break
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_UNKNOWN': self.handle_unknown,
}
handlers[token_type](token_text)
self.last_last_text = self.last_text
self.last_type = token_type
self.last_text = token_text
sweet_code = self.preindent_string + re.sub('[\n ]+$', '', ''.join(self.output))
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
while len(self.output) \
and (
self.output[-1] == ' '\
or self.output[-1] == self.indent_string \
or self.output[-1] == self.preindent_string \
or (eat_newlines and self.output[-1] in ['\n', '\r'])):
self.output.pop()
def is_special_word(self, s):
        return s in ['case', 'return', 'do', 'if', 'throw', 'else']
    def is_array(self, mode):
        return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']
    def is_expression(self, mode):
        return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]', '(EXPRESSION)', '(FOR-EXPRESSION)', '(COND-EXPRESSION)']
def append_newline_forced(self):
old_array_indentation = self.opts.keep_array_indentation
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = old_array_indentation
def append_newline(self, ignore_repeated = True):
self.flags.eat_next_space = False
if self.opts.keep_array_indentation and self.is_array(self.flags.mode):
return
self.flags.if_line = False
self.trim_output()
if len(self.output) == 0:
# no newline on start of file
return
if self.output[-1] != '\n' or not ignore_repeated:
self.just_added_newline = True
self.output.append('\n')
if self.preindent_string:
self.output.append(self.preindent_string)
for i in range(self.flags.indentation_level):
self.output.append(self.indent_string)
if self.flags.var_line and self.flags.var_line_reindented:
self.output.append(self.indent_string)
def append(self, s):
if s == ' ':
# do not add just a single space after the // comment, ever
if self.last_type == 'TK_COMMENT':
return self.append_newline()
# make sure only single space gets drawn
if self.flags.eat_next_space:
self.flags.eat_next_space = False
elif len(self.output) and self.output[-1] not in [' ', '\n', self.indent_string]:
self.output.append(' ')
else:
self.just_added_newline = False
self.flags.eat_next_space = False
self.output.append(s)
def indent(self):
self.flags.indentation_level = self.flags.indentation_level + 1
def remove_indent(self):
if len(self.output) and self.output[-1] in [self.indent_string, self.preindent_string]:
self.output.pop()
def set_mode(self, mode):
prev = BeautifierFlags('BLOCK')
if self.flags:
self.flag_store.append(self.flags)
prev = self.flags
self.flags = BeautifierFlags(mode)
if len(self.flag_store) == 1:
self.flags.indentation_level = 0
else:
self.flags.indentation_level = prev.indentation_level
if prev.var_line and prev.var_line_reindented:
self.flags.indentation_level = self.flags.indentation_level + 1
self.flags.previous_mode = prev.mode
def restore_mode(self):
self.do_block_just_closed = self.flags.mode == 'DO_BLOCK'
if len(self.flag_store) > 0:
mode = self.flags.mode
self.flags = self.flag_store.pop()
self.flags.previous_mode = mode
def get_next_token(self):
global parser_pos
self.n_newlines = 0
if parser_pos >= len(self.input):
return '', 'TK_EOF'
self.wanted_newline = False
c = self.input[parser_pos]
parser_pos += 1
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
if keep_whitespace:
# slight mess to allow nice preservation of array indentation and reindent that correctly
# first time when we get to the arrays:
# var a = [
# ....'something'
# we make note of whitespace_count = 4 into flags.indentation_baseline
# so we know that 4 whitespaces in original source match indent_level of reindented source
#
# and afterwards, when we get to
# 'something,
# .......'something else'
# we know that this should be indented to indent_level + (7 - indentation_baseline) spaces
whitespace_count = 0
while c in self.whitespace:
if c == '\n':
self.trim_output()
self.output.append('\n')
self.just_added_newline = True
whitespace_count = 0
elif c == '\t':
whitespace_count += 4
elif c == '\r':
pass
else:
whitespace_count += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.flags.indentation_baseline == -1:
self.flags.indentation_baseline = whitespace_count
if self.just_added_newline:
for i in range(self.flags.indentation_level + 1):
self.output.append(self.indent_string)
if self.flags.indentation_baseline != -1:
for i in range(whitespace_count - self.flags.indentation_baseline):
self.output.append(' ')
else: # not keep_whitespace
while c in self.whitespace:
if c == '\n':
if self.opts.max_preserve_newlines == 0 or self.opts.max_preserve_newlines > self.n_newlines:
self.n_newlines += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i == 0)
self.just_added_newline = True
self.wanted_newline = self.n_newlines > 0
if c in self.wordchar:
if parser_pos < len(self.input):
while self.input[parser_pos] in self.wordchar:
c = c + self.input[parser_pos]
parser_pos += 1
if parser_pos == len(self.input):
break
# small and surprisingly unugly hack for 1E-10 representation
if parser_pos != len(self.input) and self.input[parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[parser_pos]
parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
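                # e.g. the word '1E' followed by '-' and the next token '10'
                # is glued back together into the single TK_WORD '1E-10'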
return c, 'TK_WORD'
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
if self.wanted_newline and \
self.last_type != 'TK_OPERATOR' and\
self.last_type != 'TK_EQUALS' and\
not self.flags.if_line and \
(self.opts.preserve_newlines or self.last_text != 'var'):
self.append_newline()
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
comment_mode = 'TK_INLINE_COMMENT'
if self.input[parser_pos] == '*': # peek /* .. */ comment
parser_pos += 1
if parser_pos < len(self.input):
while not (self.input[parser_pos] == '*' and \
parser_pos + 1 < len(self.input) and \
self.input[parser_pos + 1] == '/')\
and parser_pos < len(self.input):
c = self.input[parser_pos]
comment += c
if c in '\r\n':
comment_mode = 'TK_BLOCK_COMMENT'
parser_pos += 1
if parser_pos >= len(self.input):
break
parser_pos += 2
return '/*' + comment + '*/', comment_mode
if self.input[parser_pos] == '/': # peek // comment
comment = c
while self.input[parser_pos] not in '\r\n':
comment += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
parser_pos += 1
if self.wanted_newline:
self.append_newline()
return comment, 'TK_COMMENT'
if c == "'" or c == '"' or \
(c == '/' and ((self.last_type == 'TK_WORD' and self.is_special_word(self.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(FOR-EXPRESSION)', '(COND-EXPRESSION)']) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR',
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON']))):
sep = c
esc = False
resulting_string = c
in_char_class = False
if parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if not esc:
esc = self.input[parser_pos] == '\\'
if self.input[parser_pos] == '[':
in_char_class = True
elif self.input[parser_pos] == ']':
in_char_class = False
else:
esc = False
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
else:
# handle string
while esc or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if not esc:
esc = self.input[parser_pos] == '\\'
else:
esc = False
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete string when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while parser_pos < len(self.input) and self.input[parser_pos] in self.wordchar:
resulting_string += self.input[parser_pos]
parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output) == 0 and len(self.input) > 1 and self.input[parser_pos] == '!':
resulting_string = c
while parser_pos < len(self.input) and c != '\n':
c = self.input[parser_pos]
resulting_string += c
parser_pos += 1
self.output.append(resulting_string.strip() + "\n")
self.append_newline()
return self.get_next_token()
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if parser_pos < len(self.input) and self.input[parser_pos] in self.digits:
while True:
c = self.input[parser_pos]
sharp += c
parser_pos += 1
if parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or parser_pos >= len(self.input):
pass
elif self.input[parser_pos] == '[' and self.input[parser_pos + 1] == ']':
sharp += '[]'
parser_pos += 2
elif self.input[parser_pos] == '{' and self.input[parser_pos + 1] == '}':
sharp += '{}'
parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[parser_pos - 1 : parser_pos + 3] == '<!--':
parser_pos += 3
c = '<!--'
while parser_pos < len(self.input) and self.input[parser_pos] != '\n':
c += self.input[parser_pos]
parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[parser_pos - 1 : parser_pos + 2] == '-->':
self.flags.in_html_comment = False
parser_pos += 2
if self.wanted_newline:
self.append_newline()
return '-->', 'TK_COMMENT'
if c in self.punct:
while parser_pos < len(self.input) and c + self.input[parser_pos] in self.punct:
c += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
if c == '=':
return c, 'TK_EQUALS'
else:
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def handle_start_expr(self, token_text):
if token_text == '[':
if self.last_type == 'TK_WORD' or self.last_text == ')':
if self.last_text in self.line_starters:
self.append(' ')
self.set_mode('(EXPRESSION)')
self.append(token_text)
return
if self.flags.mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']:
if self.last_last_text == ']' and self.last_text == ',':
# ], [ goes to a new line
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
elif self.last_text == '[':
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
else:
self.set_mode('[EXPRESSION]')
else:
self.set_mode('[EXPRESSION]')
else:
if self.last_text == 'for':
self.set_mode('(FOR-EXPRESSION)')
elif self.last_text in ['if', 'while']:
self.set_mode('(COND-EXPRESSION)')
else:
self.set_mode('(EXPRESSION)')
if self.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
if self.wanted_newline:
                self.append_newline()
elif self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.append(' ')
elif self.last_word == 'function' or self.last_word == 'typeof':
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.append(' ')
elif self.last_text in self.line_starters or self.last_text == 'catch':
self.append(' ')
self.append(token_text)
def handle_end_expr(self, token_text):
if token_text == ']':
if self.opts.keep_array_indentation:
if self.last_text == '}':
self.remove_indent()
self.append(token_text)
self.restore_mode()
return
else:
if self.flags.mode == '[INDENTED-EXPRESSION]':
if self.last_text == ']':
self.restore_mode()
self.append_newline()
self.append(token_text)
return
self.restore_mode()
self.append(token_text)
def handle_start_block(self, token_text):
if self.last_word == 'do':
self.set_mode('DO_BLOCK')
else:
self.set_mode('BLOCK')
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR':
if self.last_text == '=' or (self.is_special_word(self.last_text) and self.last_text != 'else'):
self.append(' ')
else:
self.append_newline(True)
self.append(token_text)
self.indent()
else:
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.append(' ')
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.flags.previous_mode) and self.last_text == ',':
if self.last_last_text == '}':
self.append(' ')
else:
self.append_newline()
self.indent()
self.append(token_text)
def handle_end_block(self, token_text):
self.restore_mode()
if self.opts.brace_style == 'expand':
if self.last_text != '{':
self.append_newline()
else:
if self.last_type == 'TK_START_BLOCK':
if self.just_added_newline:
self.remove_indent()
else:
# {}
self.trim_output()
else:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.append(token_text)
def handle_word(self, token_text):
if self.do_block_just_closed:
self.append(' ')
self.append(token_text)
self.append(' ')
self.do_block_just_closed = False
return
if token_text == 'function':
if self.flags.var_line:
self.flags.var_line_reindented = not self.opts.keep_function_indentation
if (self.just_added_newline or self.last_text == ';') and self.last_text != '{':
# make sure there is a nice clean space of at least one blank line
# before a new function definition
have_newlines = self.n_newlines
if not self.just_added_newline:
have_newlines = 0
if not self.opts.preserve_newlines:
have_newlines = 1
for i in range(2 - have_newlines):
self.append_newline(False)
if token_text in ['case', 'default']:
if self.last_text == ':':
self.remove_indent()
else:
self.flags.indentation_level -= 1
self.append_newline()
self.flags.indentation_level += 1
self.append(token_text)
self.flags.in_case = True
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if token_text not in ['else', 'catch', 'finally']:
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.append(' ')
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode in ['BLOCK', 'DO_BLOCK']:
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_WORD':
if self.last_text == 'else':
# eat newlines between ...else *** some_op...
# won't preserve extra newlines in this place (if any), but don't care that much
self.trim_output(True)
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.append(' ')
prefix = 'NEWLINE'
if self.flags.if_line and self.last_type == 'TK_END_EXPR':
self.flags.if_line = False
if token_text in self.line_starters:
if self.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if token_text == 'function' and self.last_text in ['get', 'set']:
prefix = 'SPACE'
if token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
self.append(' ')
elif prefix == 'NEWLINE':
if token_text == 'function' and (self.last_type == 'TK_START_EXPR' or self.last_text in '=,'):
# no need to force newline on "function" -
# (function...
pass
elif token_text == 'function' and self.last_text == 'new':
self.append(' ')
elif self.is_special_word(self.last_text):
# no newline between return nnn
self.append(' ')
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or token_text != 'var') and self.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if token_text == 'if' and self.last_word == 'else' and self.last_text != '{':
self.append(' ')
else:
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif token_text in self.line_starters and self.last_text != ')':
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif self.is_array(self.flags.mode) and self.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.append(' ')
self.append(token_text)
self.last_word = token_text
if token_text == 'var':
self.flags.var_line = True
self.flags.var_line_reindented = False
self.flags.var_line_tainted = False
if token_text == 'if':
self.flags.if_line = True
if token_text == 'else':
self.flags.if_line = False
def handle_semicolon(self, token_text):
self.append(token_text)
self.flags.var_line = False
self.flags.var_line_reindented = False
if self.flags.mode == 'OBJECT':
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = 'BLOCK'
def handle_string(self, token_text):
if self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(COND-EXPRESSION)', '(FOR-EXPRESSION)']:
self.append(' ')
if self.last_type in ['TK_STRING', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_SEMICOLON']:
self.append_newline()
elif self.last_type == 'TK_WORD':
self.append(' ')
# Try to replace readable \x-encoded characters with their equivalent,
# if it is possible (e.g. '\x41\x42\x43\x01' becomes 'ABC\x01').
def unescape(match):
block, code = match.group(0, 1)
char = chr(int(code, 16))
if block.count('\\') == 1 and char in string.printable:
return char
return block
token_text = re.sub(r'\\{1,2}x([a-fA-F0-9]{2})', unescape, token_text)
self.append(token_text)
def handle_equals(self, token_text):
if self.flags.var_line:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.var_line_tainted = True
self.append(' ')
self.append(token_text)
self.append(' ')
def handle_operator(self, token_text):
space_before = True
space_after = True
if self.flags.var_line and token_text == ',' and self.is_expression(self.flags.mode):
# do not break on comma, for ( var a = 1, b = 2
self.flags.var_line_tainted = False
if self.flags.var_line and token_text == ',':
if self.flags.var_line_tainted:
self.append(token_text)
self.flags.var_line_reindented = True
self.flags.var_line_tainted = False
self.append_newline()
return
else:
self.flags.var_line_tainted = False
if self.is_special_word(self.last_text):
# return had a special handling in TK_WORD
self.append(' ')
self.append(token_text)
return
if token_text == ':' and self.flags.in_case:
self.append(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append(token_text)
return
if token_text == ',':
if self.flags.var_line:
if self.flags.var_line_tainted:
# This never happens, as it's handled previously, right?
self.append(token_text)
self.append_newline()
self.flags.var_line_tainted = False
else:
self.append(token_text)
self.append(' ')
elif self.last_type == 'TK_END_BLOCK' and self.flags.mode != '(EXPRESSION)':
self.append(token_text)
if self.flags.mode == 'OBJECT' and self.last_text == '}':
self.append_newline()
else:
self.append(' ')
else:
if self.flags.mode == 'OBJECT':
self.append(token_text)
self.append_newline()
else:
# EXPR or DO_BLOCK
self.append(token_text)
self.append(' ')
# comma handled
return
elif token_text in ['--', '++', '!'] \
or (token_text in ['+', '-'] \
and self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']) \
or self.last_text in self.line_starters:
space_before = False
space_after = False
if self.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_WORD' and self.last_text in self.line_starters:
space_before = True
if self.flags.mode == 'BLOCK' and self.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == '.':
# decimal digits or object.property
space_before = False
elif token_text == ':':
if self.flags.ternary_depth == 0:
self.flags.mode = 'OBJECT'
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
if space_before:
self.append(' ')
self.append(token_text)
if space_after:
self.append(' ')
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
# all lines start with an asterisk? that's a proper box comment
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
self.append_newline()
self.append(lines[0])
for line in lines[1:]:
self.append_newline()
self.append(' ' + line.strip())
else:
# simple block comment: leave intact
if len(lines) > 1:
# multiline comment starts on a new line
self.append_newline()
else:
# single line /* ... */ comment stays on the same line
self.append(' ')
for line in lines:
self.append(line)
self.append('\n')
self.append_newline()
def handle_inline_comment(self, token_text):
self.append(' ')
self.append(token_text)
if self.is_expression(self.flags.mode):
self.append(' ')
else:
self.append_newline_forced()
def handle_comment(self, token_text):
if self.wanted_newline:
self.append_newline()
else:
self.append(' ')
self.append(token_text)
self.append_newline_forced()
def handle_unknown(self, token_text):
if self.last_text in ['return', 'throw']:
self.append(' ')
self.append(token_text)
def main():
argv = sys.argv[1:]
try:
        opts, args = getopt.getopt(argv, "s:c:o:djb:kil:htf", ['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
'jslint-happy', 'brace-style=',
'keep-array-indentation', 'indent-level=', 'help',
'usage', 'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation'])
except getopt.GetoptError:
return usage()
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
if opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
        elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
elif opt in ('--eval-code'):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
return usage()
else:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
| mit | -1,422,547,196,316,502,300 | 34.746748 | 138 | 0.495997 | false |
detiber/ansible | test/sanity/validate-modules/utils.py | 33 | 3443 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <[email protected]>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import sys
# We only use StringIO, since we cannot setattr on cStringIO
from StringIO import StringIO
import yaml
import yaml.reader
def find_globals(g, tree):
"""Uses AST to find globals in an ast tree"""
for child in tree:
if hasattr(child, 'body') and isinstance(child.body, list):
find_globals(g, child.body)
elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
g.add(child.name)
continue
elif isinstance(child, ast.Assign):
try:
g.add(child.targets[0].id)
except (IndexError, AttributeError):
pass
elif isinstance(child, ast.Import):
g.add(child.names[0].name)
elif isinstance(child, ast.ImportFrom):
for name in child.names:
g_name = name.asname or name.name
if g_name == '*':
continue
g.add(g_name)
class CaptureStd():
"""Context manager to handle capturing stderr and stdout"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
sys.stdout = self.stdout = StringIO()
sys.stderr = self.stderr = StringIO()
setattr(sys.stdout, 'encoding', self.sys_stdout.encoding)
setattr(sys.stderr, 'encoding', self.sys_stderr.encoding)
return self
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
def get(self):
"""Return ``(stdout, stderr)``"""
return self.stdout.getvalue(), self.stderr.getvalue()
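# Example usage of CaptureStd (illustrative):
#
#   with CaptureStd() as capture:
#       print('hello')            # written to the capture buffer, not the real stdout
#   stdout, stderr = capture.get()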
def parse_yaml(value, lineno, module, name, load_all=False):
traces = []
errors = []
data = None
if load_all:
loader = yaml.safe_load_all
else:
loader = yaml.safe_load
try:
data = loader(value)
if load_all:
data = list(data)
except yaml.MarkedYAMLError as e:
e.problem_mark.line += lineno - 1
e.problem_mark.name = '%s.%s' % (module, name)
errors.append('%s is not valid YAML. Line %d column %d' %
(name, e.problem_mark.line + 1,
e.problem_mark.column + 1))
traces.append(e)
except yaml.reader.ReaderError as e:
traces.append(e)
errors.append('%s is not valid YAML. Character '
'0x%x at position %d.' %
(name, e.character, e.position))
except yaml.YAMLError as e:
traces.append(e)
errors.append('%s is not valid YAML: %s: %s' % (name, type(e), e))
return data, errors, traces
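# Example (illustrative): parsing a module's DOCUMENTATION block that starts at
# line 10 of mymodule.py
#
#   data, errors, traces = parse_yaml("short_description: demo", 10,
#                                     'mymodule', 'DOCUMENTATION')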
| gpl-3.0 | 8,063,852,628,333,132,000 | 32.105769 | 74 | 0.598606 | false |
captiosus/treadmill | treadmill/infra/setup/ldap.py | 2 | 1481 | from treadmill.infra.setup import base_provision
from treadmill.infra import configuration, connection, constants, instances
from treadmill.api import ipa
import time
class LDAP(base_provision.BaseProvision):
def setup(
self,
image,
count,
key,
cidr_block,
tm_release,
instance_type,
app_root,
cell_subnet_id,
ipa_admin_password,
subnet_id=None
):
# TODO: remove count as parameter
count = 1
self.name = self.name + '-' + str(time.time())
hostname = self.name + '.' + connection.Connection.context.domain
otp = ipa.API().add_host(hostname=hostname)
ipa_server_hostname = instances.Instances.get_hostnames_by_roles(
vpc_id=self.vpc.id,
roles=[
constants.ROLES['IPA']
]
)[constants.ROLES['IPA']]
self.configuration = configuration.LDAP(
cell_subnet_id=cell_subnet_id,
tm_release=tm_release,
app_root=app_root,
hostname=hostname,
ipa_admin_password=ipa_admin_password,
ipa_server_hostname=ipa_server_hostname,
otp=otp
)
super().setup(
image=image,
count=count,
cidr_block=cidr_block,
subnet_id=subnet_id,
key=key,
instance_type=instance_type
)
| apache-2.0 | 5,280,554,688,259,472,000 | 28.62 | 75 | 0.538825 | false |
dethos/cloudroutes-service | src/actions/actions/saltstack-generic/__init__.py | 6 | 1476 | #!/usr/bin/python
######################################################################
# Cloud Routes Bridge
# -------------------------------------------------------------------
# Actions Module
######################################################################
import requests
import time
def action(**kwargs):
''' This method is called to action a reaction '''
redata = kwargs['redata']
jdata = kwargs['jdata']
logger = kwargs['logger']
run = True
# Check for Trigger
if redata['trigger'] > jdata['failcount']:
run = False
# Check for lastrun
checktime = time.time() - float(redata['lastrun'])
if checktime < redata['frequency']:
run = False
if redata['data']['call_on'] not in jdata['check']['status']:
run = False
if run:
return callSalt(redata, jdata, logger)
else:
return None
def callSalt(redata, jdata, logger):
''' Perform actual call '''
url = redata['data']['url']
payload = redata['data']
try:
req = requests.post(url, data=payload, timeout=3.00, verify=False)
except:
return False
if req.status_code == 200:
        line = "saltstack-generic: Request to %s sent for monitor %s - Successful" % (url, jdata['cid'])
logger.info(line)
return True
else:
line = "saltstack-generic: Request to %s sent for monitor %s - False" % (url, jdata['cid'])
logger.info(line)
return False
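# Rough shape of the inputs this action expects, as inferred from the code above
# (illustrative values, not upstream documentation):
#
#   redata = {
#       'trigger': 1, 'lastrun': 0.0, 'frequency': 30,
#       'data': {'call_on': 'false', 'url': 'https://salt-api.example.com/hook'},
#   }
#   jdata = {'failcount': 2, 'check': {'status': 'false'}, 'cid': 'monitor-id'}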
| agpl-3.0 | -2,527,293,623,996,610,600 | 27.941176 | 104 | 0.51084 | false |
nextgis/NextGIS_QGIS_open | python/plugins/processing/script/WrongScriptException.py | 46 | 1234 | # -*- coding: utf-8 -*-
"""
***************************************************************************
WrongScriptException.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
class WrongScriptException(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
| gpl-2.0 | 90,593,856,719,839,950 | 36.393939 | 75 | 0.394652 | false |
raiabril/arduino_logger | py2app-0.9-py2.7.egg/py2app/script_py2applet.py | 9 | 5469 | """
Create an applet from a Python script.
You can drag in packages, Info.plist files, icons, etc.
It's expected that only one Python script is dragged in.
"""
from __future__ import print_function
import os, sys
from distutils.core import setup
from plistlib import Plist
import py2app
import tempfile
import shutil
import imp
import pprint
from py2app.util import copy_tree
from py2app import build_app
try:
set
except NameError:
from sets import Set as set
if sys.version_info[0] == 3:
raw_input = input
HELP_TEXT = """
usage: py2applet --make-setup [options...] script.py [data files...]
or: py2applet [options...] script.py [data files...]
or: py2applet --help
"""
SETUP_TEMPLATE = '''"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = %s
DATA_FILES = %s
OPTIONS = %s
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
'''
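# For "py2applet --make-setup MyApp.py data.json" the template above is rendered
# roughly as (illustrative):
#
#   APP = ['MyApp.py']
#   DATA_FILES = ['data.json']
#   OPTIONS = {'argv_emulation': True}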
def get_option_map():
optmap = {}
for option in build_app.py2app.user_options:
opt_long, opt_short = option[:2]
if opt_short:
optmap['-' + opt_short] = opt_long.rstrip('=')
return optmap
def get_cmd_options():
options = set()
for option in build_app.py2app.user_options:
opt_long, opt_short = option[:2]
if opt_long.endswith('=') and opt_short:
options.add('-' + opt_short)
return options
def main():
if not sys.argv[1:]:
print(HELP_TEXT)
return
scripts = []
data_files = []
packages = []
args = []
plist = {}
iconfile = None
parsing_options = True
next_is_option = False
cmd_options = get_cmd_options()
is_make_setup = False
for fn in sys.argv[1:]:
if parsing_options:
if next_is_option:
args.append(fn)
next_is_option = False
continue
elif fn == '--make-setup':
is_make_setup = True
continue
elif fn.startswith('-'):
args.append(fn)
if fn in cmd_options:
next_is_option = True
continue
parsing_options = False
if not is_make_setup:
fn = os.path.abspath(fn)
if fn.endswith('.py'):
if scripts:
data_files.append(fn)
else:
scripts.append(fn)
elif os.path.basename(fn) == 'Info.plist':
plist = Plist.fromFile(fn)
elif fn.endswith('.icns') and not iconfile:
iconfile = os.path.abspath(fn)
elif os.path.isdir(fn):
            sys.path.insert(0, os.path.dirname(fn))
try:
path = imp.find_module(os.path.basename(fn))[0]
except ImportError:
path = ''
del sys.path[0]
if os.path.realpath(path) == os.path.realpath(fn):
packages.append(os.path.basename(fn))
else:
data_files.append(fn)
else:
data_files.append(fn)
options = dict(
packages=packages,
plist=plist,
iconfile=iconfile,
argv_emulation=True,
)
for k,v in list(options.items()):
if not v:
del options[k]
if is_make_setup:
make_setup(args, scripts, data_files, options)
else:
build(args, scripts, data_files, options)
def make_setup(args, scripts, data_files, options):
optmap = get_option_map()
cmd_options = get_cmd_options()
while args:
cmd = args.pop(0)
if cmd in cmd_options:
cmd = optmap[cmd]
options[cmd.replace('-', '_')] = args.pop(0)
elif '=' in cmd:
cmd, val = cmd.split('=', 1)
options[cmd.lstrip('-').replace('-', '_')] = val
else:
cmd = optmap.get(cmd, cmd)
options[cmd.lstrip('-').replace('-', '_')] = True
if os.path.exists('setup.py'):
res = ''
while res.lower() not in ('y', 'n'):
res = raw_input('Existing setup.py detected, replace? [Y/n] ')
if not res:
break
if res == 'n':
print('aborted!')
return
f = open('setup.py', 'w')
tvars = tuple(map(pprint.pformat, (scripts, data_files, options)))
f.write(SETUP_TEMPLATE % tvars)
f.flush()
f.close()
print('Wrote setup.py')
def build(args, scripts, data_files, options):
old_argv = sys.argv
sys.argv = [sys.argv[0], 'py2app'] + args
old_path = sys.path
path_insert = set()
for script in scripts:
path_insert.add(os.path.dirname(script))
sys.path = list(path_insert) + old_path
old_dir = os.getcwd()
tempdir = tempfile.mkdtemp()
os.chdir(tempdir)
try:
d = setup(
app=scripts,
data_files=data_files,
options={'py2app': options},
)
for target in d.app:
copy_tree(
target.appdir,
os.path.join(
os.path.dirname(target.script),
os.path.basename(target.appdir),
),
preserve_symlinks=True,
)
finally:
os.chdir(old_dir)
shutil.rmtree(tempdir, ignore_errors=True)
sys.argv = old_argv
sys.path = old_path
if __name__ == '__main__':
main()
| gpl-2.0 | -5,001,728,375,581,034,000 | 25.548544 | 74 | 0.536113 | false |
goddino/libjingle | trunk/third_party/yasm/source/patched-yasm/tools/python-yasm/tests/test_symrec.py | 32 | 2976 | from tests import TestCase, add
from yasm import SymbolTable, Expression, YasmError
class TSymbolTable(TestCase):
def setUp(self):
self.symtab = SymbolTable()
def test_keys(self):
self.assertEquals(len(self.symtab.keys()), 0)
self.symtab.declare("foo", None, 0)
keys = self.symtab.keys()
self.assertEquals(len(keys), 1)
self.assertEquals(keys[0], "foo")
def test_contains(self):
self.assert_("foo" not in self.symtab)
self.symtab.declare("foo", None, 0)
self.assert_("foo" in self.symtab)
def test_exception(self):
expr = Expression('+', 1, 2)
self.symtab.define_equ("foo", expr, 0)
self.assertRaises(YasmError, self.symtab.define_equ, "foo", expr, 0)
self.symtab.define_equ("bar", expr, 0) # cleared
self.assertRaises(YasmError, self.symtab.define_special, "bar",
'global')
def test_iters(self):
tab = self.symtab
tab.declare("foo", None, 0)
tab.declare("bar", None, 0)
tab.declare("baz", None, 0)
# while ordering is not known, it must be consistent
self.assertEquals(list(tab.keys()), list(tab.iterkeys()))
self.assertEquals(list(tab.values()), list(tab.itervalues()))
self.assertEquals(list(tab.items()), list(tab.iteritems()))
self.assertEquals(list(tab.iteritems()), zip(tab.keys(), tab.values()))
add(TSymbolTable)
class TSymbolAttr(TestCase):
def setUp(self):
self.symtab = SymbolTable()
self.declsym = self.symtab.declare("foo", None, 0)
def test_visibility(self):
sym = self.symtab.declare("local1", None, 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("local2", '', 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("local3", 'local', 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("global", 'global', 0)
self.assertEquals(sym.visibility, set(['global']))
sym = self.symtab.declare("common", 'common', 0)
self.assertEquals(sym.visibility, set(['common']))
sym = self.symtab.declare("extern", 'extern', 0)
self.assertEquals(sym.visibility, set(['extern']))
sym = self.symtab.declare("dlocal", 'dlocal', 0)
self.assertEquals(sym.visibility, set(['dlocal']))
self.assertRaises(ValueError,
lambda: self.symtab.declare("extern2", 'foo', 0))
def test_name(self):
self.assertEquals(self.declsym.name, "foo")
def test_equ(self):
self.assertRaises(AttributeError, lambda: self.declsym.equ)
def test_label(self):
self.assertRaises(AttributeError, lambda: self.declsym.label)
def test_is_special(self):
self.assertEquals(self.declsym.is_special, False)
def test_is_curpos(self):
self.assertEquals(self.declsym.is_curpos, False)
add(TSymbolAttr)
| bsd-3-clause | 5,083,966,370,018,586,000 | 36.2 | 79 | 0.622312 | false |
stanlyxiang/incubator-hawq | tools/bin/gppylib/gpMgmttest/__init__.py | 12 | 3044 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest2 as unittest
import time
class GpMgmtTestRunner(unittest.TextTestRunner):
def _makeResult(self):
return GpMgmtTextTestResult(self.stream, self.descriptions, self.verbosity)
class GpMgmtTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
super(GpMgmtTextTestResult, self).__init__(stream, descriptions, verbosity)
self.verbosity = verbosity
self.startTime = 0
def getDescription(self, test):
case_name, full_name = test.__str__().split()
suite_name, class_name = full_name.strip('()').rsplit('.',1)
if self.verbosity > 1:
if test.shortDescription():
return 'Test Suite Name|%s|Test Case Name|%s|Test Details|%s' % (suite_name, case_name, test.shortDescription())
else:
return 'Test Suite Name|%s|Test Case Name|%s|Test Details|' % (suite_name, case_name)
def startTest(self, test):
super(GpMgmtTextTestResult, self).startTest(test)
self.startTime = test.start_time = time.time()
def addSuccess(self, test):
test.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addSuccess(test)
def addError(self, test, err):
test.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addError(test, err)
def addFailure(self, test, err):
test.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addFailure(test, err)
def addSkip(self, test, err):
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addSkip(test, err)
def addExpectedFailure(self, test, err):
self.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addExpectedFailure(test, err)
def _show_run_time(self):
etime = time.time()
elapsed = etime - self.startTime
self.stream.write('(%4.2f ms)' % (elapsed*1000))
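# Typical usage (illustrative): run a management test suite with the custom runner
# so each result line carries the suite/case/detail fields and the elapsed time.
#
#   suite = unittest.TestLoader().loadTestsFromTestCase(SomeMgmtTestCase)
#   GpMgmtTestRunner(verbosity=2).run(suite)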
| apache-2.0 | -3,238,256,107,897,755,000 | 39.052632 | 128 | 0.667871 | false |
frioux/offlineimap | offlineimap/mbnames.py | 10 | 3676 | # Mailbox name generator
#
# Copyright (C) 2002-2015 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os.path
import re # for folderfilter
from threading import Lock
boxes = {}
localroots = {}
config = None
accounts = None
mblock = Lock()
def init(conf, accts):
global config, accounts
config = conf
accounts = accts
def add(accountname, foldername, localfolders):
if not accountname in boxes:
boxes[accountname] = []
localroots[accountname] = localfolders
if not foldername in boxes[accountname]:
boxes[accountname].append(foldername)
def write(allcomplete):
incremental = config.getdefaultboolean("mbnames", "incremental", False)
# Skip writing if we don't want incremental writing and we're not done.
if not incremental and not allcomplete:
return
# Skip writing if we want incremental writing and we're done.
if incremental and allcomplete:
return
# See if we're ready to write it out.
for account in accounts:
if account not in boxes:
return
__genmbnames()
def __genmbnames():
"""Takes a configparser object and a boxlist, which is a list of hashes
containing 'accountname' and 'foldername' keys."""
xforms = [os.path.expanduser, os.path.expandvars]
mblock.acquire()
try:
localeval = config.getlocaleval()
if not config.getdefaultboolean("mbnames", "enabled", 0):
return
path = config.apply_xforms(config.get("mbnames", "filename"), xforms)
file = open(path, "wt")
file.write(localeval.eval(config.get("mbnames", "header")))
folderfilter = lambda accountname, foldername: 1
if config.has_option("mbnames", "folderfilter"):
folderfilter = localeval.eval(config.get("mbnames", "folderfilter"),
{'re': re})
mb_sort_keyfunc = lambda d: (d['accountname'], d['foldername'])
if config.has_option("mbnames", "sort_keyfunc"):
mb_sort_keyfunc = localeval.eval(config.get("mbnames", "sort_keyfunc"),
{'re': re})
itemlist = []
for accountname in boxes.keys():
localroot = localroots[accountname]
for foldername in boxes[accountname]:
if folderfilter(accountname, foldername):
itemlist.append({'accountname': accountname,
'foldername': foldername,
'localfolders': localroot})
itemlist.sort(key = mb_sort_keyfunc)
format_string = config.get("mbnames", "peritem", raw=1)
itemlist = [format_string % d for d in itemlist]
file.write(localeval.eval(config.get("mbnames", "sep")).join(itemlist))
file.write(localeval.eval(config.get("mbnames", "footer")))
file.close()
finally:
mblock.release()
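# The [mbnames] section of the offlineimap config drives this module; an
# illustrative example (values made up, option names taken from the code above):
#
#   [mbnames]
#   enabled = yes
#   filename = ~/.mutt/mailboxes
#   header = "mailboxes "
#   peritem = "+%(accountname)s/%(foldername)s"
#   sep = " "
#   footer = "\n"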
| apache-2.0 | -6,548,077,246,584,218,000 | 37.694737 | 83 | 0.630849 | false |
llonchj/sentry | tests/sentry/utils/auth/tests.py | 30 | 1779 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from sentry.models import User
from sentry.testutils import TestCase
from sentry.utils.auth import EmailAuthBackend, get_login_redirect
class EmailAuthBackendTest(TestCase):
def setUp(self):
self.user = User(username="foo", email="[email protected]")
self.user.set_password("bar")
self.user.save()
@property
def backend(self):
return EmailAuthBackend()
def test_can_authenticate_with_username(self):
result = self.backend.authenticate(username='foo', password='bar')
self.assertEquals(result, self.user)
def test_can_authenticate_with_email(self):
result = self.backend.authenticate(username='[email protected]', password='bar')
self.assertEquals(result, self.user)
def test_does_not_authenticate_with_invalid_password(self):
result = self.backend.authenticate(username='foo', password='pizza')
self.assertEquals(result, None)
class GetLoginRedirectTest(TestCase):
def make_request(self, next=None):
request = HttpRequest()
request.session = {}
request.user = self.user
if next:
request.session['_next'] = next
return request
def test_schema_uses_default(self):
result = get_login_redirect(self.make_request('http://example.com'))
assert result == reverse('sentry')
def test_login_uses_default(self):
result = get_login_redirect(self.make_request(reverse('sentry-login')))
assert result == reverse('sentry')
def test_no_value_uses_default(self):
result = get_login_redirect(self.make_request())
assert result == reverse('sentry')
| bsd-3-clause | -3,169,021,232,812,707,300 | 32.566038 | 86 | 0.677909 | false |
jburger424/MediaQueueHCI | m-q-env/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py | 982 | 19608 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
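# Sketch of how chardet's coding state machine consumes a model like the one
# above (names approximate, for orientation only):
#
#   byte_class = classTable[byte]                    # raw byte -> character class
#   if state == eStart:
#       char_len = charLenTable[byte_class]          # length of the char being decoded
#   state = stateTable[state * classFactor + byte_class]
#   # eItsMe -> input can only be this encoding; eError -> encoding ruled out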
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949SMModel = {'classTable': CP949_cls,
'classFactor': 10,
'stateTable': CP949_st,
'charLenTable': CP949CharLenTable,
'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages does
#contain such byte. We need to be more error forgiven.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
# flake8: noqa
| mit | -1,982,911,184,683,219,700 | 33.10087 | 93 | 0.492095 | false |
sporksmith/polygraph | experiments/workloads/generate_workloads.py | 2 | 2794 | #!/usr/bin/env python
# Polygraph (release 0.1)
# Signature generation algorithms for polymorphic worms
#
# Copyright (c) 2004-2005, Intel Corporation
# All Rights Reserved
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
def create_workload(generator, filename):
import cPickle
workload = [sample for sample in generator]
f = open(filename, 'w')
cPickle.dump(workload, f, cPickle.HIGHEST_PROTOCOL)
f.close()
def create_noise_workload(tracefile, count, filename):
# get total number of streams in the trace
import polygraph.trace_crunching.stream_trace as stream_trace
s = stream_trace.StreamTrace(tracefile)
# select which streams to use
import random
indices = range(s.numstreams())
random.shuffle(indices)
indices = indices[:count]
indices.sort()
# get those streams
workload = []
for i in indices:
s.seek(i)
workload.append(s.next())
if s.numstreams() < count:
print '*' * 80
print 'WARNING: Only %d streams in %s, need %d to' % \
(s.numstreams(), tracefile, count)
print 'generate noise workload. Will cludge by duplicating'
print 'streams as necessary.'
print '*' * 80
workload *= (count / s.numstreams()) + 1
workload = workload[:count]
random.shuffle(workload)
# write to file
import cPickle
f = open(filename, 'w')
cPickle.dump(workload, f, cPickle.HIGHEST_PROTOCOL)
f.close()
if __name__ == '__main__':
# these should correspond to the largest workload needed
trials=5
dynamic_range=range(2,10)+range(10,50,5)+range(50,101,10)
addtl = 1000
number = trials * sum(dynamic_range) + addtl
#http noise
import sys
sys.path.append('../')
import config
create_noise_workload(config.traces[80]['eval'], number,'http_noise.pickle')
#dns noise
create_noise_workload(config.traces[53]['eval'], number, 'dns_noise.pickle')
#atphttpd workload
import polygraph.worm_gen.atphttpd as atphttpd
create_workload(atphttpd.ATPhttpd().generate(number), 'atphttpd.pickle')
#apache knacker workload
import polygraph.worm_gen.apache_host as apache_host
create_workload(apache_host.ApacheHost().generate(number), 'apache.pickle')
#lion (dns tsig) workload
import polygraph.worm_gen.bindTSIG as bindTSIG
create_workload(bindTSIG.bindTSIG().generate(number), 'tsig.pickle')
#clet workload
import polygraph.worm_gen.clet as clet
create_workload(clet.Clet().generate(number), 'clet.pickle')
| epl-1.0 | -5,987,088,197,071,701,000 | 30.393258 | 80 | 0.671797 | false |
iychoi/syndicate | old/ms/django_lib/gatewayforms.py | 2 | 4283 | '''
John Whelchel
Summer 2013
Forms used just for gateways (AG, RG, and UG).
'''
from django import forms
from django_lib.forms import LONGEST_CHAR_FIELD, LONGEST_PASS_FIELD, LONGEST_JSON_FIELD, ReadOnlyWidget
LARGEST_PORT = 65535
class ModifyGatewayConfig(forms.Form):
json_config = forms.FileField(required=False,
label="Gateway Configuration"
)
class ChangeVolume(forms.Form):
volume_name = forms.CharField(label="New Volume name",
max_length=LONGEST_CHAR_FIELD)
class ModifyGatewayLocation(forms.Form):
host = forms.CharField(label="New Gateway host",
max_length = LONGEST_CHAR_FIELD)
port = forms.IntegerField(label="New Port number",
max_value=LARGEST_PORT)
class GatewayRemoveVolume(forms.Form):
volume_name = forms.CharField(label="Volume name",
widget=ReadOnlyWidget(),
required=False,
max_length=LONGEST_CHAR_FIELD)
remove = forms.BooleanField(label="Remove",
required=False)
class GatewayAddVolume(forms.Form):
volume_name = forms.CharField(label="Volume name",
max_length=LONGEST_CHAR_FIELD)
class DeleteGateway(forms.Form):
confirm_delete = forms.BooleanField(required=True,
label="Yes, I understand that this action is permament and my gateway will be gone.")
g_password = forms.CharField(label="Gateway password",
max_length=LONGEST_PASS_FIELD,
widget=forms.PasswordInput,
help_text="You must also own this gateway to delete it.")
class CreateGateway(forms.Form):
g_name = forms.CharField(label="Gateway name",
initial="My Gateway",
max_length=LONGEST_CHAR_FIELD,
help_text="Your gateway's name cannot be changed later.")
g_password = forms.CharField(label="Gateway password",
max_length=LONGEST_CHAR_FIELD,
widget=forms.PasswordInput)
host = forms.CharField(label="Host name",
max_length=LONGEST_CHAR_FIELD,)
port = forms.IntegerField(label="Port number",
max_value=LARGEST_PORT)
class CreateUG(CreateGateway):
volume_name = forms.CharField(label="Volume name (optional)",
required=False,
max_length=LONGEST_CHAR_FIELD)
read_write = forms.BooleanField(required=False,
label="UG can write to other gateways.")
class CreateAG(CreateGateway):
json_config = forms.FileField(required=False,
label="Gateway Configuration",
help_text="If no file is specified, blank config will be used.")
json_config_text = forms.CharField(required=False,
max_length=LONGEST_JSON_FIELD,
widget=forms.Textarea,
label="Gateway Configuration (alternate)",
help_text="This can also be used to manually config the gateway with text in JSON format. The upload file will take priority however.")
class CreateRG(CreateGateway):
json_config = forms.FileField(required=False,
label="Gateway Configuration",
help_text="If no file is specified, blank config will be used.")
json_config_text = forms.CharField(required=False,
max_length=LONGEST_JSON_FIELD,
widget=forms.Textarea,
label="Gateway Configuration (alternate)",
help_text="This can also be used to manually config the gateway with text in JSON format. The upload file will take priority however.")
private = forms.BooleanField(required=False,
label="Replica Gateway is private. It can only be attached to volumes owned by you.") | apache-2.0 | -6,452,875,492,919,408,000 | 35.931034 | 175 | 0.564791 | false |
OmnesRes/onco_lnc | mrna/cox/LGG/patient_info.py | 1 | 7037 | ## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file) contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_patient_lgg.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files.
##All the clinical data is merged, checking which data is the most up to date.
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
final_clinical=[]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
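            # The join below rebuilds the patient barcode from the first three
            # dash-separated fields of the sample barcode (e.g. a hypothetical
            # 'TCGA-AB-1234-01A-...' becomes 'TCGA-AB-1234').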
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| mit | 2,317,178,385,927,912,000 | 30.275556 | 132 | 0.669177 | false |
shootstar/ctest | ceilometer/compute/manager.py | 2 | 3043 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012-2013 eNovance <[email protected]>
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ceilometer import agent
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer import extension_manager
from ceilometer import nova_client
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class PollingTask(agent.PollingTask):
def poll_and_publish_instances(self, instances):
with self.publish_context as publisher:
for instance in instances:
if getattr(instance, 'OS-EXT-STS:vm_state', None) != 'error':
# TODO(yjiang5) passing counters to get_counters to avoid
# polling all counters one by one
for pollster in self.pollsters:
try:
LOG.info("Polling pollster %s", pollster.name)
publisher(list(pollster.obj.get_counters(
self.manager,
instance)))
except Exception as err:
LOG.warning('Continue after error from %s: %s',
pollster.name, err)
LOG.exception(err)
def poll_and_publish(self):
self.poll_and_publish_instances(
self.manager.nv.instance_get_all_by_host(cfg.CONF.host))
class AgentManager(agent.AgentManager):
def __init__(self):
super(AgentManager, self).__init__(
extension_manager.ActivatedExtensionManager(
namespace='ceilometer.poll.compute',
disabled_names=cfg.CONF.disabled_compute_pollsters,
),
)
self._inspector = virt_inspector.get_hypervisor_inspector()
self.nv = nova_client.Client()
def create_polling_task(self):
return PollingTask(self)
def setup_notifier_task(self):
"""For nova notifier usage."""
task = PollingTask(self)
for pollster in self.pollster_manager.extensions:
task.add(
pollster,
self.pipeline_manager.pipelines)
self.notifier_task = task
def poll_instance(self, context, instance):
"""Poll one instance."""
self.notifier_task.poll_and_publish_instances([instance])
@property
def inspector(self):
return self._inspector
| apache-2.0 | 4,091,869,632,406,898,700 | 35.214286 | 77 | 0.615713 | false |
sshleifer/object_detection_kitti | inception/inception/slim/variables.py | 23 | 10358 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for creating variables in TF-Slim.
The variables module is typically used for defining model variables from the
ops routines (see slim.ops). Such variables are used for training, evaluation
and inference of models.
All the variables created through this module are added to the
MODEL_VARIABLES collection. If you create a model variable outside slim, it can
be added with slim.variables.add_variable(external_variable, reuse).
Usage:
weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
weights = variables.variable('weights',
shape=[100, 100],
initializer=weights_initializer,
regularizer=l2_regularizer,
device='/cpu:0')
biases = variables.variable('biases',
shape=[100],
initializer=tf.zeros_initializer(),
device='/cpu:0')
# More complex example.
net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
with slim.arg_scope([variables.variable], restore=False):
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
# Get all model variables from all the layers.
model_variables = slim.variables.get_variables()
# Get all model variables from a specific the layer, i.e 'conv1'.
conv1_variables = slim.variables.get_variables('conv1')
# Get all weights from all the layers.
weights = slim.variables.get_variables_by_name('weights')
  # Get all biases from all the layers.
biases = slim.variables.get_variables_by_name('biases')
# Get all variables to restore.
# (i.e. only those created by 'conv1' and 'conv2')
variables_to_restore = slim.variables.get_variables_to_restore()
************************************************
* Initializing model variables from a checkpoint
************************************************
# Create some variables.
v1 = slim.variables.variable(name="v1", ..., restore=False)
v2 = slim.variables.variable(name="v2", ...) # By default restore=True
...
# The list of variables to restore should only contain 'v2'.
variables_to_restore = slim.variables.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Restore variables from disk.
restorer.restore(sess, "/tmp/model.ckpt")
print("Model restored.")
# Do some work with the model
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import scopes
# Collection containing all the variables created using slim.variables
MODEL_VARIABLES = '_model_variables_'
# Collection containing the slim.variables that are created with restore=True.
VARIABLES_TO_RESTORE = '_variables_to_restore_'
def add_variable(var, restore=True):
"""Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
"""
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if var not in tf.get_collection(collection):
tf.add_to_collection(collection, var)
def get_variables(scope=None, suffix=None):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a copied list of variables with scope and suffix.
"""
candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
if suffix is not None:
candidates = [var for var in candidates if var.op.name.endswith(suffix)]
return candidates
def get_variables_to_restore():
"""Gets the list of variables to restore.
Returns:
a copied list of variables.
"""
return tf.get_collection(VARIABLES_TO_RESTORE)[:]
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=given_name)
def get_unique_variable(name):
"""Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
if not candidates:
    raise ValueError("Couldn't find variable %s" % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' % name)
class VariableDeviceChooser(object):
"""Slim device chooser for variables.
  When using parameter servers, it will assign variables to them in a
  round-robin fashion. When not using a parameter server, it allows GPU:0
  placement, otherwise CPU:0.
"""
def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
"""Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
      placement: string representing the placement of the variable, either
        CPU:0 or GPU:0. When using parameter servers it is forced to CPU:0.
"""
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0
def __call__(self, op):
device_string = ''
if self._num_ps > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_ps
device_string = '%s/task:%d' % (self._ps_device, task_id)
device_string += '/%s' % self._placement
return device_string
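# Illustrative use (a sketch; the variable names below are hypothetical): with
# num_parameter_servers=2 the chooser hands out parameter-server devices in a
# round-robin fashion, e.g.
#   chooser = VariableDeviceChooser(num_parameter_servers=2)
#   weights = variable('weights', shape=[10, 10], device=chooser)  # /job:ps/task:0/CPU:0
#   biases = variable('biases', shape=[10], device=chooser)        # /job:ps/task:1/CPU:0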
# TODO(sguada) Remove once get_variable is able to colocate op.devices.
def variable_device(device, name):
"""Fix the variable device to colocate its ops."""
if callable(device):
var_name = tf.get_variable_scope().name + '/' + name
var_def = tf.NodeDef(name=var_name, op='Variable')
device = device(var_def)
if device is None:
device = ''
return device
@scopes.add_arg_scope
def global_step(device=''):
"""Returns the global step variable.
Args:
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
"""
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variable.
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False, collections=collections)
@scopes.add_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None,
regularizer=None, trainable=True, collections=None, device='',
restore=True):
"""Gets an existing variable with these parameters or creates a new one.
  It also adds itself to a group with its name.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the tf.GraphKeys.GLOBAL_VARIABLES
and MODEL_VARIABLES collections.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
# Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES
collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES]
# Add to VARIABLES_TO_RESTORE if necessary
if restore:
collections.append(VARIABLES_TO_RESTORE)
# Remove duplicates
collections = set(collections)
# Get the device for the variable.
with tf.device(variable_device(device, name)):
return tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections)
| apache-2.0 | 6,873,595,435,408,468,000 | 34.84083 | 86 | 0.682564 | false |
PingaxAnalytics/koob_auth | testproject/ptvs_virtualenv_proxy.py | 1 | 4166 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import datetime
import os
import sys
import traceback
if sys.version_info[0] == 3:
def to_str(value):
return value.decode(sys.getfilesystemencoding())
def execfile(path, global_dict):
"""Execute a file"""
with open(path, 'r') as f:
code = f.read()
code = code.replace('\r\n', '\n') + '\n'
exec (code, global_dict)
else:
def to_str(value):
return value.encode(sys.getfilesystemencoding())
def log(txt):
"""Logs fatal errors to a log file if WSGI_LOG env var is defined"""
log_file = os.environ.get('WSGI_LOG')
if log_file:
f = open(log_file, 'a+')
try:
f.write('%s: %s' % (datetime.datetime.now(), txt))
finally:
f.close()
ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET')
if ptvsd_secret:
log('Enabling ptvsd ...\n')
try:
import ptvsd
try:
ptvsd.enable_attach(ptvsd_secret)
log('ptvsd enabled.\n')
except:
log('ptvsd.enable_attach failed\n')
except ImportError:
        log('error importing ptvsd.\n')
def get_wsgi_handler(handler_name):
if not handler_name:
raise Exception('WSGI_ALT_VIRTUALENV_HANDLER env var must be set')
if not isinstance(handler_name, str):
handler_name = to_str(handler_name)
module_name, _, callable_name = handler_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list = [(callable_name, should_call)]
handler = None
last_tb = ''
while module_name:
try:
handler = __import__(module_name, fromlist=[name_list[0][0]])
last_tb = ''
for name, should_call in name_list:
handler = getattr(handler, name)
if should_call:
handler = handler()
break
except ImportError:
module_name, _, callable_name = module_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list.insert(0, (callable_name, should_call))
handler = None
last_tb = ': ' + traceback.format_exc()
if handler is None:
raise ValueError('"%s" could not be imported%s' % (handler_name, last_tb))
return handler
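# For example (illustrative only), a handler name of
# 'django.core.wsgi.get_wsgi_application()' imports 'django.core.wsgi', looks up
# 'get_wsgi_application' and, because of the trailing '()', calls it to obtain
# the WSGI application object.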
activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS')
if not activate_this:
raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set')
def get_virtualenv_handler():
log('Activating virtualenv with %s\n' % activate_this)
execfile(activate_this, dict(__file__=activate_this))
log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
log('Got handler: %r\n' % handler)
return handler
def get_venv_handler():
log('Activating venv with executable at %s\n' % activate_this)
import site
sys.executable = activate_this
old_sys_path, sys.path = sys.path, []
site.main()
sys.path.insert(0, '')
for item in old_sys_path:
if item not in sys.path:
sys.path.append(item)
log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
log('Got handler: %r\n' % handler)
return handler | mit | 5,324,697,217,613,740,000 | 31.053846 | 96 | 0.597936 | false |
MiniSEC/GRR_clone | client/client_actions/linux/ko_patcher.py | 6 | 7766 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""A kernel module rewriter.
This is a hack that rewrites kernel modules such that they can be loaded on
kernels they were not compiled for.
"""
import os
import platform
import struct
import sys
import logging
from grr.lib import flags
class KernelObjectPatcher(object):
"""The kernel object patching class."""
ELF_MAGIC = "\x7F\x45\x4C\x46"
def __init__(self, log=False):
self.log = log
def GetSectionOffsets(self, file_data):
"""Returns offsets and lengths of all the sections of this elf file."""
if file_data[:4] != self.ELF_MAGIC:
raise RuntimeError("Not an elf file.")
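    # Offsets below follow the ELF64 header layout: the section header table
    # offset (e_shoff) is the 8-byte field at byte 40, and e_shentsize, e_shnum
    # and e_shstrndx are the three 16-bit fields starting at byte 58.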
section_header_offset = struct.unpack("<Q", file_data[40:40+8])[0]
(section_header_size, num_section_headers,
string_table) = struct.unpack("<HHH", file_data[58:58+6])
# Read the string table first.
start = section_header_offset + string_table * section_header_size
header_data = file_data[start:start + section_header_size]
offset, size = struct.unpack("<IIQQQQIIQQ", header_data)[4:6]
string_data = file_data[offset:offset+size]
sections = {}
for start in xrange(section_header_offset,
section_header_offset + (
num_section_headers * section_header_size),
section_header_size):
header_data = file_data[start: start + section_header_size]
header = struct.unpack("<IIQQQQIIQQ", header_data)
name_offset, data_offset, data_size = header[0], header[4], header[5]
name = string_data[name_offset:string_data.find("\x00", name_offset)]
if data_size:
sections[name] = (data_offset, data_size)
return sections
def ParseVersionSection(self, version_data):
"""Returns the checksums found for all the imports."""
checksums = {}
while version_data:
act_version = version_data[:0x40]
version_data = version_data[0x40:]
function = act_version[8:]
chksum = act_version[:8]
checksums[function] = chksum
return checksums
def GetImportedVersions(self, file_data, sections):
if "__versions" not in sections:
return {}
start, length = sections["__versions"]
version_data = file_data[start:start+length]
return self.ParseVersionSection(version_data)
def GetModuleVersion(self, file_data, sections):
info_start, info_length = sections[".modinfo"]
modinfo = file_data[info_start:info_start + info_length]
for line in modinfo.split("\x00"):
if line.startswith("vermagic"):
return line[len("vermagic") + 1:]
msg = "Could not find vermagic string."
logging.info(msg)
raise RuntimeError(msg)
def _RewriteModinfo(self, modinfo, obj_kernel_version, this_kernel_version,
info_strings=None, to_remove=None):
new_modinfo = ""
for line in modinfo.split("\x00"):
if not line:
continue
if to_remove and line.split("=")[0] == to_remove:
continue
if info_strings is not None:
info_strings.add(line.split("=")[0])
if line.startswith("vermagic"):
line = line.replace(obj_kernel_version, this_kernel_version)
new_modinfo += line + "\x00"
return new_modinfo
def RewriteModinfo(self, file_data, sections, obj_kernel_version,
this_kernel_version):
"""This rewrites the modinfo section and updates the kernel version."""
info_start, info_length = sections[".modinfo"]
modinfo = file_data[info_start:info_start + info_length]
info_strings = set()
new_modinfo = self._RewriteModinfo(modinfo, obj_kernel_version,
this_kernel_version, info_strings)
if len(new_modinfo) <= info_length:
new_modinfo += "\x00" * (info_length - len(new_modinfo))
return new_modinfo
logging.info("Rewritten modinfo section is too big.")
info_strings -= set(["vermagic", "srcversion", "depends"])
try:
to_remove = info_strings.pop()
except KeyError:
msg = "Could not remove anything from modinfo, giving up."
logging.info(msg)
raise RuntimeError(msg)
logging.info("Will try to remove %s from modinfo.", to_remove)
return self._RewriteModinfo(modinfo, obj_kernel_version,
this_kernel_version, to_remove=to_remove)
def GetKnownImports(self, needed_versions):
"""Parses the driver directory to find valid import checksums."""
needed_versions = set(needed_versions)
found_versions = {}
driver_path = "/lib/modules/%s/kernel/drivers" % platform.uname()[2]
num_files = 0
for (directory, _, files) in os.walk(driver_path):
for filename in files:
if filename[-3:] == ".ko":
try:
fd = open("%s/%s" % (directory, filename), "rb")
num_files += 1
data = fd.read()
sections = self.GetSectionOffsets(data)
versions = self.GetImportedVersions(data, sections)
found_versions.update(versions)
if set(found_versions.keys()) >= needed_versions:
logging.info("All imports found, gathered data from %d modules.",
num_files)
return found_versions
except IOError:
pass
missing = needed_versions - set(found_versions.keys())
msg = "Imports %s could not be found." % ",".join(missing)
logging.info(msg)
raise RuntimeError(msg)
def ReplaceSection(self, file_data, offset, new_section_data):
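    # Overwrites exactly len(new_section_data) bytes starting at offset, so the
    # overall file size is preserved; callers must keep the rewritten section no
    # longer than the original (RewriteModinfo pads with NULs to ensure this).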
result = file_data[:offset]
result += new_section_data
result += file_data[offset + len(new_section_data):]
return result
def Patch(self, file_data, force_patch=False):
try:
sections = self.GetSectionOffsets(file_data)
obj_version = self.GetModuleVersion(file_data, sections)
obj_kernel_version = obj_version.split(" ")[0]
this_kernel_version = platform.uname()[2]
logging.info("Module version is %s, kernel version is %s.",
obj_kernel_version, this_kernel_version)
if obj_kernel_version == this_kernel_version and not force_patch:
return file_data
needed_imports = self.GetImportedVersions(file_data, sections)
good_imports = self.GetKnownImports(needed_imports)
rewritten_version_data = ""
for function in needed_imports.keys():
if needed_imports[function] == good_imports[function]:
logging.info("Matching checksum for %s.",
function.replace("\x00", ""))
else:
logging.info("Checksum mismatch for %s.",
function.replace("\x00", ""))
rewritten_version_data += good_imports[function] + function
rewritten_modinfo_data = self.RewriteModinfo(
file_data, sections, obj_kernel_version, this_kernel_version)
file_data = self.ReplaceSection(file_data, sections["__versions"][0],
rewritten_version_data)
file_data = self.ReplaceSection(file_data, sections[".modinfo"][0],
rewritten_modinfo_data)
return file_data
except (RuntimeError, KeyError) as e:
logging.info(str(e))
# Something didn't work, we can just use the data we were sent.
return file_data
def main(_):
if len(sys.argv) < 3:
print "Usage: python %s <kernel_module> <outfile>" % sys.argv[0]
exit()
in_fd = open(sys.argv[1], "rb")
out_data = KernelObjectPatcher(log=True).Patch(in_fd.read(), force_patch=True)
with open(sys.argv[2], "wb") as out_fd:
out_fd.write(out_data)
logging.info("Kernel Object patched.")
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | -3,743,840,520,247,342,000 | 33.362832 | 80 | 0.626577 | false |
airbnb/streamalert | streamalert/apps/_apps/aliyun.py | 1 | 8258 | """
Copyright 2018-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import json
import re
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ServerException, ClientException
from aliyunsdkactiontrail.request.v20171204 import LookupEventsRequest
from . import AppIntegration, StreamAlertApp, get_logger
LOGGER = get_logger(__name__)
@StreamAlertApp
class AliyunApp(AppIntegration):
"""An app that collects events from the ActionTrail feature of Aliyun.
Results are paginated, with a NextToken field provided that is used in subsequent queries.
The result of a successful api call is json whose outermost schema is
{
"EndTime": <end of the time range of events>,
"NextToken": <the token to use to request the next page of data>,
"RequestId": <the ID of the request>,
"StartTime": <start of the time range of events>,
"Events": [entries],
}
If there are no more events in the queried range, the 'NextToken' element is not present.
Resource:
https://www.alibabacloud.com/help/doc-detail/28849.htm
"""
# The maximum number of results to be returned. Valid values: 0 to 50.
_MAX_RESULTS = 50
    # In aliyun sdk PR https://github.com/aliyun/aliyun-openapi-python-sdk/pull/216, the
    # timeout was separated into connection and read timeouts, and the default connection
    # timeout was lowered from 10 to 5 seconds. We have noticed the connection to the
    # server timing out more often recently, so increasing the default timeouts helps.
_CONNECT_TIMEOUT = 15
_READ_TIMEOUT = 15
def __init__(self, event, context):
super(AliyunApp, self).__init__(event, context)
auth = self._config.auth
self.client = AcsClient(auth['access_key_id'], auth['access_key_secret'], auth['region_id'])
self.request = LookupEventsRequest.LookupEventsRequest()
self.request.set_MaxResults(self._MAX_RESULTS)
self.request.set_StartTime(self._config.last_timestamp)
# Source code can be found here https://github.com/aliyun/aliyun-openapi-python-sdk/
# blob/master/aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20171204/
# LookupEventsRequest.py
self.request.set_EndTime(datetime.utcnow().strftime(self.date_formatter()))
self.request.set_connect_timeout(self._CONNECT_TIMEOUT)
self.request.set_read_timeout(self._READ_TIMEOUT)
@classmethod
def _type(cls):
return 'actiontrail'
@classmethod
def service(cls):
return 'aliyun'
@classmethod
def date_formatter(cls):
"""Return a format string for a date, ie: 2014-05-26T12:00:00Z
This format is consistent with the format used by the Aliyun API:
https://www.alibabacloud.com/help/doc-detail/28849.htm
"""
return '%Y-%m-%dT%H:%M:%SZ'
def _gather_logs(self):
"""Fetch ActionTrail events and return a list of events
Example response from do_action_with_exception method
{
'EndTime': '2019-08-22T04:41:32Z',
'NextToken': '2',
'RequestId': '562D9C08-E766-4038-B49F-B0D2BE1980FE',
'StartTime': '2019-08-01T04:31:52Z',
'Events': [{
'eventId': '60.152_1566447558068_1247',
'eventVersion': '1',
'acsRegion': 'cn-hangzhou',
'additionalEventData': {
'mfaChecked': 'true',
'callbackUrl': 'https://home.console.aliyun.com/'
},
'eventType': 'ConsoleSignin',
'errorMessage': 'success',
'eventTime': '2019-08-22T04:19:18Z',
'eventName': 'ConsoleSignin',
'userIdentity': {
'userName': 'dead_joke',
'type': 'ram-user',
'principalId': '222222222222222222',
'accountId': '1111111111111111'
},
'eventSource': 'signin.aliyun.com',
'requestId': '60.152_1566447558068_1247',
'userAgent': 'some browser version',
'sourceIpAddress': '1.1.1.1',
'serviceName': 'AasSub'
}, {
'eventId': '029B39F0-5E23-4931-B4C9-BA72C7261ADF',
...
'eventTime': '2019-08-21T22:26:09Z',
...
}]
}
"""
try:
response = self.client.do_action_with_exception(self.request)
json_response = json.loads(response)
            # Note: the ActionTrail API returns events in sorted order, latest first.
            # There is still a small chance that not all logs are fetched: if the Lambda
            # function times out while there are more logs to pull, the remaining logs
            # will be lost because the last_timestamp is updated to "EndTime" during the
            # first Lambda function call.
            #
            # To lower the possibility of data loss, use a longer timeout for the Lambda
            # function (aliyun app) and schedule the app more frequently, e.g. every 10 mins
self._last_timestamp = json_response['EndTime']
if 'NextToken' in json_response:
self._more_to_poll = True
self.request.set_NextToken(json_response['NextToken'])
else:
self._more_to_poll = False
return json_response['Events']
except (ServerException, ClientException) as e:
LOGGER.exception("%s error occurred", e.get_error_type())
raise
@classmethod
def _required_auth_info(cls):
"""Required credentials for access to the resources"""
def region_validator(region):
"""Region names pulled from https://www.alibabacloud.com/help/doc-detail/40654.htm"""
if region in {'cn-qingdao', 'cn-beijing', 'cn-zhangjiakou', 'cn-huhehaote',
'cn-hangzhou', 'cn-shanghai', 'cn-shenzhen', 'cn-hongkong',
'ap-southeast-1', 'ap-southeast-2', 'ap-southeast-3', 'ap-southeast-5',
'ap-northeast-1', 'ap-south-1', 'us-west-1', 'us-east-1',
'eu-central-1', 'me-east-1'}:
return region
return False
return {
'access_key_id': {
'description': ('The access key id generated for a RAM user. This '
'should be a string of alphanumeric characters.'),
'format': re.compile(r'.*')
},
'access_key_secret': {
'description': ('The access key secret generated for a RAM user. This '
'should be a string of alphanumeric characters.'),
'format': re.compile(r'.*')
},
'region_id': {
'description': ('The region for the Aliyun API. This should be '
'a string like \'ap-northeast-1\'.'),
'format': region_validator
},
}
@classmethod
def _sleep_seconds(cls):
"""Return the number of seconds this polling function should sleep for
between requests to avoid failed requests. The Aliyun documentation doesn't
list limits on the requests portion of the actionTrail feature, so the only
limit is the general limit on Aliyun API requests, which is no more than
100 per second. We can set this value to 0 safely.
Resource:
https://www.alibabacloud.com/help/doc-detail/29474.htm
Returns:
int: Number of seconds the polling function should sleep for
"""
return 0
| apache-2.0 | 4,422,241,922,752,377,300 | 38.89372 | 100 | 0.609712 | false |
agaffney/ansible | test/support/windows-integration/plugins/modules/win_acl.py | 85 | 4112 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Phil Schwartz <[email protected]>
# Copyright: (c) 2015, Trond Hindenes
# Copyright: (c) 2015, Hans-Joachim Kliemeck <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_acl
version_added: "2.0"
short_description: Set file/directory/registry permissions for a system user or group
description:
- Add or remove rights/permissions for a given user or group for the specified
  file, folder, registry key or AppPool identities.
options:
path:
description:
- The path to the file or directory.
type: str
required: yes
user:
description:
- User or Group to add specified rights to act on src file/folder or
registry key.
type: str
required: yes
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified access rule.
type: str
choices: [ absent, present ]
default: present
type:
description:
- Specify whether to allow or deny the rights specified.
type: str
required: yes
choices: [ allow, deny ]
rights:
description:
- The rights/permissions that are to be allowed/denied for the specified
user or group for the item at C(path).
- If C(path) is a file or directory, rights can be any right under MSDN
FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
- If C(path) is a registry key, rights can be any right under MSDN
RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
type: str
required: yes
inherit:
description:
- Inherit flags on the ACL rules.
- Can be specified as a comma separated list, e.g. C(ContainerInherit),
C(ObjectInherit).
- For more information on the choices see MSDN InheritanceFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
- Defaults to C(ContainerInherit, ObjectInherit) for Directories.
type: str
choices: [ ContainerInherit, ObjectInherit ]
propagation:
description:
- Propagation flag on the ACL rules.
- For more information on the choices see MSDN PropagationFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
type: str
choices: [ InheritOnly, None, NoPropagateInherit ]
default: "None"
notes:
- If adding ACL's for AppPool identities (available since 2.3), the Windows
Feature "Web-Scripting-Tools" must be enabled.
seealso:
- module: win_acl_inheritance
- module: win_file
- module: win_owner
- module: win_stat
author:
- Phil Schwartz (@schwartzmx)
- Trond Hindenes (@trondhindenes)
- Hans-Joachim Kliemeck (@h0nIg)
'''
EXAMPLES = r'''
- name: Restrict write and execute access to User Fed-Phil
win_acl:
user: Fed-Phil
path: C:\Important\Executable.exe
type: deny
rights: ExecuteFile,Write
- name: Add IIS_IUSRS allow rights
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Set registry key right
win_acl:
path: HKCU:\Bovine\Key
user: BUILTIN\Users
rights: EnumerateSubKeys
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Remove FullControl AccessRule for IIS_IUSRS
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: absent
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Deny Intern
win_acl:
path: C:\Administrator\Documents
user: Intern
rights: Read,Write,Modify,FullControl,Delete
type: deny
state: present
'''
| gpl-3.0 | -681,842,064,157,582,800 | 30.151515 | 119 | 0.6982 | false |
fiunchinho/ansible-modules-extras | notification/telegram.py | 42 | 2670 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Artem Feofanov <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: telegram
version_added: "2.2"
author: "Artem Feofanov (@tyouxa)"
short_description: module for sending notifications via telegram
description:
- Send notifications via telegram bot, to a verified group or user
notes:
  - You will require a Telegram account and must create a Telegram bot to use this module.
options:
msg:
description:
- What message you wish to send.
required: true
token:
description:
- Token identifying your telegram bot.
required: true
chat_id:
description:
- Telegram group or user chat_id
required: true
"""
EXAMPLES = """
Send a message to a chat in a playbook
- telegram: token=bot9999999:XXXXXXXXXXXXXXXXXXXXXXX
chat_id=000000
msg="Ansible task finished"
"""
RETURN = """
msg:
description: The message you attempted to send
returned: success
type: string
sample: "Ansible task finished"
"""
import urllib
def main():
module = AnsibleModule(
argument_spec = dict(
token = dict(type='str',required=True,no_log=True),
chat_id = dict(type='str',required=True,no_log=True),
msg = dict(type='str',required=True)),
supports_check_mode=True
)
token = urllib.quote(module.params.get('token'))
chat_id = urllib.quote(module.params.get('chat_id'))
msg = urllib.quote(module.params.get('msg'))
url = 'https://api.telegram.org/' + token + '/sendMessage?text=' + msg + '&chat_id=' + chat_id
if module.check_mode:
module.exit_json(changed=False)
response, info = fetch_url(module, url)
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | -8,792,582,396,599,395,000 | 24.92233 | 98 | 0.679401 | false |
saurabh6790/pow-app | accounts/doctype/sales_invoice/pos.py | 29 | 1618 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
@webnotes.whitelist()
def get_items(price_list, sales_or_purchase, item=None, item_group=None):
condition = ""
args = {"price_list": price_list}
if sales_or_purchase == "Sales":
condition = "i.is_sales_item='Yes'"
else:
condition = "i.is_purchase_item='Yes'"
if item_group and item_group != "All Item Groups":
condition += " and i.item_group='%s'" % item_group
if item:
condition += " and CONCAT(i.name, i.item_name) like %(name)s"
args["name"] = "%%%s%%" % item
return webnotes.conn.sql("""select i.name, i.item_name, i.image,
item_det.ref_rate, item_det.currency
from `tabItem` i LEFT JOIN
(select item_code, ref_rate, currency from
`tabItem Price` where price_list=%s) item_det
ON
item_det.item_code=i.name
where
%s""" % ('%(price_list)s', condition), args, as_dict=1)
@webnotes.whitelist()
def get_item_code(barcode_serial_no):
input_via = "serial_no"
item_code = webnotes.conn.sql("""select name, item_code from `tabSerial No` where
name=%s""", (barcode_serial_no), as_dict=1)
if not item_code:
input_via = "barcode"
item_code = webnotes.conn.sql("""select name from `tabItem` where barcode=%s""",
(barcode_serial_no), as_dict=1)
if item_code:
return item_code, input_via
else:
webnotes.throw("Invalid Barcode / Serial No")
@webnotes.whitelist()
def get_mode_of_payment():
return webnotes.conn.sql("""select name from `tabMode of Payment`""", as_dict=1) | agpl-3.0 | 7,127,084,398,929,267,000 | 30.134615 | 83 | 0.677998 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/webdriver/common/webserver.py | 20 | 4133 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple web server for testing purpose.
It serves the testing html pages that are needed by the webdriver unit tests."""
import logging
import os
import socket
import threading
import urllib
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def updir():
dirname = os.path.dirname
return dirname(dirname(__file__))
LOGGER = logging.getLogger(__name__)
WEBDRIVER = os.environ.get("WEBDRIVER", updir())
HTML_ROOT = os.path.join(WEBDRIVER, "../../../../../../common/src/web")
if not os.path.isdir(HTML_ROOT):
message = ("Can't find 'common_web' directory, try setting WEBDRIVER"
" environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT )
LOGGER.error(message)
assert 0, message
DEFAULT_PORT = 8000
class HtmlOnlyHandler(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self):
"""GET method handler."""
try:
path = self.path[1:].split('?')[0]
html = open(os.path.join(HTML_ROOT, path))
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(html.read())
html.close()
except IOError:
self.send_error(404, 'File Not Found: %s' % path)
def log_message(self, format, *args):
"""Override default to avoid trashing stderr"""
pass
class SimpleWebServer(object):
"""A very basic web server."""
def __init__(self, port=DEFAULT_PORT):
self.stop_serving = False
port = port
while True:
try:
self.server = HTTPServer(
('', port), HtmlOnlyHandler)
self.port = port
break
except socket.error:
LOGGER.debug("port %d is in use, trying to next one"
% port)
port += 1
self.thread = threading.Thread(target=self._run_web_server)
def _run_web_server(self):
"""Runs the server loop."""
LOGGER.debug("web server started")
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self):
"""Starts the server."""
self.thread.start()
def stop(self):
"""Stops the server."""
self.stop_serving = True
try:
# This is to force stop the server loop
urllib.URLopener().open("http://localhost:%d" % self.port)
except Exception:
pass
LOGGER.info("Shutting down the webserver")
self.thread.join()
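# Illustrative usage sketch (not part of the original module): how a test
# fixture might drive SimpleWebServer. "xhtmlTest.html" is an assumed page
# name that would have to exist under HTML_ROOT.
def _example_usage():
    server = SimpleWebServer()
    server.start()
    try:
        url = "http://localhost:%d/xhtmlTest.html" % server.port
        page = urllib.urlopen(url).read()
    finally:
        server.stop()
    return page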
def main(argv=None):
from optparse import OptionParser
from time import sleep
if argv is None:
import sys
argv = sys.argv
parser = OptionParser("%prog [options]")
parser.add_option("-p", "--port", dest="port", type="int",
help="port to listen (default: %s)" % DEFAULT_PORT,
default=DEFAULT_PORT)
opts, args = parser.parse_args(argv[1:])
if args:
parser.error("wrong number of arguments") # Will exit
server = SimpleWebServer(opts.port)
server.start()
print "Server started on port %s, hit CTRL-C to quit" % opts.port
try:
while 1:
sleep(0.1)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
| mit | -793,751,500,942,008,200 | 29.549618 | 92 | 0.587467 | false |
adalekin/django-balancer | docs/conf.py | 4 | 7200 | # -*- coding: utf-8 -*-
#
# django-balancer documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 18 10:17:32 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
DOCS_BASE = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(DOCS_BASE, '..')))
import balancer
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-balancer'
copyright = u'2010, Brandon Konkle'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = balancer.get_version(short=True)
# The full version, including alpha/beta/rc tags.
release = balancer.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-balancerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-balancer.tex', u'django-balancer Documentation',
u'Brandon Konkle', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-balancer', u'django-balancer Documentation',
[u'Brandon Konkle'], 1)
]
| bsd-3-clause | 3,372,376,978,223,580,000 | 31.579186 | 80 | 0.710139 | false |
htygithub/bokeh | bokeh/state.py | 2 | 8592 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Encapsulate implicit state that is useful for Bokeh plotting APIs.
Generating output for Bokeh plots requires coordinating several things:
:class:`Documents <bokeh.document>`
Group together Bokeh models that may be shared between plots (e.g.,
range or data source objects) into one common namespace.
:class:`Resources <bokeh.resources>`
Control how JavaScript and CSS for the client library BokehJS are
included and used in the generated output.
It is certainly possible to handle the configuration of these objects
manually, and several examples of this can be found in ``examples/glyphs``.
When developing sophisticated applications, it may be necessary or
desirable to work at this level. However, for general use this would
quickly become burdensome. The ``bokeh.state`` module provides a ``State``
class that encapsulates these objects and ensures their proper configuration.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import os
# Third-party imports
# Bokeh imports
from .document import Document
from .resources import Resources
from .client import DEFAULT_SESSION_ID
from bokeh.resources import DEFAULT_SERVER_HTTP_URL
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
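# Illustrative sketch (not part of the original module): the kind of manual
# wiring that State encapsulates -- a Document to hold models plus a Resources
# object controlling how BokehJS is included. The "cdn" mode here is an assumed
# example value.
def _example_manual_configuration():
    doc = Document()
    resources = Resources(mode="cdn")
    return doc, resources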
class State(object):
""" Manage state related to controlling Bokeh output.
Attributes:
document (:class:`bokeh.document.Document`): a default document to use
file (dict) : default filename, resources, etc. for file output
This dictionary has the following form::
{
'filename' : # filename to use when saving
'resources' : # resources configuration
'title' : # a title for the HTML document
}
notebook (bool) : whether to generate notebook output
session_id (str) : a default session ID for Bokeh server output
autoadd (bool) : whether certain functions automatically add roots to the document
autosave (bool) : whether certain functions automatically save the file
autopush (bool): whether certain functions automatically push to the server
"""
def __init__(self):
# TODO (havocp) right now there's no way to turn off autoadd
self._autoadd = True
self.reset()
@property
def document(self):
return self._document
@document.setter
def document(self, doc):
self._document = doc
@property
def file(self):
return self._file
@property
def notebook(self):
return self._notebook
@property
def session_id(self):
return self._session_id
@property
def server_url(self):
return self._server_url
@property
def autoadd(self):
return self._autoadd
@property
def autosave(self):
return self._autosave
@property
def autopush(self):
return self._autopush
def _reset_keeping_doc(self):
self._file = None
self._notebook = False
self._session_id = None
self._server_url = None
self._autosave = False
self._autopush = False
def _reset_with_doc(self, doc):
self._document = doc
self._reset_keeping_doc()
def reset(self):
''' Deactivate all currently active output modes and set curdoc() to a fresh empty Document.
Subsequent calls to show() will not render until a new output mode is
activated.
Returns:
None
'''
self._reset_with_doc(Document())
def output_file(self, filename, title="Bokeh Plot", autosave=False, mode="cdn", root_dir=None):
"""Output to a standalone HTML file.
Does not change the current Document from curdoc(). File,
server, and notebook output may be active at the same
time, so this does not clear the effects of
output_server() or output_notebook().
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document
autosave (bool, optional) : whether to automatically save (default: False)
If True, then Bokeh plotting APIs may opt to automatically
save the file more frequently (e.g., after any plotting
command). If False, then the file is only saved upon calling
:func:`show` or :func:`save`.
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked, or any time a Bokeh plotting API
causes a save, if ``autosave`` is True.
"""
self._file = {
'filename' : filename,
'resources' : Resources(mode=mode, root_dir=root_dir),
'title' : title
}
self._autosave = autosave
if os.path.isfile(filename):
logger.info("Session output file '%s' already exists, will be overwritten." % filename)
def output_notebook(self):
"""Generate output in Jupyter/IPython notebook cells.
This does not clear the effects of output_file() or
output_server(), it only adds an additional output
destination (publishing to IPython Notebook). If
output_server() has been called, the notebook output cell
will be loaded from a Bokeh server; otherwise, Bokeh
publishes HTML to the notebook directly.
Returns:
None
"""
self._notebook = True
def output_server(self, session_id=DEFAULT_SESSION_ID, url="default", autopush=False):
"""Store Bokeh plots and objects on a Bokeh server.
File, server, and notebook output may be active at the
same time, so this does not clear the effects of
output_file() or output_notebook(). output_server()
changes the behavior of output_notebook(), so the notebook
will load output cells from the server rather than
receiving them as inline HTML.
Args:
session_id (str) : Name of session to push on Bokeh server
Any existing session with the same name will be overwritten.
url (str, optional) : base URL of the Bokeh server (default: "default")
If "default" use the default localhost URL.
autopush (bool, optional) : whether to automatically push (default: False)
If True, then Bokeh plotting APIs may opt to automatically
push the document more frequently (e.g., after any plotting
command). If False, then the document is only pushed upon calling
:func:`show` or :func:`push`.
Returns:
None
.. warning::
Calling this function will replace any existing server-side document in the named session.
"""
if url == "default":
url = DEFAULT_SERVER_HTTP_URL
self._session_id = session_id
self._server_url = url
self._autopush = autopush
| bsd-3-clause | 3,575,956,497,072,917,000 | 33.785425 | 103 | 0.573673 | false |
sysadmin75/ansible-modules-core | cloud/rackspace/rax_dns_record.py | 16 | 11455 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
version_added: 2.1
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
name=None, server=None, state='present', ttl=7200):
changed = False
results = []
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if loadbalancer:
item = rax_find_loadbalancer(module, pyrax, loadbalancer)
elif server:
item = rax_find_server(module, pyrax, server)
if state == 'present':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
if record.ttl != ttl or record.name != name:
try:
dns.update_ptr_record(item, record, name, data, ttl)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
record.ttl = ttl
record.name = name
results.append(rax_to_dict(record))
break
else:
results.append(rax_to_dict(record))
break
if not results:
record = dict(name=name, type='PTR', data=data, ttl=ttl,
comment=comment)
try:
results = dns.add_ptr_records(item, [record])
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
elif state == 'absent':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
results.append(rax_to_dict(record))
break
if results:
try:
dns.delete_ptr_records(item, data)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
overwrite=True, priority=None, record_type='A',
state='present', ttl=7200):
"""Function for manipulating record types other than PTR"""
changed = False
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not priority and record_type in ['MX', 'SRV']:
module.fail_json(msg='A "priority" attribute is required for '
'creating a MX or SRV record')
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
if overwrite:
record = domain.find_record(record_type, name=name)
else:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='overwrite=true and there are multiple matching records')
except pyrax.exceptions.DomainRecordNotFound, e:
try:
record_data = {
'type': record_type,
'name': name,
'data': data,
'ttl': ttl
}
if comment:
record_data.update(dict(comment=comment))
if priority and record_type.upper() in ['MX', 'SRV']:
record_data.update(dict(priority=priority))
record = domain.add_records([record_data])[0]
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
update = {}
if comment != getattr(record, 'comment', None):
update['comment'] = comment
if ttl != getattr(record, 'ttl', None):
update['ttl'] = ttl
if priority != getattr(record, 'priority', None):
update['priority'] = priority
if data != getattr(record, 'data', None):
update['data'] = data
if update:
try:
record.update(**update)
changed = True
record.get()
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotFound, e:
record = {}
pass
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='%s' % e.message)
if record:
try:
record.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
comment=dict(),
data=dict(required=True),
domain=dict(),
loadbalancer=dict(),
name=dict(required=True),
overwrite=dict(type='bool', default=True),
priority=dict(type='int'),
server=dict(),
state=dict(default='present', choices=['present', 'absent']),
ttl=dict(type='int', default=3600),
type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
'SRV', 'TXT', 'PTR'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['server', 'loadbalancer', 'domain'],
],
required_one_of=[
['server', 'loadbalancer', 'domain'],
],
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
comment = module.params.get('comment')
data = module.params.get('data')
domain = module.params.get('domain')
loadbalancer = module.params.get('loadbalancer')
name = module.params.get('name')
overwrite = module.params.get('overwrite')
priority = module.params.get('priority')
server = module.params.get('server')
state = module.params.get('state')
ttl = module.params.get('ttl')
record_type = module.params.get('type')
setup_rax_module(module, pyrax, False)
if record_type.upper() == 'PTR':
if not server and not loadbalancer:
module.fail_json(msg='one of the following is required: '
'server,loadbalancer')
rax_dns_record_ptr(module, data=data, comment=comment,
loadbalancer=loadbalancer, name=name, server=server,
state=state, ttl=ttl)
else:
rax_dns_record(module, comment=comment, data=data, domain=domain,
name=name, overwrite=overwrite, priority=priority,
record_type=record_type, state=state, ttl=ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
| gpl-3.0 | 7,042,761,870,120,024,000 | 31.916667 | 94 | 0.571715 | false |
sebalix/OpenUpgrade | addons/base_action_rule/__init__.py | 438 | 1098 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_action_rule
import test_models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,471,106,183,912,570,400 | 42.92 | 79 | 0.614754 | false |
ghostsquad/pytest | testing/test_assertinterpret.py | 30 | 8937 | "PYTEST_DONT_REWRITE"
import pytest, py
from _pytest.assertion import util
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_not_being_rewritten():
assert "@py_builtins" not in globals()
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_with_explicit_message():
try:
assert f() == 3, "hello"
except AssertionError:
e = exvalue()
assert e.msg == 'hello'
def test_assert_within_finally():
excinfo = pytest.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_non_string_message():
class A:
def __str__(self):
return "hello"
try:
assert 0 == 1, A()
except AssertionError:
e = exvalue()
assert e.msg == "hello"
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in e.msg
def test_private_class_variable():
class X:
def __init__(self):
self.__v = 41
def m(self):
assert self.__v == 42
try:
X().m()
except AssertionError:
e = exvalue()
assert "== 42" in e.msg
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert e.msg.find('WeirdRepr') != -1
assert e.msg.find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert e.msg.find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert e.msg.find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
pytest.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in e.msg
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in e.msg
class TestView:
def setup_class(cls):
cls.View = pytest.importorskip("_pytest.assertion.oldinterpret").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_assert_customizable_reprcompare(monkeypatch):
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
"PYTEST_DONT_REWRITE"
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@pytest.mark.skipif("sys.version_info < (2,5)")
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError:
s = str(exvalue())
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
@pytest.mark.skipif("sys.version_info >= (2,6)")
def test_oldinterpret_importation():
# we had a cyclic import there
# requires pytest on sys.path
res = py.std.subprocess.call([
py.std.sys.executable, '-c', str(py.code.Source("""
try:
from _pytest.assertion.newinterpret import interpret
except ImportError:
from _pytest.assertion.oldinterpret import interpret
"""))
])
assert res == 0
| mit | -899,875,551,464,553,100 | 24.461538 | 80 | 0.522771 | false |
sparklyballs/XBMC-Headless | xbmcdata/addons/service.xbmc.versioncheck/lib/common.py | 82 | 7008 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import unicodedata
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonname__ = __addon__.getAddonInfo('name')
__addonpath__ = __addon__.getAddonInfo('path').decode('utf-8')
__addonprofile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
try:
text = unicode( text, encoding )
except:
pass
return text
def normalize_string(text):
try:
text = unicodedata.normalize('NFKD', string_unicode(text)).encode('ascii', 'ignore')
except:
pass
return text
def localise(id):
string = normalize_string(__addon__.getLocalizedString(id))
return string
def log(txt):
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % ("Version Check", txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def get_password_from_user():
keyboard = xbmc.Keyboard("", __addonname__ + "," +localise(32022), True)
keyboard.doModal()
if (keyboard.isConfirmed()):
pwd = keyboard.getText()
return pwd
def message_upgrade_success():
xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(__addonname__,
localise(32013),
15000,
__icon__))
def message_restart():
if dialog_yesno(32014):
xbmc.executebuiltin("RestartApp")
def dialog_yesno(line1 = 0, line2 = 0):
return xbmcgui.Dialog().yesno(__addonname__,
localise(line1),
localise(line2))
def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
# Don't show while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 5 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
if __addon__.getSetting("lastnotified_version") < __addonversion__:
xbmcgui.Dialog().ok(__addonname__,
localise(msg),
localise(32001),
localise(32002))
#__addon__.setSetting("lastnotified_version", __addonversion__)
else:
log("Already notified one time for upgrading.")
def upgrade_message2( version_installed, version_available, version_stable, oldversion, upgrade,):
# shorten releasecandidate to rc
if version_installed['tag'] == 'releasecandidate':
version_installed['tag'] = 'rc'
if version_available['tag'] == 'releasecandidate':
version_available['tag'] = 'rc'
# convert json-rpc result to strings for usage
msg_current = '%i.%i %s%s' %(version_installed['major'],
version_installed['minor'],
version_installed['tag'],
version_installed.get('tagversion',''))
msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion','')
msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion','')
msg = localise(32034) %(msg_current, msg_available)
# Don't show notify while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 10 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
# hack: convert current version number to stable string
# so users don't get notified again. remove in future
if __addon__.getSetting("lastnotified_version") == '0.1.24':
__addon__.setSetting("lastnotified_stable", msg_stable)
# Show different dialogs depending if there's a newer stable available.
# Also split them between xbmc and kodi notifications to reduce possible confusion.
# People will find out once they visit the website.
# For stable only notify once and when there's a newer stable available.
# Ignore any add-on updates as those only count for != stable
if oldversion == 'stable' and __addon__.getSetting("lastnotified_stable") != msg_stable:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32030),
localise(32031))
else:
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32032),
localise(32033))
__addon__.setSetting("lastnotified_stable", msg_stable)
elif oldversion != 'stable' and __addon__.getSetting("lastnotified_version") != msg_available:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
# point them to xbmc.org
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32035),
localise(32031))
else:
#use kodi.tv
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32035),
localise(32033))
# older skins don't support a text field in the OK dialog.
# let's use split lines for now. see code above.
'''
msg = localise(32034) %(msg_current, msg_available)
if oldversion == 'stable':
msg = msg + ' ' + localise(32030)
else:
msg = msg + ' ' + localise(32035)
msg = msg + ' ' + localise(32031)
xbmcgui.Dialog().ok(__addonname__, msg)
#__addon__.setSetting("lastnotified_version", __addonversion__)
'''
__addon__.setSetting("lastnotified_version", msg_available)
else:
log("Already notified one time for upgrading.") | mit | -2,460,494,774,005,998,600 | 39.988304 | 155 | 0.570205 | false |
apurvbhartia/gnuradio-routing | gr-wxgui/src/python/forms/__init__.py | 16 | 4222 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
The following classes will be available through gnuradio.wxgui.forms:
"""
########################################################################
# External Converters
########################################################################
from converters import \
eval_converter, str_converter, \
float_converter, int_converter
########################################################################
# External Forms
########################################################################
from forms import \
radio_buttons, drop_down, notebook, \
button, toggle_button, single_button, \
check_box, text_box, static_text, \
slider, log_slider, gauge, \
make_bold, DataEvent, EVT_DATA
########################################################################
# Helpful widgets
########################################################################
import wx
class static_box_sizer(wx.StaticBoxSizer):
"""
A box sizer with label and border.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param label title label for this widget (optional)
@param bold true to boldify the label
@param orient the sizer orientation wx.VERTICAL or wx.HORIZONTAL (default=wx.VERTICAL)
"""
def __init__(self, parent, label='', bold=False, sizer=None, orient=wx.VERTICAL, proportion=0, flag=wx.EXPAND):
box = wx.StaticBox(parent=parent, label=label)
if bold: make_bold(box)
wx.StaticBoxSizer.__init__(self, box=box, orient=orient)
if sizer: sizer.Add(self, proportion, flag)
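# Illustrative sketch (not part of the original module): wrapping a labelled
# static_box_sizer inside a plain parent sizer. The label text and layout
# values are assumed examples.
def _example_static_box(parent_widget):
	main_sizer = wx.BoxSizer(wx.VERTICAL)
	box = static_box_sizer(parent=parent_widget, sizer=main_sizer, label='Settings', bold=True)
	return main_sizer, box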
class incr_decr_buttons(wx.BoxSizer):
"""
A horizontal box sizer with a increment and a decrement button.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param label title label for this widget (optional)
@param on_incr the callback for pressing the + button
@param on_decr the callback for pressing the - button
"""
def __init__(self, parent, on_incr, on_decr, label='', sizer=None, proportion=0, flag=wx.EXPAND):
"""
@param parent the parent window
@param on_incr the event handler for increment
@param on_decr the event handler for decrement
"""
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
buttons_box = wx.BoxSizer(wx.HORIZONTAL)
self._incr_button = wx.Button(parent, label='+', style=wx.BU_EXACTFIT)
self._incr_button.Bind(wx.EVT_BUTTON, on_incr)
buttons_box.Add(self._incr_button, 0, wx.ALIGN_CENTER_VERTICAL)
self._decr_button = wx.Button(parent, label=' - ', style=wx.BU_EXACTFIT)
self._decr_button.Bind(wx.EVT_BUTTON, on_decr)
buttons_box.Add(self._decr_button, 0, wx.ALIGN_CENTER_VERTICAL)
if label: #add label
self.Add(wx.StaticText(parent, label='%s: '%label), 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
self.Add(buttons_box, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
else: self.Add(buttons_box, 0, wx.ALIGN_CENTER_VERTICAL)
if sizer: sizer.Add(self, proportion, flag)
def Disable(self, disable=True): self.Enable(not disable)
def Enable(self, enable=True):
if enable:
self._incr_button.Enable()
self._decr_button.Enable()
else:
self._incr_button.Disable()
self._decr_button.Disable()
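# Illustrative sketch (not part of the original module): wiring the +/- buttons
# to callbacks. The handler bodies and label are assumed examples.
def _example_incr_decr(parent_widget, sizer):
	def on_incr(event):
		pass  # e.g. step a gain value up
	def on_decr(event):
		pass  # e.g. step a gain value down
	return incr_decr_buttons(parent_widget, on_incr, on_decr, label='Gain', sizer=sizer)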
| gpl-3.0 | -5,243,434,561,082,318,000 | 39.990291 | 112 | 0.658219 | false |
oasiswork/odoo | openerp/addons/base/tests/test_qweb.py | 289 | 4814 | # -*- coding: utf-8 -*-
import cgi
import json
import os.path
import glob
import re
import collections
from lxml import etree
import openerp.addons.base.ir.ir_qweb
import openerp.modules
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
class TestQWebTField(common.TransactionCase):
def setUp(self):
super(TestQWebTField, self).setUp()
self.engine = self.registry('ir.qweb')
def context(self, values):
return ir_qweb.QWebContext(
self.cr, self.uid, values, context={'inherit_branding': True})
def test_trivial(self):
field = etree.Element('span', {'t-field': u'company.name'})
Companies = self.registry('res.company')
company_id = Companies.create(self.cr, self.uid, {
'name': "My Test Company"
})
result = self.engine.render_node(field, self.context({
'company': Companies.browse(self.cr, self.uid, company_id),
}))
self.assertEqual(
result,
'<span data-oe-model="res.company" data-oe-id="%d" '
'data-oe-field="name" data-oe-type="char" '
'data-oe-expression="company.name">%s</span>' % (
company_id,
"My Test Company",))
def test_i18n(self):
field = etree.Element('span', {'t-field': u'company.name'})
Companies = self.registry('res.company')
s = u"Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20% off!"
company_id = Companies.create(self.cr, self.uid, {
'name': s,
})
result = self.engine.render_node(field, self.context({
'company': Companies.browse(self.cr, self.uid, company_id),
}))
self.assertEqual(
result,
'<span data-oe-model="res.company" data-oe-id="%d" '
'data-oe-field="name" data-oe-type="char" '
'data-oe-expression="company.name">%s</span>' % (
company_id,
cgi.escape(s.encode('utf-8')),))
def test_reject_crummy_tags(self):
field = etree.Element('td', {'t-field': u'company.name'})
with self.assertRaisesRegexp(
AssertionError,
r'^RTE widgets do not work correctly'):
self.engine.render_node(field, self.context({
'company': None
}))
def test_reject_t_tag(self):
field = etree.Element('t', {'t-field': u'company.name'})
with self.assertRaisesRegexp(
AssertionError,
r'^t-field can not be used on a t element'):
self.engine.render_node(field, self.context({
'company': None
}))
class TestQWeb(common.TransactionCase):
matcher = re.compile('^qweb-test-(.*)\.xml$')
@classmethod
def get_cases(cls):
path = cls.qweb_test_file_path()
return (
cls("test_qweb_{}".format(cls.matcher.match(f).group(1)))
for f in os.listdir(path)
# js inheritance
if f != 'qweb-test-extend.xml'
if cls.matcher.match(f)
)
@classmethod
def qweb_test_file_path(cls):
path = os.path.dirname(
openerp.modules.get_module_resource(
'web', 'static', 'lib', 'qweb', 'qweb2.js'))
return path
def __getattr__(self, item):
if not item.startswith('test_qweb_'):
raise AttributeError("No {} on {}".format(item, self))
f = 'qweb-test-{}.xml'.format(item[10:])
path = self.qweb_test_file_path()
return lambda: self.run_test_file(os.path.join(path, f))
def run_test_file(self, path):
context = openerp.addons.base.ir.ir_qweb.QWebContext(self.cr, self.uid, {})
qweb = self.env['ir.qweb']
doc = etree.parse(path).getroot()
qweb.load_document(doc, None, context)
for template in context.templates:
if template.startswith('_'): continue
param = doc.find('params[@id="{}"]'.format(template))
# OrderedDict to ensure JSON mappings are iterated in source order
# so output is predictable & repeatable
params = {} if param is None else json.loads(param.text, object_pairs_hook=collections.OrderedDict)
ctx = context.copy()
ctx.update(params)
result = doc.find('result[@id="{}"]'.format(template)).text
self.assertEqual(
qweb.render(template, qwebcontext=ctx).strip(),
(result or u'').strip().encode('utf-8'),
template
)
def load_tests(loader, suite, _):
# can't override TestQWeb.__dir__ because dir() called on *class* not
# instance
suite.addTests(TestQWeb.get_cases())
return suite
| agpl-3.0 | -1,273,173,836,015,956,700 | 33.568345 | 111 | 0.56129 | false |
carsongee/edx-platform | cms/djangoapps/contentstore/management/commands/course_id_clash.py | 18 | 2127 | """
Script for finding all courses whose org/name pairs == other courses when ignoring case
"""
from django.core.management.base import BaseCommand
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
#
# To run from command line: ./manage.py cms --settings dev course_id_clash
#
class Command(BaseCommand):
"""
Script for finding all courses in the Mongo Modulestore whose org/name pairs == other courses when ignoring case
"""
help = 'List all courses ids in the Mongo Modulestore which may collide when ignoring case'
def handle(self, *args, **options):
mstore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo) # pylint: disable=protected-access
if hasattr(mstore, 'collection'):
map_fn = '''
function () {
emit(this._id.org.toLowerCase()+this._id.course.toLowerCase(), {target: this._id});
}
'''
reduce_fn = '''
function (idpair, matches) {
var result = {target: []};
matches.forEach(function (match) {
result.target.push(match.target);
});
return result;
}
'''
finalize = '''
function(key, reduced) {
if (Array.isArray(reduced.target)) {
return reduced;
}
else {return null;}
}
'''
results = mstore.collection.map_reduce(
map_fn, reduce_fn, {'inline': True}, query={'_id.category': 'course'}, finalize=finalize
)
results = results.get('results')
for entry in results:
if entry.get('value') is not None:
print '{:-^40}'.format(entry.get('_id'))
for course_id in entry.get('value').get('target'):
print ' {}/{}/{}'.format(course_id.get('org'), course_id.get('course'), course_id.get('name'))
| agpl-3.0 | -1,696,778,886,284,812,500 | 39.903846 | 120 | 0.526093 | false |
codingcommando/tmtp | standalone/New_Jeans2.py | 1 | 23537 | #!/usr/bin/env python
# New_Jeans2.py
# PatternMaker: Susan Spencer Conklin
# pants shell pattern
from tmtpl.constants import *
from tmtpl.pattern import *
from tmtpl.client import Client
from math import sqrt
class PatternDesign():
def __init__(self):
self.styledefs={}
self.markerdefs={}
return
def pattern(self):
"""
Method defining a pattern design. This is where the designer places
all elements of the design definition
"""
# All measurements are converted to pixels
# x increases towards right, y increases towards bottom of drawing - Quadrant is 'upside down'
# All angles are in radians
# angles start with 0 at '3:00', & move clockwise b/c quadrant is 'upside down'
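		# Worked example of this convention (illustrative only, not used below): because y
		# increases downward, a point 1*IN from the origin at angleOfDegree(90) lands at
		# (0, 1*IN), i.e. directly *below* (0, 0); angleOfDegree(270) would land directly above.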
cd = self.cd #client data is prefaced with cd
printer = '36" wide carriage plotter'
companyName = 'Seamly Patterns' # mandatory
designerName = 'Susan Spencer' # mandatory
patternName = 'pants Foundation' # mandatory
patternNumber = 'WS010-xj1-1' # mandatory
doc = setupPattern(self, cd, printer, companyName, designerName, patternName, patternNumber)
riseLine = cd.side_rise + (1*IN) # 1" sitting ease from hipline to riseline
hipLine = cd.front_hip_length # don't add 1" extra
hemLine = riseLine + cd.inseam
kneeLine = riseLine + cd.inseam/2. - (1*IN) # kneeline is 1" above midleg
# TODO - choose if using thick fabric
#seamEase = (1/16.0) * IN # 1/16" seam ease for thick fabric, 0 if not
seamEase = 0
waistLine = (1*IN) # Jeans waist is 1" lower than actual waist
frontDartWidth = 0.5*IN
frontDartLength = hipLine/2.
backDartWidth = 0.75*IN
backDartLength = hipLine*2/3.
waistBand = 1*IN # Height of waistBand
backKneeWidth = 10*IN
backHemWidth = 8*IN
frontKneeWidth = 8*IN
frontHemWidth = 7*IN
# pattern object
pants = Pattern('pants')
pants.styledefs.update(self.styledefs)
pants.markerdefs.update(self.markerdefs)
doc.add(pants)
# pants Front A
pants.add(PatternPiece('pattern', 'front', letter='A', fabric=2, interfacing=0, lining=0))
A = pants.front
top = 0.001 # can't use 0 in some calculations
side = 0.001
center = max(cd.front_waist_width*0.5, cd.front_hip_width*0.5)
width = center + cd.front_crotch_extension
creaseLine = width/2.0
TOPLEFT = pPoint(side, top)
TOPRIGHT = pPoint(center, top)
#a = pPoint(center, waistLine) # center waist
a = pPoint(center, riseLine - cd.front_rise - 1*IN) # center waist
#b = pPoint(center - cd.front_waist_width*0.5 - frontDartWidth - 2*seamEase, top) # side waist
radius = cd.front_waist_width*0.5 + frontDartWidth
Solution = pntIntersectLineCircleP(a, radius, TOPLEFT, TOPRIGHT) # returns pnt.intersections, pnt.p1, pnt.p2
if Solution.intersections == 1:
b = Solution.p1
elif Solution.intersections == 2:
if Solution.p1.x < a.x :
b = Solution.p1
else:
b = Solution.p2
#TODO - change angle of dart to be perpendicular to line ab
#pnt = pMidpointP(a, b) # dart center at waist along line ab
#c = pPoint(pnt.x, pnt.y + 0.25*IN) # lower dart center by 1/4in
c = pPointP(pMidpointP(a, b)) # dart center at waist along line ab
d = pPoint(c.x + frontDartWidth/2.0, c.y) # dart inside at waist
e = pPoint(c.x - frontDartWidth/2.0, c.y) # dart outside at waist
f = pPoint(c.x, c.y + frontDartLength) # dart point
angle = angleOfLineP(f, d) + angleOfVectorP(c, f, d)
g = pntFromDistanceAndAngleP(f, frontDartLength, angle) # on angle of sewn dart fold, after folded toward center
h = pPoint(center, riseLine/2.0) # center front 'pivot' point from crotch curve to front fly
i = pPoint(side, hipLine) # side hip
j = pPoint(center, hipLine) # center hip
k = pPoint(side, riseLine) # side rise
l = pPoint(center, riseLine) # center rise
m = pntFromDistanceAndAngleP(l, (1.25*IN), angleOfDegree(315.0)) # center crotch curve
n = pPoint(l.x + cd.front_crotch_extension, riseLine) # center crotch point
o = pPoint(creaseLine - frontKneeWidth/2.0, kneeLine) # inside knee
p = pPoint(creaseLine + frontKneeWidth/2.0, kneeLine) # outside knee
q = pPoint(creaseLine - frontHemWidth/2.0, hemLine) # inside hem
r = pPoint(creaseLine + frontHemWidth/2.0, hemLine) # outside hem
pnt1 = pntOnLineP(a, h, waistBand)
pnt2 = pntOnLineP(d, f, waistBand)
pnt3 = pntOnLineP(e, f, waistBand)
pnt4 = pntOnLineP(b, i, waistBand)
t1 = pntIntersectLinesP(pnt1, pnt2, a, h) # waistBand at center
u1 = pntIntersectLinesP(pnt1, pnt2, d, f) # waistBand at inside dart
v1 = pntIntersectLinesP(pnt3, pnt4, e, f) # waistBand at outside dart
w1 = pntIntersectLinesP(pnt3, pnt4, b, i) # waistBand at side
Side = rPoint(A, 'Side', side, top)
Center = rPoint(A, 'Center', center, top)
Inseam = rPoint(A, 'Inseam', width, top)
# front waist AW
AW1 = rPointP(A, 'AW1', a) # center waist
AW2 = rPointP(A, 'AW2', d) # inside dart
AW4 = rPointP(A, 'AW4', e) # outside dart
AW5 = rPointP(A, 'AW5', b) # side waist
# front waist control points
AW2_c1 = cPointP(A, 'AW2_c1', pntFromDistanceAndAngleP(AW1, lineLengthP(AW1, AW2)/3.0, angleOfLineP(j, AW1) - angleOfDegree(90))) # b/w AW1 & AW2
AW2_c2 = cPointP(A, 'AW2_c2', pntFromDistanceAndAngleP(AW2, lineLengthP(AW1, AW2)/3.0, angleOfLineP(f, AW2) + angleOfDegree(90))) # b/w AW1 & AW2
AW5_c1 = cPointP(A, 'AW5_c1', pntFromDistanceAndAngleP(AW4, lineLengthP(AW4, AW5)/3.0, angleOfLineP(f, AW4) - angleOfDegree(90))) # b/w AW4 & AW5
AW5_c2 = cPointP(A, 'AW5_c2', pntFromDistanceAndAngleP(AW5, lineLengthP(AW4, AW5)/3.0, angleOfLineP(i, AW5) + angleOfDegree(90))) # b/w AW4 & AW5
u1_c1 = cPointP(A, 'u1_c1', pntFromDistanceAndAngleP(t1, lineLengthP(t1, u1)/3.0, angleOfLineP(t1, AW1) - angleOfDegree(90))) # b/w t1 & u1
u1_c2 = cPointP(A, 'u1_c2', pntFromDistanceAndAngleP(u1, lineLengthP(t1, u1)/3.0, angleOfLineP(f, u1) + angleOfDegree(90))) # b/w t1 & u1
w1_c1 = cPointP(A, 'w1_c1', pntFromDistanceAndAngleP(v1, lineLengthP(v1, w1)/3.0, angleOfLineP(f, v1) - angleOfDegree(90))) # b/w v1 & w1
w1_c2 = cPointP(A, 'w1_c2', pntFromDistanceAndAngleP(w1, lineLengthP(v1, w1)/3.0, angleOfLineP(w1, AW5) + angleOfDegree(90))) # b/w v1 & w1
pnt1 = rPointP(A, 'pnt1', pntIntersectLinesP(f, g, AW2, AW2_c2)) # where sewn dart fold should cross waistline before folding
pnt2 = rPointP(A, 'pnt2', pntFromDistanceAndAngleP(AW4, lineLengthP(AW4, pnt1), angleOfLineP(AW2, pnt1) - angleOfVectorP(c, f, d)))
pnt3 = rPointP(A, 'pnt3', pntIntersectLinesP(f, pnt1, AW4, pnt2))
AW3 = rPointP(A, 'AW3', pntOnLineP(f, c, lineLengthP(f, pnt3))) # extend dart center up to make sewn dart fold cross waistline
# front dart AD
AD1 = rPointP(A, 'AD1', f) # dart point
AD2 = rPointP(A, 'AD2', pntOffLineP(d, AD1, SEAM_ALLOWANCE)) # inside dart at cuttingline
AD3 = rPointP(A, 'AD3', pntOffLineP(e, AD1, SEAM_ALLOWANCE)) # outside dart at cuttingline
# front side seam AS
AS1 = rPointP(A, 'AS1', i)
AS2 = rPointP(A, 'AS2', o)
AS3 = rPointP(A, 'AS3', q)
# front side seam control points cAS
# control points next to AS1 form a vertical line at AS1.x, control point nearest AS2 is along line of hem to knee so that seam curves continuously into straight seam from knee to hem
distance = lineLengthP(AS1, AW5)/4.0 # shorter control point line = flatter curve between waist & hip
AS1_c2 = cPoint(A, 'AS1_c2', AS1.x, AS1.y - distance) # b/w AW5 & AS1
angle = angleOfLineP(AW5, AS1_c2)
AS1_c1 = cPointP(A, 'AS1_c1', pntFromDistanceAndAngleP(AW5, distance, angle)) # b/w AW5 & AS1
distance = lineLengthP(AS1, AS2)/3.0
AS2_c1 = cPoint(A, 'AS2_c1', AS1.x, AS1.y + distance) # b/w AS1 & AS2
angle = angleOfLineP(AS3, AS2)
AS2_c2 = cPointP(A, 'AS2_c2', pntFromDistanceAndAngleP(AS2, distance, angle)) #b/w AS1 & AS2
# front inseam AI
AI1 = rPointP(A, 'AI1', r)
AI2 = rPointP(A, 'AI2', p)
AI3 = rPointP(A, 'AI3', n)
# front inseam control points cAI
AI3_c1 = cPointP(A, 'AI3_c1', pntOffLineP(AI2, AI1, lineLengthP(AI2, AI3)/3.0)) #b/w AI2 & AI3
AI3_c2 = cPointP(A, 'AI3_c2', pntOnLineP(AI3, AI3_c1, lineLengthP(AI2, AI3)/3.0)) #b/w AI2 & AI3
#front center seam AC
AC1 = rPointP(A, 'AC1', m)
AC2 = rPointP(A, 'AC2', h)
# front center seam control points cAC
AC2_c2 = cPointP(A, 'AC2_c2', pntOffLineP(AC2, AW1, lineLengthP(l, AC2)*(5/8.0)))
pnts = pointList(AI3, AC1, AC2_c2)
c1, c2 = controlPoints('FrontCenterSeam', pnts)
AC1_c1, AC1_c2 = cPointP(A, 'AC1_c1', c1[0]), cPointP(A, 'AC1_c2', c2[0]) #b/w AI3 & AC1
AC2_c1 = cPointP(A, 'AC2_c1', c1[1]) #b/w AC1 & AC2
#front grainline AG & label location
AG1 = rPoint(A, 'AG1', creaseLine, hipLine)
AG2 = rPoint(A, 'AG2', creaseLine, hemLine - 2.0*IN)
# front label location
A.label_x, A.label_y = creaseLine, hipLine - 2.0*IN
#grid path
grid = path()
    addToPath(grid, 'M', Side, 'L', k, 'L', n, 'L', Inseam, 'L', Side, 'M', AS1, 'L', j, 'M', Center, 'L', l, 'L', m)
    addToPath(grid, 'M', AW1, 'L', AW5, 'M', AW1, 'L', AW2, 'M', AW4, 'L', AW5, 'M', t1, 'L', u1, 'M', v1, 'L', w1) # waist grid lines
# dart 'd' path
dartLine = path()
addToPath(dartLine, 'M', AD2, 'L', AD1, 'L', AD3)
# seamline 's' & cuttingline 'c' paths
seamLine = path()
cuttingLine = path()
for p in (seamLine, cuttingLine):
addToPath(p, 'M', AW1, 'C', AW2_c1, AW2_c2, AW2, 'L', AW3, 'L', AW4, 'C', AW5_c1, AW5_c2, AW5) # waist
addToPath(p, 'C', AS1_c1, AS1_c2, AS1, 'C', AS2_c1, AS2_c2, AS2, 'L', AS3) # side
addToPath(p, 'L', AI1, 'L', AI2, 'C', AI3_c1, AI3_c2, AI3) # inseam
addToPath(p, 'C', AC1_c1, AC1_c2, AC1, 'C', AC2_c1, AC2_c2, AC2, 'L', AW1) # center
# add grainline, dart, seamline & cuttingline paths to pattern
addGrainLine(A, AG1, AG2)
addGridLine(A, grid)
addDartLine(A, dartLine)
addSeamLine(A, seamLine)
addCuttingLine(A, cuttingLine)
# pants Back 'B'
#TODO - change angle of dart to be perpendicular to waistline
#TODO - use side_rise and back_rise to create reference grid
#TODO - use back_hip_length and crotch waist-to-waist measurements
pants.add(PatternPiece('pattern', 'back', letter='B', fabric=2, interfacing=0, lining=0))
B = pants.back
top = 0.001
crotch = 0.001
center = cd.back_crotch_extension
width = center + max(cd.back_hip_width*0.5, cd.back_waist_width*0.5)
side = width
creaseLine = width/2.0
Inseam = rPoint(B, 'Inseam', crotch, top)
Center = rPoint(B, 'Center', center, top)
Width = rPoint(B, 'Width', width, top)
Side = rPointP(B, 'Side', Width)
a = pPoint(center + (1+(1/8.))*IN, top - (1.*IN)) # center waist
b = pPoint(center + cd.back_waist_width*0.5 + backDartWidth, top) # side waist
pnt = pntOnLineP(a, b, lineLengthP(a, b)/2.0)
c = pPoint(pnt.x, pnt.y + (1/4.0)*IN) # dart center at waist along line ab
d = pPoint(c.x - backDartWidth/2.0, c.y) # dart inside at waist
e = pPoint(c.x + backDartWidth/2.0, c.y) # dart outside at waist
f = pPoint(c.x, c.y + backDartLength) # dart point
angle = angleOfLineP(f, d) - angleOfVectorP(c, f, d)
g = pntFromDistanceAndAngleP(f, backDartLength, angle) # on angle of sewn dart fold, after folded toward center
h = pPoint(center, riseLine/2.0) # center front 'pivot' point from crotch curve to front fly
i = pPoint(side, hipLine) # side hip
j = pPoint(center, hipLine) # center hip
k = pPoint(side, riseLine) # side rise
l = pPoint(center, riseLine) # center rise
m = pntFromDistanceAndAngleP(l, (1.25*IN), angleOfDegree(225.0)) # center crotch curve
n = pPoint(crotch, riseLine) # center crotch point
o = pPoint(creaseLine - backKneeWidth/2.0, kneeLine) # inside knee
p = pPoint(creaseLine + backKneeWidth/2.0, kneeLine) # outside knee
q = pPoint(creaseLine - backHemWidth/2.0, hemLine) # inside hem
r = pPoint(creaseLine + backHemWidth/2.0, hemLine) # outside hem
pnt1 = pPoint(a.x, a.y + waistBand)
pnt2 = pPoint(d.x, d.y + waistBand)
pnt3 = pPoint(e.x, e.y + waistBand)
pnt4 = pPoint(b.x, b.y + waistBand)
t2 = rPointP(B, 't2', pntIntersectLinesP(pnt1, pnt2, a, h)) # waistBand at center
u2 = rPointP(B, 'u2', pntIntersectLinesP(pnt1, pnt2, d, f)) # waistBand at inside dart
v2 = rPointP(B, 'v2', pntIntersectLinesP(pnt3, pnt4, e, f)) # waistBand at outside dart
w2 = rPointP(B, 'w2', pntIntersectLinesP(pnt3, pnt4, b, i)) # waistBand at side
# back waist BW
BW1 = rPointP(B, 'BW1', a) # center waist
BW2 = rPointP(B, 'BW2', d) # inside dart
BW4 = rPointP(B, 'BW4', e) # outside dart
BW5 = rPointP(B, 'BW5', b) # side waist
# back waist control points
BW2_c1 = cPointP(B, 'BW2_c1', pntFromDistanceAndAngleP(BW1, lineLengthP(BW1, BW2)/3.0, angleOfLineP(j, BW1) + angleOfDegree(90)))
BW2_c2 = cPointP(B, 'BW2_c2', pntFromDistanceAndAngleP(BW2, lineLengthP(BW1, BW2)/3.0, angleOfLineP(f, BW2) - angleOfDegree(90)))
BW5_c1 = cPointP(B, 'BW5_c1', pntFromDistanceAndAngleP(BW4, lineLengthP(BW4, BW5)/3.0, angleOfLineP(f, BW4) + angleOfDegree(90)))
BW5_c2 = cPointP(B, 'BW5_c2', pntFromDistanceAndAngleP(BW5, lineLengthP(BW4, BW5)/3.0, angleOfLineP(i, BW5) - angleOfDegree(90)))
u2_c1 = cPointP(B, 'u2_c1', pntFromDistanceAndAngleP(t2, lineLengthP(t2, u2)/3.0, angleOfLineP(t2, BW1) + angleOfDegree(90))) # b/w t2 & u2
u2_c2 = cPointP(B, 'u2_c2', pntFromDistanceAndAngleP(u2, lineLengthP(t2, u2)/3.0, angleOfLineP(u2, BW2) - angleOfDegree(90))) # b/w t2 & u2
w2_c1 = cPointP(B, 'w2_c1', pntFromDistanceAndAngleP(v2, lineLengthP(v2, w2)/3.0, angleOfLineP(f, v2) + angleOfDegree(90))) # b/w v2 & w2
w2_c2 = cPointP(B, 'w2_c2', pntFromDistanceAndAngleP(w2, lineLengthP(v2, w2)/3.0, angleOfLineP(w2, BW5) - angleOfDegree(90))) # b/w v2 & w2
# back dart BD
pnt1 = rPointP(B, 'pnt1', pntIntersectLinesP(f, g, BW2, BW2_c2)) # where sewn dart fold should cross waistline before folding
pnt2 = rPointP(B, 'pnt2', pntFromDistanceAndAngleP(BW4, lineLengthP(BW4, pnt1), angleOfLineP(BW2, pnt1) + angleOfVectorP(c, f, d)))
pnt3 = rPointP(B, 'pnt3', pntIntersectLinesP(f, pnt1, BW4, pnt2))
BW3 = rPointP(B, 'BW3', pntOnLineP(f, c, lineLengthP(f, pnt3))) # extend dart center up to make sewn dart fold cross waistline
BD1 = rPointP(B, 'BD1', f) # dart point
BD2 = rPointP(B, 'BD2', pntOffLineP(d, BD1, SEAM_ALLOWANCE)) # inside dart at cuttingline
BD3 = rPointP(B, 'BD3', pntOffLineP(e, BD1, SEAM_ALLOWANCE)) # outside dart at cuttingline
# back side seam BS
BS1 = rPointP(B, 'BS1', i) # side hip
BS2 = rPointP(B, 'BS2', p) # outside knee
BS3 = rPointP(B, 'BS3', r) # outside hem
# back side seam control points
# control points at hip are vertical
    distance = lineLengthP(BS1, BW5)/4.0 # shorter control point line = flatter curve between waist & hip
BS1_c2 = cPoint(B, 'BS1_c2', BS1.x, BS1.y - distance) # b/w BW5 & BS1
angle = angleOfLineP(BW5, BS1_c2)
BS1_c1 = cPointP(B, 'BS1_c1', pntFromDistanceAndAngleP(BW5, distance, angle)) # b/w BW5 & BS1
distance = lineLengthP(BS1, BS2)/3.0
BS2_c1 = cPoint(B, 'BS2_c1', BS1.x, BS1.y + distance) # b/w BS1 & BS2
angle = angleOfLineP(BS3, BS2)
BS2_c2 = cPointP(B, 'BS2_c2', pntFromDistanceAndAngleP(BS2, distance, angle)) #b/w BS1 & BS2
# back inseam BI
BI1 = rPointP(B, 'BI1', q) # inseam hem
BI2 = rPointP(B, 'BI2', o) # inseam knee
BI3 = rPointP(B, 'BI3', n) # crotch point
# back inseam control points
BI3_c1 = cPointP(B, 'BI3_c1', pntOffLineP(BI2, BI1, lineLengthP(BI2, BI3)/3.0)) #b/w BI2 & BI3
    BI3_c2 = cPointP(B, 'BI3_c2', pntOnLineP(BI3, BI3_c1, lineLengthP(BI2, BI3)/3.0)) #b/w BI2 & BI3
# back center seam BC
BC1 = rPointP(B, 'BC1', m) # crotch curve
BC2 = rPointP(B, 'BC2', j) # center hip
# back center seam control points
BC2_c2 = cPointP(B, 'BC2_c2', pntOffLineP(BC2, BW1, lineLengthP(l, BC2)/3.0))
BC2_c1 = cPointP(B, 'BC2_c1', pntFromDistanceAndAngleP(BC1, lineLengthP(BC1, BC2_c2)/3.0, angleOfLineP(BI3, BC2))) # b/w BC1 & BC2
distance = lineLengthP(BI3, BC1)/3.0
BC1_c1 = cPoint(B, 'BC1_c1', BI3.x + distance, BI3.y) #b/w BI3 & BC1
BC1_c2 = cPointP(B, 'BC1_c2', pntFromDistanceAndAngleP(BC1, distance, angleOfLineP(BC2, BI3)))
# back grainline BG
BG1 = rPoint(B, 'BG1', creaseLine, hipLine) # grainline end 1
BG2 = rPoint(B, 'BG2', creaseLine, hemLine - 2.0*IN) # grainline end 2
# back label location
B.label_x, B.label_y = creaseLine, (hipLine - 2.0*IN) # label location
# grid
grid = path()
addToPath(grid, 'M', Inseam, 'L', Width, 'L', k, 'L', n, 'L', Inseam, 'M', Center, 'L', l, 'M', i, 'L', j) # horizontal & vertical: torso box, centerline, hipline
addToPath(grid, 'M', l, 'L', m, 'M', BW1, 'L', BW5, 'M', BD2, 'L', BD1, 'L', BD3) # diagonal: crotch curve, waistline, dartline
    addToPath(grid, 'M', BW1, 'L', BW2, 'M', BW4, 'L', BW5, 'M', t2, 'L', u2, 'M', v2, 'L', w2) # line to create waistband pattern piece
# dart 'd' path
dartLine = path()
addToPath(dartLine, 'M', BD2, 'L', BD1, 'L', BD3)
# seamline 's' & cuttingline 'c' paths
seamLine = path()
cuttingLine = path()
for p in (seamLine, cuttingLine):
addToPath(p, 'M', BW1, 'C', BW2_c1, BW2_c2, BW2, 'L', BW3, 'L', BW4, 'C', BW5_c1, BW5_c2, BW5) # waist
addToPath(p, 'C', BS1_c1, BS1_c2, BS1, 'C', BS2_c1, BS2_c2, BS2, 'L', BS3) # side
addToPath(p, 'L', BI1, 'L', BI2, 'C', BI3_c1, BI3_c2, BI3) # inseam
addToPath(p, 'C', BC1_c1, BC1_c2, BC1, 'C', BC2_c1, BC2_c2, BC2, 'L', BW1) # center
# add grid, dart, grainline, seamline & cuttingline paths to pattern
addGrainLine(B, BG1, BG2)
addGridLine(B, grid)
addDartLine(B, dartLine)
addSeamLine(B, seamLine)
addCuttingLine(B, cuttingLine)
# Waistfacing 'C'
pants.add(PatternPiece('pattern', 'Waistfacing', letter='C', fabric=0, interfacing=2, lining=2))
C = pants.Waistfacing
top = 0.0
width = cd.front_waist_width*0.5 + cd.back_waist_width*0.5
# Waistfacing front center section
# lower section
CW1 = rPointP(C, 'CW1', t1)
CW2 = rPointP(C, 'CW2', u1)
# upper section
CW9 = rPointP(C, 'CW9', AW2)
CW10 = rPointP(C, 'CW10', AW1)
# Waistfacing front side section
connectorPoints = pointList(CW9, CW2) # 2 connector points from waistfacing above, upper = CW9, lower = CW2
moveObject = pointList(AW4, v1, w1, AW5) # front side section, outside of dart. 1st 2 points connect to connectorPoints
new_pnts = connectObjects(connectorPoints, moveObject) # translate & rotate front side section
# skip AW4/new_pnts[0] & v1/new_pnts[1], same as CW9 & CW2
CW3 = rPointP(C, 'CW3', new_pnts[2]) # lower points
CW8 = rPointP(C, 'CW8', new_pnts[3]) # upper points
# Waistfacing back side section
connectorPoints = pointList(CW8, CW3) # 2 connector points from waistfacing above, upper = CW8, lower = CW3
moveObject = pointList(BW5, w2, v2, BW4)
new_pnts = connectObjects(connectorPoints, moveObject)
# skip BW5/new_pnts[0] & w2/new_pnts[1], same as CW8 & CW3
CW4 = rPointP(C, 'CW4', new_pnts[2]) # lower points
CW7 = rPointP(C, 'CW7', new_pnts[3]) # upper points
# Waistfacing back center section
connectorPoints = pointList(CW7, CW4) # 2 connector points from waistfacing above, upper = CW7, lower = CW4
moveObject = pointList(BW2, u2, t2, BW1)
new_pnts = connectObjects(connectorPoints, moveObject)
# skip BW2/new_pnts[0] & u2/new_pnts[1], same as CW7 & CW4
CW5 = rPointP(C, 'CW5', new_pnts[2]) # lower points
CW6 = rPointP(C, 'CW6', new_pnts[3]) # upper points
# Waistfacing control points
# lower
pnts = pointList(CW1, CW2, CW3, CW4, CW5)
c1, c2 = controlPoints('WaistfacingLower', pnts)
CW2_c1, CW2_c2 = cPointP(C, 'CW2_c1', c1[0]), cPointP(C, 'CW2_c2', c2[0]) # b/w CW1 & CW2
CW3_c1, CW3_c2 = cPointP(C, 'CW3_c1', c1[1]), cPointP(C, 'CW3_c2', c2[1]) # b/w CW2 & CW3
    CW4_c1, CW4_c2 = cPointP(C, 'CW4_c1', c1[2]), cPointP(C, 'CW4_c2', c2[2]) # b/w CW3 & CW4
CW5_c1, CW5_c2 = cPointP(C, 'CW5_c1', c1[3]), cPointP(C, 'CW5_c2', c2[3]) # b/w CW4 & CW5
# upper
pnts = pointList(CW6, CW7, CW8, CW9, CW10)
c1, c2 = controlPoints('WaistfacingUpper', pnts)
CW7_c1, CW7_c2 = cPointP(C, 'CW7_c1', c1[0]), cPointP(C, 'CW7_c2', c2[0]) # b/w CW6 & CW7
CW8_c1, CW8_c2 = cPointP(C, 'CW8_c1', c1[1]), cPointP(C, 'CW8_c2', c2[1]) # b/w CW7 & CW8
CW9_c1, CW9_c2 = cPointP(C, 'CW9_c1', c1[2]), cPointP(C, 'CW9_c2', c2[2]) # b/w CW8 & CW9
CW10_c1, CW10_c2 = cPointP(C, 'CW10_c1', c1[3]), cPointP(C, 'CW10_c2', c2[3]) # b/w CW9 & CW10
# grainline points & label location
CG1 = rPoint(C, 'CG1', CW6.x, CW6.y + (abs(CW6.y - CW7.y)/2.0))
CG2 = rPointP(C, 'CG2', pntFromDistanceAndAngleP(CG1, 6.5*IN, angleOfDegree(45.0)))
C.label_x, C.label_y = CW6.x + 0.25*IN, CW6.y + 0.25*IN
# waistfacing grid
grid = path()
addToPath(grid, 'M', CW1, 'L', CW2, 'L', CW3, 'L', CW4, 'L', CW5, 'L', CW6, 'L', CW7, 'L', CW8, 'L', CW9, 'L', CW10, 'L', CW1)
seamLine = path()
cuttingLine = path()
for p in seamLine, cuttingLine:
addToPath(p, 'M', CW1, 'C', CW2_c1, CW2_c2, CW2, 'C', CW3_c1, CW3_c2, CW3, 'C', CW4_c1, CW4_c2, CW4, 'C', CW5_c1, CW5_c2, CW5) # lower waistband
addToPath(p, 'L', CW6, 'C', CW7_c1, CW7_c2, CW7, 'C', CW8_c1, CW8_c2, CW8, 'C', CW9_c1, CW9_c2, CW9, 'C', CW10_c1, CW10_c2, CW10, 'L', CW1) # upper waistband
# add grid, grainline, seamline & cuttingline paths to pattern
addGrainLine(C, CG1, CG2)
addGridLine(C, grid)
addSeamLine(C, seamLine)
addCuttingLine(C, cuttingLine)
# call draw once for the entire pattern
doc.draw()
return
# vi:set ts=4 sw=4 expandtab:
| gpl-3.0 | -956,837,025,002,564,000 | 55.174224 | 191 | 0.59808 | false |
twinaphex/pcsx2 | 3rdparty/portaudio/doc/utils/checkfiledocs.py | 70 | 2415 | import os
import os.path
import string
paRootDirectory = '../../'
paHtmlDocDirectory = os.path.join( paRootDirectory, "doc", "html" )
## Script to check documentation status
## this script assumes that html doxygen documentation has been generated
##
## it then walks the entire portaudio source tree and check that
## - every source file (.c,.h,.cpp) has a doxygen comment block containing
## - a @file directive
## - a @brief directive
## - a @ingroup directive
## - it also checks that a corresponding html documentation file has been generated.
##
## This can be used as a first-level check to make sure the documentation is in order.
##
## The idea is to get a list of which files are missing doxygen documentation.
# recurse from top and return a list of all files with the given
# extensions. ignore .svn directories. return absolute paths when
# includePaths is True, otherwise just the bare file names
def recursiveFindFiles( top, extensions, includePaths ):
result = []
for (dirpath, dirnames, filenames) in os.walk(top):
if not '.svn' in dirpath:
for f in filenames:
if os.path.splitext(f)[1] in extensions:
if includePaths:
result.append( os.path.abspath( os.path.join( dirpath, f ) ) )
else:
result.append( f )
return result
# generate the html file name that doxygen would use for
# a particular source file. this is a brittle conversion
# which i worked out by trial and error
def doxygenHtmlDocFileName( sourceFile ):
return sourceFile.replace( '_', '__' ).replace( '.', '_8' ) + '.html'
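# illustrative example of the conversion above (assumed input, for documentation only):
#   doxygenHtmlDocFileName( 'pa_front.c' ) -> 'pa__front_8c.html'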
sourceFiles = recursiveFindFiles( paRootDirectory, [ '.c', '.h', '.cpp' ], True );
docFiles = recursiveFindFiles( paHtmlDocDirectory, [ '.html' ], False );
currentFile = ""
def printError( f, message ):
global currentFile
if f != currentFile:
currentFile = f
print f, ":"
print "\t!", message
for f in sourceFiles:
if not doxygenHtmlDocFileName( os.path.basename(f) ) in docFiles:
printError( f, "no doxygen generated doc page" )
s = file( f, 'rt' ).read()
if not '/**' in s:
printError( f, "no doxygen /** block" )
if not '@file' in s:
printError( f, "no doxygen @file tag" )
if not '@brief' in s:
printError( f, "no doxygen @brief tag" )
if not '@ingroup' in s:
printError( f, "no doxygen @ingroup tag" )
| lgpl-3.0 | 8,399,591,121,217,035,000 | 30.363636 | 86 | 0.633954 | false |
Asana/boto | tests/unit/vpc/test_vpc.py | 100 | 13752 | # -*- coding: UTF-8 -*-
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, VPC
from boto.ec2.securitygroup import SecurityGroup
DESCRIBE_VPCS = b'''<?xml version="1.0" encoding="UTF-8"?>
<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>623040d1-b51c-40bc-8080-93486f38d03d</requestId>
<vpcSet>
<item>
<vpcId>vpc-12345678</vpcId>
<state>available</state>
<cidrBlock>172.16.0.0/16</cidrBlock>
<dhcpOptionsId>dopt-12345678</dhcpOptionsId>
<instanceTenancy>default</instanceTenancy>
<isDefault>false</isDefault>
</item>
</vpcSet>
</DescribeVpcsResponse>'''
class TestDescribeVPCs(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return DESCRIBE_VPCS
def test_get_vpcs(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_vpcs()
self.assertEqual(len(api_response), 1)
vpc = api_response[0]
self.assertFalse(vpc.is_default)
self.assertEqual(vpc.instance_tenancy, 'default')
class TestCreateVpc(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpc>
<vpcId>vpc-1a2b3c4d</vpcId>
<state>pending</state>
<cidrBlock>10.0.0.0/16</cidrBlock>
<dhcpOptionsId>dopt-1a2b3c4d2</dhcpOptionsId>
<instanceTenancy>default</instanceTenancy>
<tagSet/>
</vpc>
</CreateVpcResponse>
"""
def test_create_vpc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_vpc('10.0.0.0/16', 'default')
self.assert_request_parameters({
'Action': 'CreateVpc',
'InstanceTenancy': 'default',
'CidrBlock': '10.0.0.0/16'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, VPC)
self.assertEquals(api_response.id, 'vpc-1a2b3c4d')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.cidr_block, '10.0.0.0/16')
self.assertEquals(api_response.dhcp_options_id, 'dopt-1a2b3c4d2')
self.assertEquals(api_response.instance_tenancy, 'default')
class TestDeleteVpc(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcResponse>
"""
def test_delete_vpc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_vpc('vpc-1a2b3c4d')
self.assert_request_parameters({
'Action': 'DeleteVpc',
'VpcId': 'vpc-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestModifyVpcAttribute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ModifyVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</ModifyVpcAttributeResponse>
"""
def test_modify_vpc_attribute_dns_support(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.modify_vpc_attribute(
'vpc-1a2b3c4d', enable_dns_support=True)
self.assert_request_parameters({
'Action': 'ModifyVpcAttribute',
'VpcId': 'vpc-1a2b3c4d',
'EnableDnsSupport.Value': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_modify_vpc_attribute_dns_hostnames(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.modify_vpc_attribute(
'vpc-1a2b3c4d', enable_dns_hostnames=True)
self.assert_request_parameters({
'Action': 'ModifyVpcAttribute',
'VpcId': 'vpc-1a2b3c4d',
'EnableDnsHostnames.Value': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestGetAllClassicLinkVpc(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
<vpcSet>
<item>
<vpcId>vpc-6226ab07</vpcId>
<classicLinkEnabled>false</classicLinkEnabled>
<tagSet>
<item>
<key>Name</key>
                          <value>hello</value>
</item>
</tagSet>
</item>
<item>
<vpcId>vpc-9d24f8f8</vpcId>
<classicLinkEnabled>true</classicLinkEnabled>
<tagSet/>
</item>
</vpcSet>
</DescribeVpcClassicLinkResponse>
"""
def test_get_all_classic_link_vpcs(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_classic_link_vpcs()
self.assertEqual(len(response), 2)
vpc = response[0]
self.assertEqual(vpc.id, 'vpc-6226ab07')
self.assertEqual(vpc.classic_link_enabled, 'false')
self.assertEqual(vpc.tags, {'Name': 'hello'})
def test_get_all_classic_link_vpcs_params(self):
self.set_http_response(status_code=200)
self.service_connection.get_all_classic_link_vpcs(
vpc_ids=['id1', 'id2'],
filters={'GroupId': 'sg-9b4343fe'},
dry_run=True,
)
self.assert_request_parameters({
'Action': 'DescribeVpcClassicLink',
'VpcId.1': 'id1',
'VpcId.2': 'id2',
'Filter.1.Name': 'GroupId',
'Filter.1.Value.1': 'sg-9b4343fe',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestVpcClassicLink(AWSMockServiceTestCase):
connection_class = VPCConnection
def setUp(self):
super(TestVpcClassicLink, self).setUp()
self.vpc = VPC(self.service_connection)
self.vpc_id = 'myid'
self.vpc.id = self.vpc_id
class TestAttachClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<AttachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>88673bdf-cd16-40bf-87a1-6132fec47257</requestId>
<return>true</return>
</AttachClassicLinkVpcResponse>
"""
def test_attach_classic_link_instance_string_groups(self):
groups = ['sg-foo', 'sg-bar']
self.set_http_response(status_code=200)
response = self.vpc.attach_classic_instance(
instance_id='my_instance_id',
groups=groups,
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'AttachClassicLinkVpc',
'VpcId': self.vpc_id,
'InstanceId': 'my_instance_id',
'SecurityGroupId.1': 'sg-foo',
'SecurityGroupId.2': 'sg-bar',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
def test_attach_classic_link_instance_object_groups(self):
sec_group_1 = SecurityGroup()
sec_group_1.id = 'sg-foo'
sec_group_2 = SecurityGroup()
sec_group_2.id = 'sg-bar'
groups = [sec_group_1, sec_group_2]
self.set_http_response(status_code=200)
response = self.vpc.attach_classic_instance(
instance_id='my_instance_id',
groups=groups,
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'AttachClassicLinkVpc',
'VpcId': self.vpc_id,
'InstanceId': 'my_instance_id',
'SecurityGroupId.1': 'sg-foo',
'SecurityGroupId.2': 'sg-bar',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestDetachClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<DetachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>5565033d-1321-4eef-b121-6aa46f152ed7</requestId>
<return>true</return>
</DetachClassicLinkVpcResponse>
"""
def test_detach_classic_link_instance(self):
self.set_http_response(status_code=200)
response = self.vpc.detach_classic_instance(
instance_id='my_instance_id',
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'DetachClassicLinkVpc',
'VpcId': self.vpc_id,
'InstanceId': 'my_instance_id',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestEnableClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<EnableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
<return>true</return>
</EnableVpcClassicLinkResponse>
"""
def test_enable_classic_link(self):
self.set_http_response(status_code=200)
response = self.vpc.enable_classic_link(
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'EnableVpcClassicLink',
'VpcId': self.vpc_id,
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestDisableClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<DisableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
<return>true</return>
</DisableVpcClassicLinkResponse>
"""
    def test_disable_classic_link(self):
self.set_http_response(status_code=200)
response = self.vpc.disable_classic_link(
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'DisableVpcClassicLink',
'VpcId': self.vpc_id,
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestUpdateClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
<vpcSet>
<item>
<vpcId>myid</vpcId>
<classicLinkEnabled>true</classicLinkEnabled>
<tagSet/>
</item>
</vpcSet>
</DescribeVpcClassicLinkResponse>
"""
def test_vpc_update_classic_link_enabled(self):
self.vpc.classic_link_enabled = False
self.set_http_response(status_code=200)
self.vpc.update_classic_link_enabled(
dry_run=True,
validate=True
)
self.assert_request_parameters({
'Action': 'DescribeVpcClassicLink',
'VpcId.1': self.vpc_id,
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
self.assertEqual(self.vpc.classic_link_enabled, 'true')
if __name__ == '__main__':
unittest.main()
| mit | -8,327,067,811,609,021,000 | 36.47139 | 93 | 0.568935 | false |
Parrot-Developers/ardupilot | mk/PX4/Tools/gencpp/src/gencpp/__init__.py | 214 | 9473 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import genmsg.msgs
try:
from cStringIO import StringIO #Python 2.x
except ImportError:
from io import StringIO #Python 3.x
MSG_TYPE_TO_CPP = {'byte': 'int8_t',
'char': 'uint8_t',
'bool': 'uint8_t',
'uint8': 'uint8_t',
'int8': 'int8_t',
'uint16': 'uint16_t',
'int16': 'int16_t',
'uint32': 'uint32_t',
'int32': 'int32_t',
'uint64': 'uint64_t',
'int64': 'int64_t',
'float32': 'float',
'float64': 'double',
'string': 'std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > ',
'time': 'ros::Time',
'duration': 'ros::Duration'}
#used
def msg_type_to_cpp(type):
"""
Converts a message type (e.g. uint32, std_msgs/String, etc.) into the C++ declaration
for that type (e.g. uint32_t, std_msgs::String_<ContainerAllocator>)
@param type: The message type
@type type: str
@return: The C++ declaration
@rtype: str
"""
(base_type, is_array, array_len) = genmsg.msgs.parse_type(type)
cpp_type = None
if (genmsg.msgs.is_builtin(base_type)):
cpp_type = MSG_TYPE_TO_CPP[base_type]
elif (len(base_type.split('/')) == 1):
if (genmsg.msgs.is_header_type(base_type)):
cpp_type = ' ::std_msgs::Header_<ContainerAllocator> '
else:
cpp_type = '%s_<ContainerAllocator> '%(base_type)
else:
pkg = base_type.split('/')[0]
msg = base_type.split('/')[1]
cpp_type = ' ::%s::%s_<ContainerAllocator> '%(pkg, msg)
if (is_array):
if (array_len is None):
return 'std::vector<%s, typename ContainerAllocator::template rebind<%s>::other > '%(cpp_type, cpp_type)
else:
return 'boost::array<%s, %s> '%(cpp_type, array_len)
else:
return cpp_type
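# Illustrative examples of the conversion above (assumed inputs, for documentation only):
#   msg_type_to_cpp('uint32')          -> 'uint32_t'
#   msg_type_to_cpp('float32[9]')      -> 'boost::array<float, 9> '
#   msg_type_to_cpp('std_msgs/String') -> ' ::std_msgs::String_<ContainerAllocator> '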
def _escape_string(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
return s
def escape_message_definition(definition):
lines = definition.splitlines()
if not lines:
lines.append('')
s = StringIO()
for line in lines:
line = _escape_string(line)
s.write('%s\\n\\\n'%(line))
val = s.getvalue()
s.close()
return val
#used2
def cpp_message_declarations(name_prefix, msg):
"""
Returns the different possible C++ declarations for a message given the message itself.
@param name_prefix: The C++ prefix to be prepended to the name, e.g. "std_msgs::"
@type name_prefix: str
@param msg: The message type
@type msg: str
    @return: A tuple of 3 different names. cpp_message_declarations("std_msgs::", "String") returns the tuple
("std_msgs::String_", "std_msgs::String_<ContainerAllocator>", "std_msgs::String")
@rtype: str
"""
pkg, basetype = genmsg.names.package_resource_name(msg)
cpp_name = ' ::%s%s'%(name_prefix, msg)
if (pkg):
cpp_name = ' ::%s::%s'%(pkg, basetype)
return ('%s_'%(cpp_name), '%s_<ContainerAllocator> '%(cpp_name), '%s'%(cpp_name))
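# Illustrative example for a package-qualified message (assumed input, for documentation only):
#   cpp_message_declarations('', 'geometry_msgs/Point') ->
#   (' ::geometry_msgs::Point_', ' ::geometry_msgs::Point_<ContainerAllocator> ', ' ::geometry_msgs::Point')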
#todo
def is_fixed_length(spec, msg_context, includepath):
"""
Returns whether or not the message is fixed-length
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
    @param msg_context: The message context used to load dependent message specs
    @type msg_context: genmsg.msg_loader.MsgContext
    @param includepath: Map of package names to include paths, used to resolve dependent types
    @type includepath: dict
"""
types = []
for field in spec.parsed_fields():
if (field.is_array and field.array_len is None):
return False
if (field.base_type == 'string'):
return False
if (not field.is_builtin):
types.append(field.base_type)
types = set(types)
for t in types:
t = genmsg.msgs.resolve_type(t, spec.package)
assert isinstance(includepath, dict)
new_spec = genmsg.msg_loader.load_msg_by_type(msg_context, t, includepath)
if (not is_fixed_length(new_spec, msg_context, includepath)):
return False
return True
#used2
def default_value(type):
"""
Returns the value to initialize a message member with. 0 for integer types, 0.0 for floating point, false for bool,
empty string for everything else
@param type: The type
@type type: str
"""
if type in ['byte', 'int8', 'int16', 'int32', 'int64',
'char', 'uint8', 'uint16', 'uint32', 'uint64']:
return '0'
elif type in ['float32', 'float64']:
return '0.0'
elif type == 'bool':
return 'false'
return ""
#used2
def takes_allocator(type):
"""
Returns whether or not a type can take an allocator in its constructor. False for all builtin types except string.
True for all others.
@param type: The type
@type: str
"""
return not type in ['byte', 'int8', 'int16', 'int32', 'int64',
'char', 'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'bool', 'time', 'duration']
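# Illustrative examples (assumed inputs, for documentation only):
#   takes_allocator('uint8')               -> False
#   takes_allocator('string')              -> True   (only builtin that takes an allocator)
#   takes_allocator('geometry_msgs/Point') -> True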
def escape_string(str):
str = str.replace('\\', '\\\\')
str = str.replace('"', '\\"')
return str
#used
def generate_fixed_length_assigns(spec, container_gets_allocator, cpp_name_prefix):
"""
Initialize any fixed-length arrays
@param s: The stream to write to
@type s: stream
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
should have the allocator passed to its constructor. Assumes the allocator is named _alloc.
@type container_gets_allocator: bool
@param cpp_name_prefix: The C++ prefix to use when referring to the message, e.g. "std_msgs::"
@type cpp_name_prefix: str
"""
# Assign all fixed-length arrays their default values
for field in spec.parsed_fields():
if (not field.is_array or field.array_len is None):
continue
val = default_value(field.base_type)
if (container_gets_allocator and takes_allocator(field.base_type)):
# String is a special case, as it is the only builtin type that takes an allocator
if (field.base_type == "string"):
string_cpp = msg_type_to_cpp("string")
yield ' %s.assign(%s(_alloc));\n'%(field.name, string_cpp)
else:
(cpp_msg_unqualified, cpp_msg_with_alloc, _) = cpp_message_declarations(cpp_name_prefix, field.base_type)
yield ' %s.assign(%s(_alloc));\n'%(field.name, cpp_msg_with_alloc)
elif (len(val) > 0):
yield ' %s.assign(%s);\n'%(field.name, val)
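# For a hypothetical fixed-length field such as "float64[9] data" with
# container_gets_allocator=True, the generator above would yield
# "    data.assign(0.0);\n" (float64 takes no allocator, so only the default value is used).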
#used
def generate_initializer_list(spec, container_gets_allocator):
"""
Writes the initializer list for a constructor
@param s: The stream to write to
@type s: stream
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
should have the allocator passed to its constructor. Assumes the allocator is named _alloc.
@type container_gets_allocator: bool
"""
op = ':'
for field in spec.parsed_fields():
val = default_value(field.base_type)
use_alloc = takes_allocator(field.base_type)
if (field.is_array):
if (field.array_len is None and container_gets_allocator):
yield ' %s %s(_alloc)'%(op, field.name)
else:
yield ' %s %s()'%(op, field.name)
else:
if (container_gets_allocator and use_alloc):
yield ' %s %s(_alloc)'%(op, field.name)
else:
yield ' %s %s(%s)'%(op, field.name, val)
op = ','
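# For hypothetical fields "uint32 seq" and "string frame_id" with
# container_gets_allocator=True, the generator above yields, in order,
# "  : seq(0)" and "  , frame_id(_alloc)" (illustrative only).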
| gpl-3.0 | -5,045,322,676,413,847,000 | 36.295276 | 140 | 0.611527 | false |
analyseuc3m/ANALYSE-v1 | openedx/core/djangolib/markup.py | 5 | 1091 | """
Utilities for use in Mako markup.
"""
import markupsafe
# Text() can be used to declare a string as plain text, as HTML() is used
# for HTML. It simply wraps markupsafe's escape, which will HTML-escape if
# it isn't already escaped.
Text = markupsafe.escape # pylint: disable=invalid-name
def HTML(html): # pylint: disable=invalid-name
"""
Mark a string as already HTML, so that it won't be escaped before output.
Use this function when formatting HTML into other strings. It must be
used in conjunction with ``Text()``, and both ``HTML()`` and ``Text()``
must be closed before any calls to ``format()``::
<%page expression_filter="h"/>
<%!
from django.utils.translation import ugettext as _
from openedx.core.djangolib.markup import Text, HTML
%>
${Text(_("Write & send {start}email{end}")).format(
            start=HTML("<a href='mailto:{}'>".format(user.email)),
end=HTML("</a>"),
)}
"""
return markupsafe.Markup(html)
| agpl-3.0 | -2,623,674,539,528,092,700 | 31.088235 | 78 | 0.5967 | false |
dagwieers/ansible | test/units/modules/network/aireos/test_aireos_command.py | 52 | 4292 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.aireos import aireos_command
from units.modules.utils import set_module_args
from .aireos_module import TestCiscoWlcModule, load_fixture
class TestCiscoWlcCommandModule(TestCiscoWlcModule):
module = aireos_command
def setUp(self):
super(TestCiscoWlcCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.aireos.aireos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestCiscoWlcCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
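                # fixture file names mirror the command text with spaces
                # replaced by underscores, e.g. 'show sysinfo' -> 'show_sysinfo'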
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_aireos_command_simple(self):
set_module_args(dict(commands=['show sysinfo']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Manufacturer\'s Name'))
def test_aireos_command_multiple(self):
set_module_args(dict(commands=['show sysinfo', 'show sysinfo']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Manufacturer\'s Name'))
def test_aireos_command_wait_for(self):
wait_for = 'result[0] contains "Cisco Systems Inc"'
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for))
self.execute_module()
def test_aireos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_aireos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_aireos_command_match_any(self):
wait_for = ['result[0] contains "Cisco Systems Inc"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, match='any'))
self.execute_module()
def test_aireos_command_match_all(self):
wait_for = ['result[0] contains "Cisco Systems Inc"',
'result[0] contains "Cisco Controller"']
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, match='all'))
self.execute_module()
def test_aireos_command_match_all_failure(self):
wait_for = ['result[0] contains "Cisco Systems Inc"',
'result[0] contains "test string"']
commands = ['show sysinfo', 'show sysinfo']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 | -754,908,003,244,684,400 | 39.11215 | 100 | 0.65261 | false |
felliott/osf.io | osf_tests/test_registration_moderation_notifications.py | 4 | 24500 | import pytest
import mock
from mock import call
from django.utils import timezone
from osf.management.commands.add_notification_subscription import add_reviews_notification_setting
from osf.management.commands.populate_registration_provider_notification_subscriptions import populate_registration_provider_notification_subscriptions
from osf.migrations import update_provider_auth_groups
from osf.models import Brand, NotificationDigest
from osf.models.action import RegistrationAction
from osf.utils import machines
from osf.utils.notifications import (
notify_submit,
notify_accept_reject,
notify_moderator_registration_requests_withdrawal,
notify_reject_withdraw_request,
notify_withdraw_registration
)
from osf.utils.workflows import RegistrationModerationTriggers, RegistrationModerationStates
from osf_tests.factories import (
RegistrationFactory,
AuthUserFactory,
RetractionFactory
)
from website import mails, settings
from website.notifications import emails, tasks
from website.reviews import listeners
def get_moderator(provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
return user
def get_daily_moderator(provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
for subscription_type in provider.DEFAULT_SUBSCRIPTIONS:
subscription = provider.notification_subscriptions.get(event_name=subscription_type)
subscription.add_user_to_subscription(user, 'email_digest')
return user
# Set USE_EMAIL to true and mock out the default mailer for consistency with other mocked settings
@mock.patch('website.mails.settings.USE_EMAIL', True)
@mock.patch('website.mails.tasks.send_email', mock.MagicMock())
@pytest.mark.django_db
class TestRegistrationMachineNotification:
MOCK_NOW = timezone.now()
@pytest.yield_fixture(autouse=True)
def setup(self):
populate_registration_provider_notification_subscriptions()
with mock.patch('osf.utils.machines.timezone.now', return_value=self.MOCK_NOW):
yield
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def registration(self, admin, contrib):
registration = RegistrationFactory(creator=admin)
registration.add_contributor(admin, 'admin')
registration.add_contributor(contrib, 'write')
update_provider_auth_groups()
return registration
@pytest.fixture()
def registration_with_retraction(self, admin, contrib):
sanction = RetractionFactory(user=admin)
registration = sanction.target_registration
registration.update_moderation_state()
registration.add_contributor(admin, 'admin')
registration.add_contributor(contrib, 'write')
registration.save()
return registration
@pytest.fixture()
def provider(self, registration):
return registration.provider
@pytest.fixture()
def moderator(self, provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
return user
@pytest.fixture()
def daily_moderator(self, provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
for subscription_type in provider.DEFAULT_SUBSCRIPTIONS:
subscription = provider.notification_subscriptions.get(event_name=subscription_type)
subscription.add_user_to_subscription(user, 'email_digest')
return user
@pytest.fixture()
def accept_action(self, registration, admin):
registration_action = RegistrationAction.objects.create(
creator=admin,
target=registration,
trigger=RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name,
from_state=RegistrationModerationStates.INITIAL.db_name,
to_state=RegistrationModerationStates.ACCEPTED.db_name,
comment='yo'
)
return registration_action
@pytest.fixture()
def withdraw_request_action(self, registration, admin):
registration_action = RegistrationAction.objects.create(
creator=admin,
target=registration,
trigger=RegistrationModerationTriggers.REQUEST_WITHDRAWAL.db_name,
from_state=RegistrationModerationStates.ACCEPTED.db_name,
to_state=RegistrationModerationStates.PENDING_WITHDRAW.db_name,
comment='yo'
)
return registration_action
@pytest.fixture()
def withdraw_action(self, registration, admin):
registration_action = RegistrationAction.objects.create(
creator=admin,
target=registration,
trigger=RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name,
from_state=RegistrationModerationStates.PENDING_WITHDRAW.db_name,
to_state=RegistrationModerationStates.WITHDRAWN.db_name,
comment='yo'
)
return registration_action
def test_submit_notifications(self, registration, moderator, admin, contrib, provider):
"""
[REQS-96] "As moderator of branded registry, I receive email notification upon admin author(s) submission approval"
        :param registration: the submitted registration
        :param admin: registration admin who submitted it
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call/args and also implicitly ensures
        # that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(listeners.mails, 'send_mail', side_effect=send_mail) as mock_send_mail:
notify_submit(registration, admin)
assert len(mock_send_mail.call_args_list) == 2
admin_message, contrib_message = mock_send_mail.call_args_list
assert admin_message == call(
admin.email,
mails.REVIEWS_SUBMISSION_CONFIRMATION,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_creator=True,
logo='osf_registries',
no_future_emails=[],
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_name=provider.name,
provider_url='http://localhost:5000/',
referrer=admin,
reviewable=registration,
user=admin,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.REVIEWS_SUBMISSION_CONFIRMATION,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_creator=False,
logo='osf_registries',
no_future_emails=[],
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_name=provider.name,
provider_url='http://localhost:5000/',
referrer=admin,
reviewable=registration,
user=contrib,
workflow=None
)
assert NotificationDigest.objects.count() == 1
digest = NotificationDigest.objects.last()
assert digest.user == moderator
assert digest.send_type == 'email_transactional'
assert digest.event == 'new_pending_submissions'
def test_accept_notifications(self, registration, moderator, admin, contrib, accept_action):
"""
[REQS-98] "As registration authors, we receive email notification upon moderator acceptance"
        :param registration: the accepted registration
:return:
"""
add_reviews_notification_setting('global_reviews')
# Set up mock_email as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders correctly.
store_emails = emails.store_emails
with mock.patch.object(emails, 'store_emails', side_effect=store_emails) as mock_email:
notify_accept_reject(registration, registration.creator, accept_action, RegistrationModerationStates)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
[admin._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=True,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
assert contrib_message == call(
[contrib._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=False,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
reviewable=registration,
requester=admin,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
def test_reject_notifications(self, registration, moderator, admin, contrib, accept_action):
"""
[REQS-100] "As authors of rejected by moderator registration, we receive email notification of registration returned
to draft state"
:param draft_registration:
:return:
"""
add_reviews_notification_setting('global_reviews')
# Set up mock_email as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders correctly
store_emails = emails.store_emails
with mock.patch.object(emails, 'store_emails', side_effect=store_emails) as mock_email:
notify_accept_reject(registration, registration.creator, accept_action, RegistrationModerationStates)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
[admin._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=True,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
reviewable=registration,
requester=admin,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
assert contrib_message == call(
[contrib._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=False,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
reviewable=registration,
requester=admin,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
def test_notify_moderator_registration_requests_withdrawal_notifications(self, moderator, daily_moderator, registration, admin, provider):
"""
[REQS-106] "As moderator, I receive registration withdrawal request notification email"
:param mock_email:
:param draft_registration:
:param contrib:
:return:
"""
assert NotificationDigest.objects.count() == 0
notify_moderator_registration_requests_withdrawal(registration, admin)
assert NotificationDigest.objects.count() == 2
daily_digest = NotificationDigest.objects.get(send_type='email_digest')
transactional_digest = NotificationDigest.objects.get(send_type='email_transactional')
assert daily_digest.user == daily_moderator
assert transactional_digest.user == moderator
for digest in (daily_digest, transactional_digest):
assert 'requested withdrawal' in digest.message
assert digest.event == 'new_pending_withdraw_requests'
assert digest.provider == provider
def test_withdrawal_registration_accepted_notifications(self, registration_with_retraction, contrib, admin, withdraw_action):
"""
[REQS-109] "As registration author(s) requesting registration withdrawal, we receive notification email of moderator
decision"
        :param registration_with_retraction: the registration whose withdrawal was approved
        :param contrib: registration contributor who should be notified
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(machines.mails, 'send_mail', side_effect=send_mail) as mock_email:
notify_withdraw_registration(registration_with_retraction, withdraw_action)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
admin.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=admin,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=True,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=contrib,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=False,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
def test_withdrawal_registration_rejected_notifications(self, registration, contrib, admin, withdraw_request_action):
"""
[REQS-109] "As registration author(s) requesting registration withdrawal, we receive notification email of moderator
decision"
        :param registration: the registration whose withdrawal request was declined
        :param contrib: registration contributor who should be notified
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(machines.mails, 'send_mail', side_effect=send_mail) as mock_email:
notify_reject_withdraw_request(registration, withdraw_request_action)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
admin.email,
mails.WITHDRAWAL_REQUEST_DECLINED,
contributor=admin,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_requester=True,
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.WITHDRAWAL_REQUEST_DECLINED,
contributor=contrib,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_requester=False,
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration,
workflow=None
)
def test_withdrawal_registration_force_notifications(self, registration_with_retraction, contrib, admin, withdraw_action):
"""
[REQS-109] "As registration author(s) requesting registration withdrawal, we receive notification email of moderator
decision"
        :param registration_with_retraction: the withdrawn registration
        :param contrib: registration contributor who should be notified
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(machines.mails, 'send_mail', side_effect=send_mail) as mock_email:
notify_withdraw_registration(registration_with_retraction, withdraw_action)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
admin.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=admin,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=True,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=contrib,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=False,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
@pytest.mark.parametrize(
'digest_type, expected_recipient',
[('email_transactional', get_moderator), ('email_digest', get_daily_moderator)]
)
def test_submissions_and_withdrawals_both_appear_in_moderator_digest(self, digest_type, expected_recipient, registration, admin, provider):
# Invoke the fixture function to get the recipient because parametrize
expected_recipient = expected_recipient(provider)
with mock.patch('website.reviews.listeners.mails.send_mail'):
notify_submit(registration, admin)
notify_moderator_registration_requests_withdrawal(registration, admin)
# One user, one provider => one email
grouped_notifications = list(tasks.get_moderators_emails(digest_type))
assert len(grouped_notifications) == 1
moderator_message = grouped_notifications[0]
assert moderator_message['user_id'] == expected_recipient._id
assert moderator_message['provider_id'] == provider.id
        # No fixed ordering of the entries, so just make sure that
# keywords for each action type are in some message
updates = moderator_message['info']
assert len(updates) == 2
assert any('submitted' in entry['message'] for entry in updates)
assert any('requested withdrawal' in entry['message'] for entry in updates)
@pytest.mark.parametrize('digest_type', ['email_transactional', 'email_digest'])
    def test_submissions_and_withdrawals_do_not_appear_in_node_digest(self, digest_type, registration, admin, moderator, daily_moderator):
notify_submit(registration, admin)
notify_moderator_registration_requests_withdrawal(registration, admin)
assert not list(tasks.get_users_emails(digest_type))
def test_moderator_digest_emails_render(self, registration, admin, moderator):
notify_moderator_registration_requests_withdrawal(registration, admin)
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(tasks.mails, 'send_mail', side_effect=send_mail) as mock_send_mail:
tasks._send_reviews_moderator_emails('email_transactional')
mock_send_mail.assert_called()
def test_branded_provider_notification_renders(self, registration, admin, moderator):
# Set brand details to be checked in notify_base.mako
provider = registration.provider
provider.brand = Brand.objects.create(hero_logo_image='not-a-url', primary_color='#FFA500')
provider.name = 'Test Provider'
provider.save()
# Implicitly check that all of our uses of notify_base.mako render with branded details:
#
# notify_submit renders reviews_submission_confirmation using context from
# osf.utils.notifications and stores emails to be picked up in the moderator digest
#
        # _send_reviews_moderator_emails renders digest_reviews_moderators using context from
# website.notifications.tasks
notify_submit(registration, admin)
tasks._send_reviews_moderator_emails('email_transactional')
assert True # everything rendered!
| apache-2.0 | -8,267,121,595,218,933,000 | 40.107383 | 151 | 0.643592 | false |
SSSD/sssd | src/tests/multihost/adsites/test_adsites.py | 1 | 13157 | from __future__ import print_function
import time
import pytest
from sssd.testlib.common.utils import sssdTools
@pytest.mark.adsites
class Testadsites(object):
"""
@Title: IDM-SSSD-TC: ad_provider: adsites:
Improve AD site discovery process
Test cases for BZ: 1819012
@Steps:
1. Join client to AD
2. Start SSSD and enable debug
3. Create secondary site, move second domain controller to second site
"""
@pytest.mark.adsites
def test_001_ad_startup_discovery(self, multihost, adjoin):
"""
@Title: IDM-SSSD-TC: ad_startup_discovery
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
multihost.client[0].service_sssd('start')
cmd_id = 'id Administrator@%s' % domain
multihost.client[0].run_command(cmd_id)
cmd_check_ping = 'grep -ire ad_cldap_ping_send ' \
'/var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
        assert check_batch1.returncode == 0 or check_batch2.returncode == 0
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in' \
' domain Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
@pytest.mark.adsites
def test_002_ad_startup_discovery_one_server_unreachable(self, multihost,
adjoin):
"""
@Title: IDM-SSSD-TC: ad_startup_discovery_one_server_unreachable
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
ad2ip = multihost.ad[1].ip
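        # Make the second DC (ad2) unreachable: install firewalld and drop
        # all outbound traffic to its address before SSSD starts.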
cmd_dnf_firewalld = 'dnf install -y firewalld'
multihost.client[0].run_command(cmd_dnf_firewalld)
cmd_start_firewalld = 'systemctl start firewalld'
multihost.client[0].run_command(cmd_start_firewalld)
fw_add = 'firewall-cmd --permanent --direct --add-rule ipv4 ' \
'filter OUTPUT 0 -d %s -j DROP' % ad2ip
fw_reload = 'firewall-cmd --reload'
multihost.client[0].run_command(fw_add, raiseonerr=True)
multihost.client[0].run_command(fw_reload, raiseonerr=True)
multihost.client[0].service_sssd('start')
cmd_check_ping = 'grep -ire ad_cldap_ping_send ' \
'/var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
        assert check_batch1.returncode == 1 and check_batch2.returncode == 0
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain' \
' controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
fw_stop = 'systemctl stop firewalld'
multihost.client[0].run_command(fw_stop, raiseonerr=True)
fw_remove = 'dnf remove -y firewalld'
multihost.client[0].run_command(fw_remove, raiseonerr=True)
@pytest.mark.adsites
def test_003_ad_startup_discovery_two_different_sites(self, multihost,
adjoin, create_site):
"""
@Title: IDM-SSSD-TC: ad_startup_discovery_two_different_sites
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
multihost.client[0].service_sssd('start')
cmd_check_ping = 'grep -ire ad_cldap_ping_send' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
        assert check_batch1.returncode == 0 or check_batch2.returncode == 0
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain' \
' controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
@pytest.mark.adsites
def test_004_ad_startup_discovery_one_server_unreachable(self,
multihost,
adjoin,
create_site):
"""
@Title: IDM-SSSD-TC:
ad_startup_discovery_two_different_sites_one_server_unreachable
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
ad2ip = multihost.ad[1].ip
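        # As in test_002, block outbound traffic to the second DC so it is
        # unreachable while SSSD performs site discovery.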
cmd_dnf_firewalld = 'dnf install -y firewalld'
multihost.client[0].run_command(cmd_dnf_firewalld)
cmd_start_firewalld = 'systemctl start firewalld'
multihost.client[0].run_command(cmd_start_firewalld)
fw_add = 'firewall-cmd --permanent --direct --add-rule ipv4 ' \
'filter OUTPUT 0 -d %s -j DROP' % ad2ip
fw_reload = 'firewall-cmd --reload'
multihost.client[0].run_command(fw_add, raiseonerr=True)
multihost.client[0].run_command(fw_reload, raiseonerr=True)
multihost.client[0].service_sssd('start')
cmd_check_ping = 'grep -ire ad_cldap_ping_send' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
        assert check_batch1.returncode == 1 and check_batch2.returncode == 0
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain' \
' controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
fw_stop = 'systemctl stop firewalld'
multihost.client[0].run_command(fw_stop, raiseonerr=True)
fw_remove = 'dnf remove -y firewalld'
multihost.client[0].run_command(fw_remove, raiseonerr=True)
| gpl-3.0 | 133,369,113,282,755,090 | 49.217557 | 79 | 0.512959 | false |
Solanar/CMPUT410-Project | DisSoNet/front/views/author.py | 1 | 4859 | from django.http import HttpResponse
import json
import socket
from data.models import Comment
def processRequestFromOtherServer(obj, dict_type):
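    """Wrap obj in a {dict_type: [...]} dict and return it as a JSON HttpResponse.

    dict_type selects the serializer: "author", "posts" or "comments".
    """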
json_dict = {}
json_dict_list = []
if dict_type is "author":
json_dict_list.append(getAuthorDict(obj))
elif dict_type is "posts":
for posts in obj:
json_dict_list.append(getPostDict(posts))
elif dict_type is "comments":
json_dict_list = getCommentDictList(obj)
else:
print ("Unknown type")
json_dict[dict_type] = json_dict_list
json_data = json.dumps(json_dict)
return HttpResponse(json_data, content_type="application/json")
def getAuthorDict(user_obj, include_url=False):
""" Take a list of author objects, returns it's dict representations.
"author":
{
"id":"sha1",
"host":"host",
"displayname":"name",
"url":"url_to_author"
},
:returns: dict representation of an author object
"""
author_dict = {}
author_dict["id"] = user_obj.guid
author_dict["displayname"] = user_obj.get_full_name()
    # include_url is currently unused; the url field is always set below.
    host = socket.gethostname()  # only valid if the site is served on port 80
    ip = "http://10.4.10.2"  # hard-coded host address for now
    port = ":8080/"
    author_dict["host"] = ip + port
author_dict["url"] = ip + port + "author/" + user_obj.guid + "/"
return author_dict
def getPostDict(post_object):
""" From all post URLS should return a list of posts like the following.
Of the form:
{ "posts":[{"title":"string",
"source":"url",
"origin":"url",
"description":"string",
"content-type":"text/*",
"content":"string",
"author":{"id":"sha1",
"host":"host",
"displayname":"name",
"url":"url_to_author"},
"categories":["cat1", "cat2"],
"comments":[{"author":{"id":"sha1",
"host":"url",
"displayname":"name"},
"comment":"string",
"pubDate":"date",
"guid":"sha1"}]
"pubdate":"date",
"guid":"sha1",
"visibility":"PUBLIC"}]}
This function will return the representation of a post to go into this list
"""
post_dict = {}
post_dict["title"] = post_object.title
post_dict["source"] = post_object.source
post_dict["origin"] = post_object.origin
post_dict["description"] = post_object.description
post_dict["content-type"] = post_object.content_type
post_dict["content"] = post_object.content
# TODO python datetime is not JSON serializable
formatter = "%a %b %d %H:%M:%S mst %Y"
timestring = post_object.published_date.strftime(formatter)
post_dict["pubDate"] = timestring
# post_dict["pubdate"] = post_object.published_date
post_dict["guid"] = post_object.guid
post_dict["visibility"] = post_object.visibility
# get the post author, convert to dict and add to post_dict
author_dict = getAuthorDict(post_object.author, include_url=True)
post_dict["author"] = author_dict
post_dict["categories"] = getCategoryList(post_object.categories)
# get all comments on this post of return them
comment_list = Comment.objects.filter(post=post_object)
comment_dict_list = getCommentDictList(comment_list)
post_dict["comments"] = comment_dict_list
return post_dict
def getCommentDictList(comment_list):
""" Take a list of comment objects, returns list of dict representations.
Of the form:
"comments":[{"author":{"id":"sha1",
"host":"url",
"displayname":"name"},
"comment":"string",
"pubDate":"date",
"guid":"sha1"}]
:returns: A list of dicts
"""
comment_dict_list = []
for comment in comment_list:
comment_dict = {}
author_dict = getAuthorDict(comment.user)
comment_dict["author"] = author_dict
comment_dict["comment"] = comment.content
# TODO python datetime is not JSON serializable
formatter = "%a %b %d %H:%M:%S mst %Y"
timestring = comment.published_date.strftime(formatter)
comment_dict["pubDate"] = timestring
comment_dict["guid"] = comment.guid
comment_dict_list.append(comment_dict)
return comment_dict_list
def getCategoryList(categories):
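    """Return the names of the given categories queryset as a plain list."""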
category_list = []
for category in categories.all():
category_list.append(category.category_name)
return category_list
| apache-2.0 | 2,352,532,585,635,758,600 | 32.979021 | 79 | 0.571928 | false |
apache/incubator-mxnet | tests/python/unittest/test_gluon_control_flow.py | 2 | 25390 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.test_utils import *
from mxnet.base import _as_list
from collections import defaultdict
from mxnet.attribute import AttrScope
@mx.util.use_np
def test_while_loop_simple_forward():
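    """Forward-only checks for mx.npx.while_loop with simple accumulator loops.

    Each case runs a small block (imperative and hybridized) whose loop body
    is ``func`` and whose stopping test is ``cond``, then checks the outputs
    and final loop variables.
    """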
class _TestBlock(gluon.HybridBlock):
def __init__(self, cond, func, max_iterations):
super(_TestBlock, self).__init__()
self.cond = cond
self.func = func
self.max_iterations = max_iterations
def forward(self, *loop_vars):
return mx.npx.while_loop(
cond=self.cond,
func=self.func,
loop_vars=loop_vars,
max_iterations=self.max_iterations
)
for hybridize in [False, True]:
        # Case 1.1: result should be sum([1, 2, 3, 4, 5])
model = _TestBlock(
cond=lambda i, s: i <= 5,
func=lambda i, s: (None, (i + 1, s + i)),
max_iterations=10,
)
if hybridize:
model.hybridize()
_, result = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
)
assert result[0].item() == 6
assert result[1].item() == 15
# Case 1.2: result should be sum([1, 2, 3 ... 1000])
model = _TestBlock(
cond=lambda i, s, true: true,
func=lambda i, s, true: (None, (i + 1, s + i, true)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
_, result = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([1], dtype="int64"), # true
)
assert result[0].item() == 1001
assert result[1].item() == 500500
assert result[2].item() == 1
# Case 1.3: result should be sum([])
model = _TestBlock(
cond=lambda i, s, false: false,
func=lambda i, s, false: (None, (i + 1, s + i, false)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
_, result = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([0], dtype="int64"), # false
)
assert result[0].item() == 1
assert result[1].item() == 0
assert result[2].item() == 0
# Case 2.1: result should be sum([1, 2, 3 ... 100])
model = _TestBlock(
cond=lambda i, s: i <= 100,
func=lambda i, s: (i, (i + 1, s + i)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
outputs, (result_i, result_s) = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
)
assert all(outputs.asnumpy()[ : 100] == np.arange(1, 101).reshape(100, 1))
assert result_i.item() == 101
assert result_s.item() == 5050
# Case 2.2: result should be sum([1, 2, 3 ... 1000])
model = _TestBlock(
cond=lambda i, s, true: true,
func=lambda i, s, true: (i, (i + 1, s + i, true)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
outputs, (result_i, result_s, _) = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([1], dtype="int64"), # true
)
assert all(outputs.asnumpy() == np.arange(1, 1001).reshape(1000, 1))
assert result_i.item() == 1001
assert result_s.item() == 500500
# Case 2.3: a corner case, in which loop body is never executed
model = _TestBlock(
cond=lambda i, s, false: false,
func=lambda i, s, false: (i, (i + 1, s + i, false)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
_, (result_i, result_s, _) = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([0], dtype="int64"), # false
)
assert result_i.item() == 1
assert result_s.item() == 0
def test_cut_subgraph_foreach():
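    """Chained foreach ops give the same result with and without hybridization."""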
class TestLayer(gluon.HybridBlock):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, inputs, states):
def step1(data, states):
return data + 1, states
out1, states1 = mx.npx.foreach(step1, inputs, states)
out2, states2 = mx.npx.foreach(step1, out1, states)
def step2(data, states):
return data + states[0], states
out, states = mx.npx.foreach(step2, out2, states1)
return out
data = mx.np.random.normal(loc=0, scale=1, size=(5, 10))
states = mx.np.random.normal(loc=0, scale=1, size=(10))
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data, [states])
with mx.autograd.record():
res1 = layer(data, [states])
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data, [states])
with mx.autograd.record():
res2 = layer(data, [states])
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
@mx.util.use_np
def test_uniq_name():
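    """Nested control-flow ops whose inputs reuse symbol names still give the
    same results when hybridized as on the imperative path."""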
class ForeachLayer1(gluon.HybridBlock):
def __init__(self):
super(ForeachLayer1, self).__init__()
def forward(self, inputs, states):
def step1(data, states):
return data + 1, states
out1, states1 = mx.npx.foreach(step1, inputs, states)
# The input variables have the same symbol name.
out, states = mx.npx.foreach(step1, out1, states1)
return out
class ForeachLayer2(gluon.HybridBlock):
def __init__(self):
super(ForeachLayer2, self).__init__()
def forward(self, inputs, states):
def step1(data, states):
return data + 1, states
out1, states1 = mx.npx.foreach(step1, inputs, states)
def step2(data, states):
return data, [states[0] + states[0] + mx.np.squeeze(mx.npx.slice(data, begin=0, end=1))]
# The input variables have the same symbol names.
# The free variables have the same symbol names as the input variables.
out, states = mx.npx.foreach(step2, out1, states1)
return out
class WhileLayer1(gluon.HybridBlock):
def __init__(self):
super(WhileLayer1, self).__init__()
def forward(self, inputs, states):
def cond(state1, state2):
s = mx.np.squeeze(mx.npx.slice(state1, begin=0, end=1))
return s == s
def step(state1, state2):
return state1 + 1, [state1 + 1, state2 + 1]
states = [states[0], states[0] + 1]
out1, states1 = mx.npx.while_loop(cond, step, states, max_iterations=5)
# The input variables have the same symbol name.
out, states = mx.npx.while_loop(cond, step, states1, max_iterations=5)
return out
class WhileLayer2(gluon.HybridBlock):
def __init__(self):
super(WhileLayer2, self).__init__()
def forward(self, inputs, states):
def cond(state1, state2):
s = mx.np.squeeze(mx.npx.slice(state1, begin=0, end=1))
return s == s
def step1(state1, state2):
return state1 + 1, [state1, state2]
states = [states[0], states[0] + 1]
out1, states1 = mx.npx.while_loop(cond, step1, states, max_iterations=5)
def step2(state1, state2):
return state1 + 1, [state1 + state1[0], state2 + state1[1]]
# The input variables have the same symbol name.
out, states = mx.npx.while_loop(cond, step2, states1, max_iterations=5)
return out
TestLayers = [ForeachLayer1, ForeachLayer2,
WhileLayer1, WhileLayer2]
data = mx.np.random.normal(loc=0, scale=1, size=(2, 5))
states = mx.np.random.normal(loc=0, scale=1, size=(5))
for TestLayer in TestLayers:
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data, [states])
with mx.autograd.record():
res1 = layer(data, [states])
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data, [states])
with mx.autograd.record():
res2 = layer(data, [states])
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_cut_subgraph_while_loop():
class TestLayer(gluon.HybridBlock):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, data):
out1, data1 = mx.npx.while_loop(
cond=lambda i: i <= 5,
func=lambda i: (None, (i + 1, )),
loop_vars=(data, ),
max_iterations=10,
)
out2, data2 = mx.npx.while_loop(
cond=lambda i: i,
func=lambda i: (None, (i + 1, )),
loop_vars=data1[0],
max_iterations=10,
)
return data2[0]
data = mx.np.random.normal(loc=0, scale=1, size=(1, ))
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data)
with mx.autograd.record():
res1 = layer(data)
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data)
with mx.autograd.record():
res2 = layer(data)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
@mx.util.use_np
def test_cut_subgraph_cond():
class TestLayer(gluon.HybridBlock):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, data):
data1 = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data,
)
data2 = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data1,
)
return data2
data = mx.np.random.normal(loc=0, scale=1, size=(1, ))
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data)
with mx.autograd.record():
res1 = layer(data)
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data)
with mx.autograd.record():
res2 = layer(data)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
@mx.util.use_np
def test_output_format_foreach():
class TestLayer1(gluon.HybridBlock):
def __init__(self, step):
super(TestLayer1, self).__init__()
self.step = step
def forward(self, ins, states):
out, states = mx.npx.foreach(self.step, ins, states)
return out, states
def step1(data, state):
return data, state
def step2(data, state):
return [data], state
def step3(data, state):
if isinstance(state, list):
return [], [state[0] + data]
else:
return [], state + data
def step4(data, state):
if isinstance(state, list):
return [data, state[0]], state
else:
return [data, state], state
steps = [step1, step2, step3, step4]
data = mx.np.random.normal(loc=0, scale=1, size=(10, 2))
state = mx.np.random.normal(loc=0, scale=1, size=(2))
for step in steps:
layer1 = TestLayer1(step)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(data, [state])
out2, state2 = layer2(data, [state])
step_out, step_state = step(data, [state])
assert type(out1) == type(step_out)
assert type(out2) == type(step_out)
assert type(state1) == type(step_state)
assert type(state2) == type(step_state)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
layer1 = TestLayer1(step)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(data, state)
out2, state2 = layer2(data, state)
step_out, step_state = step(data, state)
assert type(out1) == type(step_out)
assert type(out2) == type(step_out)
assert type(state1) == type(step_state)
assert type(state2) == type(step_state)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
if step == step3:
continue
layer1 = TestLayer1(step)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(data, [state, [state + 1]])
out2, state2 = layer2(data, [state, [state + 1]])
step_out, step_state = step(data, [state, [state + 1]])
assert type(out1) == type(step_out)
assert type(out2) == type(step_out)
assert type(state1) == type(step_state)
assert type(state2) == type(step_state)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
if isinstance(state1[i], list):
assert_almost_equal(state1[i][0].asnumpy(), state2[i][0].asnumpy(),
rtol=0.001, atol=0.0001)
else:
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(),
rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_output_format_while():
class TestLayer1(gluon.HybridBlock):
def __init__(self, step, use_list, nested_list=False):
super(TestLayer1, self).__init__()
self.step = step
self.use_list = use_list
self.nested_list = nested_list
def forward(self, states):
def cond(state1):
scalar = mx.npx.slice(state1, begin=0, end=1)
return scalar == scalar
cond_func = cond
if self.use_list:
states = [states]
elif self.nested_list:
def cond2(state1, state2):
scalar = mx.npx.slice(state1, begin=0, end=1)
return scalar == scalar
cond_func = cond2
states = [states, [states + 1]]
out, states = mx.npx.while_loop(cond_func, self.step, states, max_iterations=5)
return out, states
def step1(state):
return state, state
def step2(state):
if isinstance(state, list):
return state, state
else:
return [state], state
def step3(state):
return [], state
steps = [step1, step2, step3]
state = mx.np.random.normal(loc=0, scale=1, size=(2))
for step in steps:
layer1 = TestLayer1(step, False)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step, False)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(state)
out2, state2 = layer2(state)
assert type(out1) == type(out2)
        assert type(state1) == type(state2)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
layer1 = TestLayer1(step, True)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step, True)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(state)
out2, state2 = layer2(state)
assert type(out1) == type(out2)
assert type(state1) == type(state2)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
def step4(state, state2):
states = _as_list(state)
states.append(state2)
return state, states
def step5(state, state2):
states = _as_list(state)
states.append(state2)
if isinstance(state, list):
return state, states
else:
return [state], states
def step6(state, state2):
states = _as_list(state)
states.append(state2)
return [], states
steps = [step4, step5, step6]
for step in steps:
layer1 = TestLayer1(step, False, True)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step, False, True)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(state)
out2, state2 = layer2(state)
assert type(out1) == type(out2)
assert type(state1) == type(state2)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
if not isinstance(state1[i], list):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(),
rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_output_format_cond():
class TestLayer1(gluon.HybridBlock):
def __init__(self, func):
super(TestLayer1, self).__init__()
self.func = func
def forward(self, data):
def then_func(data):
return self.func(data)
def else_func(data):
return self.func(data)
return mx.npx.cond(lambda data: mx.npx.slice(data, begin=0, end=1),
then_func, else_func, data)
def func1(data):
return data
def func2(data):
return [data]
def func3(data):
return [data, data]
funcs = [func1, func2, func3]
data = mx.np.random.normal(loc=0, scale=1, size=(2))
for func in funcs:
layer1 = TestLayer1(func)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(func)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1 = layer1(data)
out2 = layer2(data)
func_out = func(data)
assert type(out1) == type(func_out)
assert type(out2) == type(func_out)
out1 = _as_list(out1)
out2 = _as_list(out2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_scope():
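    """Named cond subgraphs created under an AttrScope get uniquely prefixed
    pred/then/else names, counted once per hybridized block."""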
class TestBlock1(gluon.HybridBlock):
def __init__(self):
super(TestBlock1, self).__init__()
def forward(self, data):
(new_data, ) = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data,
name="my_cond",
)
return new_data
class TestBlock2(gluon.HybridBlock):
def __init__(self):
super(TestBlock2, self).__init__()
def forward(self, data):
(new_data, ) = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data,
name="my_cond",
)
return new_data
AttrScope._subgraph_names = defaultdict(int)
data = mx.np.random.normal(loc=0, scale=1, size=(1, ))
with AttrScope(__subgraph_name__="my_cond"):
block1 = TestBlock1()
block1.initialize(ctx=default_context())
block1.hybridize()
_ = block1(data)
block2 = TestBlock2()
block2.initialize(ctx=default_context())
block2.hybridize()
_ = block2(data)
assert len(AttrScope._subgraph_names) == 3
assert AttrScope._subgraph_names['my_cond$my_cond_else'] == 2
assert AttrScope._subgraph_names['my_cond$my_cond_pred'] == 2
assert AttrScope._subgraph_names['my_cond$my_cond_then'] == 2
class RNNLayer(gluon.HybridBlock):
def __init__(self, cell_type, hidden_size):
super(RNNLayer, self).__init__()
self.cell = cell_type(hidden_size)
def forward(self, inputs, states):
out, states = mx.npx.foreach(self.cell, inputs, states)
return out
def infer_shape(self, input, *args):
self.cell.infer_shape(0, input, False)
@mx.util.use_np
def check_rnn(cell_type, num_states):
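    """Train one step of RNNLayer imperatively, then verify that several
    hybridization configs reproduce the same outputs and updated weights."""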
batch_size = 10
hidden_size = 100
rnn_data = mx.np.random.normal(loc=0, scale=1, size=(5, batch_size, 50))
state_shape = (batch_size, hidden_size)
states = [mx.np.random.normal(loc=0, scale=1, size=state_shape) for i in range(num_states)]
layer = RNNLayer(cell_type, hidden_size)
layer.infer_shape(rnn_data)
layer.initialize(ctx=default_context())
res1 = layer(rnn_data, states)
params1 = layer.collect_params()
orig_params1 = copy.deepcopy(params1)
trainer = gluon.Trainer(params1, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res1 = layer(rnn_data, states)
res1.backward()
trainer.step(batch_size)
configs = [
{},
{'inline_limit': 0},
{'static_alloc': True},
{'static_alloc': True, 'static_shape': True} ]
for config in configs:
layer = RNNLayer(cell_type, hidden_size)
layer.infer_shape(rnn_data)
layer.initialize(ctx=default_context())
layer.hybridize(**config)
res2 = layer(rnn_data, states)
params2 = layer.collect_params()
for key, val in orig_params1.items():
params2[key].set_data(copy.deepcopy(val.data()))
trainer = gluon.Trainer(params2, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res2 = layer(rnn_data, states)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
res2.backward()
trainer.step(batch_size)
for key, val in params1.items():
weight1 = val.data()
weight2 = params2[key].data()
assert_almost_equal(weight1.asnumpy(), weight2.asnumpy(),
rtol=1e-3, atol=1e-3)
def test_rnn():
cell_types = [(gluon.rnn.RNNCell, 1), (gluon.rnn.LSTMCell, 2),
(gluon.rnn.GRUCell, 1)]
for cell_type, num_states in cell_types:
check_rnn(cell_type, num_states)
| apache-2.0 | -8,095,221,295,236,106,000 | 35.585014 | 104 | 0.55321 | false |
KaelChen/numpy | numpy/ma/tests/test_mrecords.py | 64 | 20867 | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for mrecords.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
import warnings
import pickle
import numpy as np
import numpy.ma as ma
from numpy import recarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma import masked, nomask
from numpy.testing import TestCase, run_module_suite
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
from numpy.ma.mrecords import (
MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords,
addfield
)
from numpy.ma.testutils import (
assert_, assert_equal,
assert_equal_records,
)
class TestMRecords(TestCase):
# Base test class for MaskedArrays.
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.setup()
def setup(self):
# Generic setup
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = asbytes_nested(['one', 'two', 'three', 'four', 'five'])
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mask = [0, 1, 0, 0, 1]
self.base = ma.array(list(zip(ilist, flist, slist)),
mask=mask, dtype=ddtype)
def test_byview(self):
# Test creation by view
base = self.base
mbase = base.view(mrecarray)
assert_equal(mbase.recordmask, base.recordmask)
assert_equal_records(mbase._mask, base._mask)
assert_(isinstance(mbase._data, recarray))
assert_equal_records(mbase._data, base._data.view(recarray))
for field in ('a', 'b', 'c'):
assert_equal(base[field], mbase[field])
assert_equal_records(mbase.view(mrecarray), mbase)
def test_get(self):
# Tests fields retrieval
base = self.base.copy()
mbase = base.view(mrecarray)
# As fields..........
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase, field), mbase[field])
assert_equal(base[field], mbase[field])
# as elements .......
mbase_first = mbase[0]
assert_(isinstance(mbase_first, mrecarray))
assert_equal(mbase_first.dtype, mbase.dtype)
assert_equal(mbase_first.tolist(), (1, 1.1, asbytes('one')))
# Used to be mask, now it's recordmask
assert_equal(mbase_first.recordmask, nomask)
assert_equal(mbase_first._mask.item(), (False, False, False))
assert_equal(mbase_first['a'], mbase['a'][0])
mbase_last = mbase[-1]
assert_(isinstance(mbase_last, mrecarray))
assert_equal(mbase_last.dtype, mbase.dtype)
assert_equal(mbase_last.tolist(), (None, None, None))
# Used to be mask, now it's recordmask
assert_equal(mbase_last.recordmask, True)
assert_equal(mbase_last._mask.item(), (True, True, True))
assert_equal(mbase_last['a'], mbase['a'][-1])
assert_((mbase_last['a'] is masked))
# as slice ..........
mbase_sl = mbase[:2]
assert_(isinstance(mbase_sl, mrecarray))
assert_equal(mbase_sl.dtype, mbase.dtype)
# Used to be mask, now it's recordmask
assert_equal(mbase_sl.recordmask, [0, 1])
assert_equal_records(mbase_sl.mask,
np.array([(False, False, False),
(True, True, True)],
dtype=mbase._mask.dtype))
assert_equal_records(mbase_sl, base[:2].view(mrecarray))
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase_sl, field), base[:2][field])
def test_set_fields(self):
# Tests setting fields.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase = mbase.copy()
mbase.fill_value = (999999, 1e20, 'N/A')
# Change the data, the mask should be conserved
mbase.a._data[:] = 5
assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
# Change the elements, and the mask will follow
mbase.a = 1
assert_equal(mbase['a']._data, [1]*5)
assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
        # Used to be _mask, now it's recordmask
assert_equal(mbase.recordmask, [False]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0),
(0, 1, 1),
(0, 0, 0),
(0, 0, 0),
(0, 1, 1)],
dtype=bool))
# Set a field to mask ........................
mbase.c = masked
        # Used to be mask, and now it's still mask!
assert_equal(mbase.c.mask, [1]*5)
assert_equal(mbase.c.recordmask, [1]*5)
assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 1),
(0, 1, 1),
(0, 0, 1),
(0, 0, 1),
(0, 1, 1)],
dtype=bool))
# Set fields by slices .......................
mbase = base.view(mrecarray).copy()
mbase.a[3:] = 5
assert_equal(mbase.a, [1, 2, 3, 5, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
mbase.b[3:] = masked
assert_equal(mbase.b, base['b'])
assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
# Set fields globally..........................
ndtype = [('alpha', '|S1'), ('num', int)]
data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
rdata = data.view(MaskedRecords)
val = ma.array([10, 20, 30], mask=[1, 0, 0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rdata['num'] = val
assert_equal(rdata.num, val)
assert_equal(rdata.num.mask, [1, 0, 0])
def test_set_fields_mask(self):
# Tests setting the mask of a field.
base = self.base.copy()
        # This one already has a mask....
mbase = base.view(mrecarray)
mbase['a'][-2] = masked
assert_equal(mbase.a, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
        # This one does not have a mask yet
mbase = fromarrays([np.arange(5), np.random.rand(5)],
dtype=[('a', int), ('b', float)])
mbase['a'][-2] = masked
assert_equal(mbase.a, [0, 1, 2, 3, 4])
assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
def test_set_mask(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Set the mask to True .......................
mbase.mask = masked
assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
assert_equal(mbase['a']._mask, mbase['b']._mask)
assert_equal(mbase['a']._mask, mbase['c']._mask)
assert_equal(mbase._mask.tolist(),
np.array([(1, 1, 1)]*5, dtype=bool))
# Delete the mask ............................
mbase.mask = nomask
assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0)]*5, dtype=bool))
def test_set_mask_fromarray(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Sets the mask w/ an array
mbase.mask = [1, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
# Yay, once more !
mbase.mask = [0, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])
def test_set_mask_fromfields(self):
mbase = self.base.copy().view(mrecarray)
nmask = np.array(
[(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
dtype=[('a', bool), ('b', bool), ('c', bool)])
mbase.mask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
        # Reinitialize and redo
mbase.mask = False
mbase.fieldmask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
def test_set_elements(self):
base = self.base.copy()
# Set an element to mask .....................
mbase = base.view(mrecarray).copy()
mbase[-2] = masked
assert_equal(
mbase._mask.tolist(),
np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
dtype=bool))
# Used to be mask, now it's recordmask!
assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
# Set slices .................................
mbase = base.view(mrecarray).copy()
mbase[:2] = (5, 5, 5)
assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c._data,
asbytes_nested(['5', '5', 'three', 'four', 'five']))
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
mbase = base.view(mrecarray).copy()
mbase[:2] = masked
assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.c._data,
asbytes_nested(['one', 'two', 'three', 'four', 'five']))
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
def test_setslices_hardmask(self):
# Tests setting slices w/ hardmask.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
try:
mbase[-2:] = (5, 5, 5)
assert_equal(mbase.a._data, [1, 2, 3, 5, 5])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])
assert_equal(mbase.c._data,
asbytes_nested(['one', 'two', 'three', '5', 'five']))
assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])
assert_equal(mbase.b._mask, mbase.a._mask)
assert_equal(mbase.b._mask, mbase.c._mask)
except NotImplementedError:
# OK, not implemented yet...
pass
except AssertionError:
raise
else:
raise Exception("Flexible hard masks should be supported !")
# Not using a tuple should crash
try:
mbase[-2:] = 3
except (NotImplementedError, TypeError):
pass
else:
raise TypeError("Should have expected a readable buffer object!")
def test_hardmask(self):
# Test hardmask
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
self.assertTrue(mbase._hardmask)
mbase.mask = nomask
assert_equal_records(mbase._mask, base._mask)
mbase.soften_mask()
self.assertTrue(not mbase._hardmask)
mbase.mask = nomask
# So, the mask of a field is no longer set to nomask...
assert_equal_records(mbase._mask,
ma.make_mask_none(base.shape, base.dtype))
self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask)
assert_equal(mbase['a']._mask, mbase['b']._mask)
def test_pickling(self):
# Test pickling
base = self.base.copy()
mrec = base.view(mrecarray)
_ = pickle.dumps(mrec)
mrec_ = pickle.loads(_)
assert_equal(mrec_.dtype, mrec.dtype)
assert_equal_records(mrec_._data, mrec._data)
assert_equal(mrec_._mask, mrec._mask)
assert_equal_records(mrec_._mask, mrec._mask)
def test_filled(self):
# Test filling the array
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
mrecfilled = mrec.filled()
assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
dtype=float))
assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
dtype='|S8'))
def test_tolist(self):
# Test tolist.
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
assert_equal(mrec.tolist(),
[(1, 1.1, None), (2, 2.2, asbytes('two')),
(None, None, asbytes('three'))])
def test_withnames(self):
# Test the creation w/ format and names
x = mrecarray(1, formats=float, names='base')
x[0]['base'] = 10
assert_equal(x['base'][0], 10)
def test_exotic_formats(self):
# Test that 'exotic' formats are processed properly
easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
easy[0] = masked
assert_equal(easy.filled(1).item(), (1, asbytes('1'), 1.))
solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
solo[0] = masked
assert_equal(solo.filled(1).item(),
np.array((1,), dtype=solo.dtype).item())
mult = mrecarray(2, dtype="i4, (2,3)float, float")
mult[0] = masked
mult[1] = (1, 1, 1)
mult.filled(0)
assert_equal_records(mult.filled(0),
np.array([(0, 0, 0), (1, 1, 1)],
dtype=mult.dtype))
class TestView(TestCase):
def setUp(self):
(a, b) = (np.arange(10), np.random.rand(10))
ndtype = [('a', np.float), ('b', np.float)]
arr = np.array(list(zip(a, b)), dtype=ndtype)
mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
mrec.mask[3] = (False, True)
self.data = (mrec, a, b, arr)
def test_view_by_itself(self):
(mrec, a, b, arr) = self.data
test = mrec.view()
self.assertTrue(isinstance(test, MaskedRecords))
assert_equal_records(test, mrec)
assert_equal_records(test._mask, mrec._mask)
def test_view_simple_dtype(self):
(mrec, a, b, arr) = self.data
ntype = (np.float, 2)
test = mrec.view(ntype)
self.assertTrue(isinstance(test, ma.MaskedArray))
assert_equal(test, np.array(list(zip(a, b)), dtype=np.float))
self.assertTrue(test[3, 1] is ma.masked)
def test_view_flexible_type(self):
(mrec, a, b, arr) = self.data
alttype = [('A', np.float), ('B', np.float)]
test = mrec.view(alttype)
self.assertTrue(isinstance(test, MaskedRecords))
assert_equal_records(test, arr.view(alttype))
self.assertTrue(test['B'][3] is masked)
assert_equal(test.dtype, np.dtype(alttype))
self.assertTrue(test._fill_value is None)
##############################################################################
class TestMRecordsImport(TestCase):
# Base test class for MaskedArrays.
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.setup()
def setup(self):
# Generic setup
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(list(map(asbytes, ['one', 'two', 'three'])),
mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(asbytes('99999'), asbytes('99999.'),
asbytes('N/A')))
nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
self.data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
(mrec, nrec, _) = self.data
for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
assert_equal(getattr(mrec, f)._mask, l._mask)
# One record only
_x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],)
assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
def test_fromrecords(self):
# Test construction from records.
(mrec, nrec, ddtype) = self.data
#......
palist = [(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)]
pa = recfromrecords(palist, names='c1, c2, c3, c4')
mpa = fromrecords(palist, names='c1, c2, c3, c4')
assert_equal_records(pa, mpa)
#.....
_mrec = fromrecords(nrec)
assert_equal(_mrec.dtype, mrec.dtype)
for field in _mrec.dtype.names:
assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
_mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])
for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):
assert_equal(getattr(_mrec, f), getattr(mrec._data, n))
_mrec = fromrecords(mrec)
assert_equal(_mrec.dtype, mrec.dtype)
assert_equal_records(_mrec._data, mrec.filled())
assert_equal_records(_mrec._mask, mrec._mask)
def test_fromrecords_wmask(self):
# Tests construction from records w/ mask.
(mrec, nrec, ddtype) = self.data
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
_mrec = fromrecords(nrec.tolist(), dtype=ddtype,
mask=mrec._mask.tolist())
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
def test_fromtextfile(self):
# Tests reading from a text file.
fcontent = asbytes(
"""#
'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
'strings',1,1.0,'mixed column',,1
'with embedded "double quotes"',2,2.0,1.0,,1
'strings',3,3.0E5,3,,1
'strings',4,-1e-10,,,1
""")
import os
import tempfile
(tmp_fd, tmp_fl) = tempfile.mkstemp()
os.write(tmp_fd, fcontent)
os.close(tmp_fd)
mrectxt = fromtextfile(tmp_fl, delimitor=',', varnames='ABCDEFG')
os.remove(tmp_fl)
self.assertTrue(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
def test_addfield(self):
# Tests addfield
(mrec, nrec, ddtype) = self.data
(d, m) = ([100, 200, 300], [1, 0, 0])
mrec = addfield(mrec, ma.array(d, mask=m))
assert_equal(mrec.f3, d)
assert_equal(mrec.f3._mask, m)
def test_record_array_with_object_field():
# Trac #1839
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', np.object)])
# getting an item used to fail
y[1]
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -6,518,383,574,758,834,000 | 39.128846 | 78 | 0.511573 | false |
geopython/pywps-demo | docs/conf.py | 3 | 10208 | # -*- coding: utf-8 -*-
#
# PyWPS-Flask documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 11 21:27:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyWPS-Flask'
copyright = u'2016, PyWPS Development Team'
author = u'PyWPS Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'4.2'
# The full version, including alpha/beta/rc tags.
release = u'4.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
#html_logo = '_static/pywps.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# alabaster settings
html_theme_options = {
'show_related': True,
'travis_button': True,
'github_banner': True,
'github_user': 'geopython',
'github_repo': 'pywps-flask',
'github_button': True,
'logo': 'pywps.png',
'logo_name': False
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# alabaster settings
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyWPS-Flaskdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyWPS-Flask.tex', u'PyWPS-Flask Documentation',
u'PyWPS Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pywps-flask', u'PyWPS-Flask Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyWPS-Flask', u'PyWPS-Flask Documentation',
author, 'PyWPS-Flask', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
filename = info['module'].replace('.', '/')
return "http://github.com/geopython/pywps-flask/blob/master/%s.py" % filename
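# Illustrative example (not part of the original config; the module name below
# is hypothetical): for the Python domain, linkcode_resolve maps a documented
# object's module to a file on GitHub, e.g.
#
#   linkcode_resolve('py', {'module': 'pywps.app', 'fullname': 'Service'})
#   # -> 'http://github.com/geopython/pywps-flask/blob/master/pywps/app.py'
#
# Non-Python objects and objects without module information resolve to None.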
| mit | 7,300,156,629,562,849,000 | 30.800623 | 81 | 0.698374 | false |
sam-m888/gprime | gprime/db/dbconst.py | 1 | 4144 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Declare constants used by database modules
"""
#-------------------------------------------------------------------------
#
# constants
#
#-------------------------------------------------------------------------
__all__ = ( 'DBPAGE', 'DBMODE', 'DBCACHE', 'DBLOCKS', 'DBOBJECTS', 'DBUNDO',
'DBEXT', 'DBMODE_R', 'DBMODE_W', 'DBUNDOFN', 'DBLOCKFN',
'DBRECOVFN','BDBVERSFN', 'DBLOGNAME', 'SCHVERSFN',
'DBBACKEND',
'PERSON_KEY', 'FAMILY_KEY', 'SOURCE_KEY', 'CITATION_KEY',
'EVENT_KEY', 'MEDIA_KEY', 'PLACE_KEY', 'REPOSITORY_KEY',
'NOTE_KEY', 'REFERENCE_KEY', 'TAG_KEY',
'TXNADD', 'TXNUPD', 'TXNDEL',
"CLASS_TO_KEY_MAP", "KEY_TO_CLASS_MAP", "KEY_TO_NAME_MAP"
)
DBEXT = ".db" # File extension to be used for database files
DBUNDOFN = "undo.db" # File name of 'undo' database
DBLOCKFN = "lock" # File name of lock file
DBRECOVFN = "need_recover" # File name of recovery file
BDBVERSFN = "bdbversion.txt"# File name of Berkeley DB version file
DBBACKEND = "database.txt" # File name of Database backend file
SCHVERSFN = "schemaversion.txt"# File name of schema version file
DBLOGNAME = ".Db" # Name of logger
DBMODE_R = "r" # Read-only access
DBMODE_W = "w" # Full Read/Write access
DBPAGE = 16384 # Size of the pages used to hold items in the database
DBMODE = 0o666 # Unix mode for database creation
DBCACHE = 0x4000000 # Size of the shared memory buffer pool
DBLOCKS = 100000 # Maximum number of locks supported
DBOBJECTS = 100000 # Maximum number of simultaneously locked objects
DBUNDO = 1000 # Maximum size of undo buffer
PERSON_KEY = 0
FAMILY_KEY = 1
SOURCE_KEY = 2
EVENT_KEY = 3
MEDIA_KEY = 4
PLACE_KEY = 5
REPOSITORY_KEY = 6
REFERENCE_KEY = 7
NOTE_KEY = 8
TAG_KEY = 9
CITATION_KEY = 10
TXNADD, TXNUPD, TXNDEL = 0, 1, 2
CLASS_TO_KEY_MAP = {"Person": PERSON_KEY,
"Family": FAMILY_KEY,
"Source": SOURCE_KEY,
"Citation": CITATION_KEY,
"Event": EVENT_KEY,
"Media": MEDIA_KEY,
"Place": PLACE_KEY,
"Repository": REPOSITORY_KEY,
"Note" : NOTE_KEY,
"Tag": TAG_KEY}
KEY_TO_CLASS_MAP = {PERSON_KEY: "Person",
FAMILY_KEY: "Family",
SOURCE_KEY: "Source",
CITATION_KEY: "Citation",
EVENT_KEY: "Event",
MEDIA_KEY: "Media",
PLACE_KEY: "Place",
REPOSITORY_KEY: "Repository",
NOTE_KEY: "Note",
TAG_KEY: "Tag"}
KEY_TO_NAME_MAP = {PERSON_KEY: 'person',
FAMILY_KEY: 'family',
EVENT_KEY: 'event',
SOURCE_KEY: 'source',
CITATION_KEY: 'citation',
PLACE_KEY: 'place',
MEDIA_KEY: 'media',
REPOSITORY_KEY: 'repository',
#REFERENCE_KEY: 'reference',
NOTE_KEY: 'note',
TAG_KEY: 'tag'}
| gpl-2.0 | -3,658,467,685,649,608,000 | 38.466667 | 82 | 0.539575 | false |
mmnelemane/neutron | neutron/plugins/embrane/plugins/embrane_fake_plugin.py | 59 | 1274 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import extraroute_db
from neutron.plugins.embrane import base_plugin as base
from neutron.plugins.embrane.l2base.fake import fake_l2_plugin as l2
from neutron.plugins.embrane.l2base.fake import fakeplugin_support as sup
class EmbraneFakePlugin(base.EmbranePlugin, extraroute_db.ExtraRoute_db_mixin,
l2.FakeL2Plugin):
_plugin_support = sup.FakePluginSupport()
def __init__(self):
'''First run plugin specific initialization, then Embrane's.'''
self.supported_extension_aliases += ["extraroute", "router"]
l2.FakeL2Plugin.__init__(self)
self._run_embrane_config()
| apache-2.0 | 8,017,312,217,118,693,000 | 41.466667 | 78 | 0.71821 | false |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tseries/plotting.py | 7 | 9969 | """
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
# TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
from pandas.formats.printing import pprint_thing
import pandas.compat as compat
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
    Parameters
    ----------
    series : Series
    plotf : function
        Plotting function to apply to the axes.
    ax : Axes, optional
        Matplotlib axes to plot on; defaults to the current axes.
    Notes
    -----
    Supports same kwargs as Axes.plot
    """
    # Use inferred freq if possible; need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
return lines
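# Illustrative usage sketch (not part of pandas; the data below is made up):
# given a Series with a DatetimeIndex and a matplotlib Axes, tsplot draws the
# series with frequency-aware date ticks.
#
#   import numpy as np
#   import pandas as pd
#   import matplotlib.pyplot as plt
#
#   ts = pd.Series(np.random.randn(100),
#                  index=pd.date_range('2000-01-01', periods=100, freq='D'))
#   fig, ax = plt.subplots()
#   tsplot(ts, lambda ax, x, y, **kwds: ax.plot(x, y, **kwds), ax=ax)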
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = getattr(series.resample('D'), how)().dropna()
series = getattr(series.resample(ax_freq), how)().dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.tools.plotting import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(),
series.values, **kwds)[0])
labels.append(pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, 'freq', None)
if ax_freq is not None:
break
return ax_freq
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = _get_ax_freq(ax)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = _get_ax_freq(ax)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = _get_ax_freq(ax)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: (
"t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
pylab.draw_if_interactive()
| apache-2.0 | -2,089,008,490,088,386,800 | 30.84984 | 79 | 0.597251 | false |
tjsavage/full_nonrel_starter | django/core/exceptions.py | 292 | 2767 | """
Global Django exception and warning classes.
"""
class DjangoRuntimeWarning(RuntimeWarning):
pass
class ObjectDoesNotExist(Exception):
"The requested object does not exist"
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"The query returned multiple objects when only one was expected."
pass
class SuspiciousOperation(Exception):
"The user did something suspicious"
pass
class PermissionDenied(Exception):
"The user did not have permission to do that"
pass
class ViewDoesNotExist(Exception):
"The requested view does not exist"
pass
class MiddlewareNotUsed(Exception):
"This middleware is not used in this server configuration"
pass
class ImproperlyConfigured(Exception):
"Django is somehow improperly configured"
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
    def __init__(self, message, code=None, params=None):
        """
        ValidationError can be passed any object that can be printed (usually
        a string), a list of objects or a dictionary.
        """
        import operator
        from django.utils.encoding import force_unicode
if isinstance(message, dict):
self.message_dict = message
# Reduce each list of messages into a single list.
message = reduce(operator.add, message.values())
if isinstance(message, list):
self.messages = [force_unicode(msg) for msg in message]
else:
self.code = code
self.params = params
message = force_unicode(message)
self.messages = [message]
def __str__(self):
# This is needed because, without a __str__(), printing an exception
# instance would result in this:
# AttributeError: ValidationError instance has no attribute 'args'
# See http://www.python.org/doc/current/tut/node10.html#handling
if hasattr(self, 'message_dict'):
return repr(self.message_dict)
return repr(self.messages)
def __repr__(self):
if hasattr(self, 'message_dict'):
return 'ValidationError(%s)' % repr(self.message_dict)
return 'ValidationError(%s)' % repr(self.messages)
def update_error_dict(self, error_dict):
if hasattr(self, 'message_dict'):
if error_dict:
for k, v in self.message_dict.items():
error_dict.setdefault(k, []).extend(v)
else:
error_dict = self.message_dict
else:
error_dict[NON_FIELD_ERRORS] = self.messages
return error_dict
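# Illustrative sketch (not part of Django): ValidationError accepts a plain
# message, a list of messages, or a dict keyed by field name, as described in
# __init__ above.
#
#   e = ValidationError(u'Enter a valid value.')
#   # e.messages == [u'Enter a valid value.']
#   e = ValidationError({'name': [u'This field is required.']})
#   # e.message_dict == {'name': [u'This field is required.']}
#   # e.messages is the flattened list of all field messages.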
| bsd-3-clause | -3,585,812,805,993,127,400 | 30.804598 | 77 | 0.639321 | false |
RUedx/configuration | tests/test_mongodb_replica_set.py | 25 | 5499 | # Tests for mongodb_replica_set ansible module
#
# How to run these tests:
# 1. move this file to playbooks/library
# 2. rename mongodb_replica_set to mongodb_replica_set.py
# 3. python test_mongodb_replica_set.py
import mongodb_replica_set as mrs
import unittest, mock
from urllib import quote_plus
from copy import deepcopy
class TestNoPatchingMongodbReplicaSet(unittest.TestCase):
def test_host_port_transformation(self):
unfixed = {
'members': [
{'host': 'foo.bar'},
{'host': 'bar.baz', 'port': 1234},
{'host': 'baz.bing:54321'}
]}
fixed = {
'members': [
{'host': 'foo.bar:27017'},
{'host': 'bar.baz:1234'},
{'host': 'baz.bing:54321'}
]}
mrs.fix_host_port(unfixed)
self.assertEqual(fixed, unfixed)
fixed_2 = deepcopy(fixed)
mrs.fix_host_port(fixed_2)
self.assertEqual(fixed, fixed_2)
def test_member_id_managed(self):
new = [
{'host': 'foo.bar', '_id': 1},
{'host': 'bar.baz'},
{'host': 'baz.bing'}
]
old = [
{'host': 'baz.bing', '_id': 0}
]
fixed = deepcopy(new)
mrs.set_member_ids(fixed, old)
#test that each id is unique
unique_ids = {m['_id'] for m in fixed}
self.assertEqual(len(unique_ids), len(new))
#test that it "prefers" the "matching" one in old_members
self.assertEqual(fixed[0]['_id'], new[0]['_id'])
self.assertEqual(fixed[2]['_id'], old[0]['_id'])
self.assertIn('_id', fixed[1])
def test_mongo_uri_escaped(self):
host = username = password = auth_database = ':!@#$%/'
port = 1234
uri = mrs.get_mongo_uri(host=host, port=port, username=username, password=password, auth_database=auth_database)
self.assertEqual(uri, "mongodb://{un}:{pw}@{host}:{port}/{db}".format(
un=quote_plus(username), pw=quote_plus(password),
host=quote_plus(host), port=port, db=quote_plus(auth_database),
))
rs_id = 'a replset id'
members = [
{'host': 'foo.bar:1234'},
{'host': 'bar.baz:4321'},
]
old_rs_config = {
'version': 1,
'_id': rs_id,
'members': [
{'_id': 0, 'host': 'foo.bar:1234',},
{'_id': 1, 'host': 'bar.baz:4321',},
]
}
new_rs_config = {
'version': 2,
'_id': rs_id,
'members': [
{'_id': 0, 'host': 'foo.bar:1234',},
{'_id': 1, 'host': 'bar.baz:4321',},
{'_id': 2, 'host': 'baz.bing:27017',},
]
}
rs_config = {
'members': [
{'host': 'foo.bar', 'port': 1234,},
{'host': 'bar.baz', 'port': 4321,},
{'host': 'baz.bing', 'port': 27017,},
]
}
def init_replset_mock(f):
get_replset_initialize_mock = mock.patch.object(mrs, 'get_replset',
side_effect=(None, deepcopy(new_rs_config)))
initialize_replset_mock = mock.patch.object(mrs, 'initialize_replset')
return get_replset_initialize_mock(initialize_replset_mock(f))
def update_replset_mock(f):
get_replset_update_mock = mock.patch.object(mrs, 'get_replset',
side_effect=(deepcopy(old_rs_config), deepcopy(new_rs_config)))
reconfig_replset_mock = mock.patch.object(mrs, 'reconfig_replset')
return get_replset_update_mock(reconfig_replset_mock(f))
@mock.patch.object(mrs, 'get_rs_config_id', return_value=rs_id)
@mock.patch.object(mrs, 'client', create=True)
@mock.patch.object(mrs, 'module', create=True)
class TestPatchingMongodbReplicaSet(unittest.TestCase):
@update_replset_mock
def test_version_managed(self, _1, _2, module, *args):
# Version set automatically on initialize
mrs.update_replset(deepcopy(rs_config))
new_version = module.exit_json.call_args[1]['config']['version']
self.assertEqual(old_rs_config['version'], new_version - 1)
@init_replset_mock
def test_doc_id_managed_on_initialize(self, _1, _2, module, *args):
#old_rs_config provided by init_replset_mock via mrs.get_replset().
#That returns None on the first call, so it falls through to get_rs_config_id(),
#which is also mocked.
mrs.update_replset(deepcopy(rs_config))
new_id = module.exit_json.call_args[1]['config']['_id']
self.assertEqual(rs_id, new_id)
@update_replset_mock
def test_doc_id_managed_on_update(self, _1, _2, module, *args):
#old_rs_config provided by update_replset_mock via mrs.get_replset()
mrs.update_replset(deepcopy(rs_config))
new_id = module.exit_json.call_args[1]['config']['_id']
self.assertEqual(rs_id, new_id)
@init_replset_mock
def test_initialize_if_necessary(self, initialize_replset, _2, module, *args):
mrs.update_replset(deepcopy(rs_config))
self.assertTrue(initialize_replset.called)
#self.assertFalse(reconfig_replset.called)
@update_replset_mock
def test_reconfig_if_necessary(self, reconfig_replset, _2, module, *args):
mrs.update_replset(deepcopy(rs_config))
self.assertTrue(reconfig_replset.called)
#self.assertFalse(initialize_replset.called)
@update_replset_mock
def test_not_changed_when_docs_match(self, _1, _2, module, *args):
rs_config = {'members': members} #This way the docs "match", but aren't identical
mrs.update_replset(deepcopy(rs_config))
changed = module.exit_json.call_args[1]['changed']
self.assertFalse(changed)
@update_replset_mock
def test_ignores_magic_given_full_doc(self, _1, _2, module, _3, get_rs_config_id, *args):
mrs.update_replset(deepcopy(new_rs_config))
new_doc = module.exit_json.call_args[1]['config']
self.assertEqual(new_doc, new_rs_config)
self.assertFalse(get_rs_config_id.called)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,217,086,046,112,699,000 | 32.126506 | 116 | 0.643753 | false |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/_bootlocale.py | 40 | 1801 | """A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.
Don't import directly from third-party code; use the `locale` module instead!
"""
import sys
import _locale
if sys.platform.startswith("win"):
def getpreferredencoding(do_setlocale=True):
if sys.flags.utf8_mode:
return 'UTF-8'
return _locale._getdefaultlocale()[1]
else:
try:
_locale.CODESET
except AttributeError:
if hasattr(sys, 'getandroidapilevel'):
# On Android langinfo.h and CODESET are missing, and UTF-8 is
# always used in mbstowcs() and wcstombs().
def getpreferredencoding(do_setlocale=True):
return 'UTF-8'
else:
def getpreferredencoding(do_setlocale=True):
if sys.flags.utf8_mode:
return 'UTF-8'
# This path for legacy systems needs the more complex
# getdefaultlocale() function, import the full locale module.
import locale
return locale.getpreferredencoding(do_setlocale)
else:
def getpreferredencoding(do_setlocale=True):
assert not do_setlocale
if sys.flags.utf8_mode:
return 'UTF-8'
result = _locale.nl_langinfo(_locale.CODESET)
if not result and sys.platform == 'darwin':
# nl_langinfo can return an empty string
# when the setting has an invalid value.
# Default to UTF-8 in that case because
# UTF-8 is the default charset on OSX and
# returning nothing will crash the
# interpreter.
result = 'UTF-8'
return result
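# Illustrative note (not part of this module): application code should go
# through the full locale module instead of importing _bootlocale directly,
# e.g.
#
#   import locale
#   locale.getpreferredencoding(False)   # e.g. 'UTF-8'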
| apache-2.0 | 6,849,969,450,716,602,000 | 38.152174 | 77 | 0.588007 | false |
mixja/eap-sim-lab | lib/pyscard-1.6.16/build/lib.macosx-10.10-x86_64-2.7/smartcard/CardConnection.py | 2 | 7754 | """The CardConnection abstract class manages connections with a card and
apdu transmission.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from smartcard.CardConnectionEvent import CardConnectionEvent
from smartcard.Exceptions import SmartcardException
from smartcard.Observer import Observer
from smartcard.Observer import Observable
class CardConnection(Observable):
"""Card connection abstract class.
Known subclasses: smartcard.pcsc.PCSCCardConnection
"""
T0_protocol = 0x00000001
T1_protocol = 0x00000002
RAW_protocol = 0x00010000
T15_protocol = 0x00000008
def __init__(self, reader):
"""Construct a new card connection.
readerName: name of the reader in which the smartcard to connect
to is located.
"""
Observable.__init__(self)
self.reader = reader
self.errorcheckingchain = None
self.defaultprotocol = CardConnection.T0_protocol |\
CardConnection.T1_protocol
    def __del__(self):
        """Destructor (no-op in this abstract base class)."""
        pass
def addSWExceptionToFilter(self, exClass):
"""Add a status word exception class to be filtered.
exClass: the class to filter, e.g.
smartcard.sw.SWException.WarningProcessingException
Filtered exceptions will not be raised when encountered in the
error checking chain."""
if None != self.errorcheckingchain:
self.errorcheckingchain[0].addFilterException(exClass)
def addObserver(self, observer):
"""Add a CardConnection observer."""
Observable.addObserver(self, observer)
def deleteObserver(self, observer):
"""Remove a CardConnection observer."""
Observable.deleteObserver(self, observer)
def connect(self, protocol=None, mode=None, disposition=None):
"""Connect to card.
protocol: a bit mask of the protocols to use, from
CardConnection.T0_protocol, CardConnection.T1_protocol,
CardConnection.RAW_protocol, CardConnection.T15_protocol
mode: passed as-is to the PC/SC layer
"""
Observable.setChanged(self)
Observable.notifyObservers(self, CardConnectionEvent('connect'))
def disconnect(self):
"""Disconnect from card."""
Observable.setChanged(self)
Observable.notifyObservers(self, CardConnectionEvent('disconnect'))
def getATR(self):
"""Return card ATR"""
pass
def getProtocol(self):
"""Return bit mask for the protocol of connection, or None if no
protocol set. The return value is a bit mask of
CardConnection.T0_protocol, CardConnection.T1_protocol,
CardConnection.RAW_protocol, CardConnection.T15_protocol
"""
return self.defaultprotocol
def getReader(self):
"""Return card connection reader"""
return self.reader
def setErrorCheckingChain(self, errorcheckingchain):
"""Add an error checking chain.
        errorcheckingchain: a smartcard.sw.ErrorCheckingChain object. The
        error checking strategies in the error checking chain will be tested
with each received response APDU, and a
smartcard.sw.SWException.SWException will be raised upon
error."""
self.errorcheckingchain = errorcheckingchain
def setProtocol(self, protocol):
"""Set protocol for card connection.
protocol: a bit mask of CardConnection.T0_protocol,
CardConnection.T1_protocol, CardConnection.RAW_protocol,
CardConnection.T15_protocol e.g.
setProtocol(CardConnection.T1_protocol |
CardConnection.T0_protocol) """
self.defaultprotocol = protocol
def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
bytes: list of bytes to transmit
protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[bytes, protocol]))
data, sw1, sw2 = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
[data, sw1, sw2]))
if None != self.errorcheckingchain:
self.errorcheckingchain[0](data, sw1, sw2)
return data, sw1, sw2
def doTransmit(self, bytes, protocol):
"""Performs the command APDU transmission.
Subclasses must override this method for implementing apdu
transmission."""
pass
def control(self, controlCode, bytes=[]):
"""Send a control command and buffer. Internally calls doControl()
class method and notify observers upon command/response events.
Subclasses must override the doControl() class method.
controlCode: command code
bytes: list of bytes to transmit
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[controlCode, bytes]))
data = self.doControl(controlCode, bytes)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
data))
if None != self.errorcheckingchain:
self.errorcheckingchain[0](data)
return data
def doControl(self, controlCode, bytes):
"""Performs the command control.
Subclasses must override this method for implementing control."""
pass
def getAttrib(self, attribId):
"""return the requested attribute
attribId: attribute id like SCARD_ATTR_VENDOR_NAME
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'attrib',
[attribId]))
data = self.doGetAttrib(attribId)
if None != self.errorcheckingchain:
self.errorcheckingchain[0](data)
return data
def doGetAttrib(self, attribId):
"""Performs the command get attrib.
Subclasses must override this method for implementing get attrib."""
pass
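# Illustrative usage sketch (not part of pyscard): CardConnection is abstract,
# so a concrete subclass such as smartcard.pcsc.PCSCCardConnection is normally
# obtained from a reader; the APDU below is a made-up example.
#
#   from smartcard.System import readers
#   connection = readers()[0].createConnection()
#   connection.connect(CardConnection.T1_protocol)
#   SELECT = [0x00, 0xA4, 0x04, 0x00, 0x00]
#   data, sw1, sw2 = connection.transmit(SELECT)
#   if (sw1, sw2) == (0x90, 0x00):
#       print 'ATR:', connection.getATR()
#   connection.disconnect()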
| mit | -4,269,073,220,857,971,700 | 35.92381 | 76 | 0.630513 | false |
mcepl/youtube-dl | youtube_dl/extractor/charlierose.py | 12 | 1709 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import remove_end
class CharlieRoseIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/video(?:s|/player)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://charlierose.com/videos/27996',
'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
'info_dict': {
'id': '27996',
'ext': 'mp4',
'title': 'Remembering Zaha Hadid',
'thumbnail': 're:^https?://.*\.jpg\?\d+',
'description': 'We revisit past conversations with Zaha Hadid, in memory of the world renowned Iraqi architect.',
'subtitles': {
'en': [{
'ext': 'vtt',
}],
},
},
}, {
'url': 'https://charlierose.com/videos/27996',
'only_matching': True,
}]
_PLAYER_BASE = 'https://charlierose.com/video/player/%s'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(self._PLAYER_BASE % video_id, video_id)
title = remove_end(self._og_search_title(webpage), ' - Charlie Rose')
info_dict = self._parse_html5_media_entries(
self._PLAYER_BASE % video_id, webpage, video_id,
m3u8_entry_protocol='m3u8_native')[0]
self._sort_formats(info_dict['formats'])
self._remove_duplicate_formats(info_dict['formats'])
info_dict.update({
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
})
return info_dict
| unlicense | 2,609,851,148,422,163,000 | 32.509804 | 125 | 0.547689 | false |
ttfseiko/openerp-trunk | openerp/addons/resource/faces/observer.py | 433 | 2328 | #@+leo-ver=4
#@+node:@file observer.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains the base class for all observer objects
"""
#@<< Imports >>
#@+node:<< Imports >>
#@-node:<< Imports >>
#@nl
_is_source_ = True
#@+others
#@+node:class Observer
class Observer(object):
"""
Base Class for all charts and reports.
@var visible: Specifies if the observer is visible
at the navigation bar inside the gui.
@var link_view: syncronizes the marked objects in all views.
"""
#@ << declarations >>
#@+node:<< declarations >>
__type_name__ = None
__type_image__ = None
visible = True
link_view = True
__attrib_completions__ = { "visible" : 'visible = False',
"link_view" : "link_view = False" }
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:register_editors
def register_editors(cls, registry):
pass
register_editors = classmethod(register_editors)
#@-node:register_editors
#@-others
#@-node:class Observer
#@-others
factories = { }
clear_cache_funcs = {}
#@-node:@file observer.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,827,398,224,180,213,000 | 27.740741 | 76 | 0.588058 | false |
qmagico/sampleappqm | src/django/db/backends/postgresql_psycopg2/introspection.py | 82 | 4121 | from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
null_map = dict(cursor.fetchall())
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [tuple([item for item in line[:6]] + [null_map[line[0]]==u'YES'])
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
# row[0] and row[1] are single-item lists, so grab the single item.
relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
return indexes
| mit | -7,720,499,757,029,289,000 | 42.840426 | 95 | 0.581655 | false |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.2/Lib/multifile.py | 67 | 4982 | """A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
from warnings import warn
warn("the multifile module has been deprecated since Python 2.5",
DeprecationWarning, stacklevel=2)
del warn
__all__ = ["MultiFile","Error"]
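# Concrete sketch of the "Suggested use" in the docstring above (illustrative
# only; the file name and boundary string are assumptions):
#
#   fp = MultiFile(open('message.txt'))
#   fp.push('BOUNDARY')     # boundary as it appears after the leading '--'
#   fp.readlines()          # skip any preamble before the first boundary
#   while fp.next():        # advance to each part; returns 0 at the end marker
#       part = fp.read()    # read one part as if it were an ordinary file
#   fp.pop()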
class Error(Exception):
pass
class MultiFile:
seekable = 0
def __init__(self, fp, seekable=1):
self.fp = fp
self.stack = []
self.level = 0
self.last = 0
if seekable:
self.seekable = 1
self.start = self.fp.tell()
self.posstack = []
def tell(self):
if self.level > 0:
return self.lastpos
return self.fp.tell() - self.start
def seek(self, pos, whence=0):
here = self.tell()
if whence:
if whence == 1:
pos = pos + here
elif whence == 2:
if self.level > 0:
pos = pos + self.lastpos
else:
raise Error, "can't use whence=2 yet"
if not 0 <= pos <= here or \
self.level > 0 and pos > self.lastpos:
raise Error, 'bad MultiFile.seek() call'
self.fp.seek(pos + self.start)
self.level = 0
self.last = 0
def readline(self):
if self.level > 0:
return ''
line = self.fp.readline()
# Real EOF?
if not line:
self.level = len(self.stack)
self.last = (self.level > 0)
if self.last:
raise Error, 'sudden EOF in MultiFile.readline()'
return ''
assert self.level == 0
# Fast check to see if this is just data
if self.is_data(line):
return line
else:
# Ignore trailing whitespace on marker lines
marker = line.rstrip()
# No? OK, try to match a boundary.
# Return the line (unstripped) if we don't.
for i, sep in enumerate(reversed(self.stack)):
if marker == self.section_divider(sep):
self.last = 0
break
elif marker == self.end_marker(sep):
self.last = 1
break
else:
return line
# We only get here if we see a section divider or EOM line
if self.seekable:
self.lastpos = self.tell() - len(line)
self.level = i+1
if self.level > 1:
raise Error,'Missing endmarker in MultiFile.readline()'
return ''
def readlines(self):
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
return list
def read(self): # Note: no size argument -- read until EOF only!
return ''.join(self.readlines())
def next(self):
while self.readline(): pass
if self.level > 1 or self.last:
return 0
self.level = 0
self.last = 0
if self.seekable:
self.start = self.fp.tell()
return 1
def push(self, sep):
if self.level > 0:
raise Error, 'bad MultiFile.push() call'
self.stack.append(sep)
if self.seekable:
self.posstack.append(self.start)
self.start = self.fp.tell()
def pop(self):
if self.stack == []:
raise Error, 'bad MultiFile.pop() call'
if self.level <= 1:
self.last = 0
else:
abslastpos = self.lastpos + self.start
self.level = max(0, self.level - 1)
self.stack.pop()
if self.seekable:
self.start = self.posstack.pop()
if self.level > 0:
self.lastpos = abslastpos - self.start
def is_data(self, line):
return line[:2] != '--'
def section_divider(self, str):
return "--" + str
def end_marker(self, str):
return "--" + str + "--"
| bsd-2-clause | -6,837,340,273,322,319,000 | 28.753086 | 76 | 0.530711 | false |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/samples/apps/marketplace_sample/gdata/tlslite/BaseDB.py | 238 | 3508 | """Base class for SharedKeyDB and VerifierDB."""
import anydbm
import thread
class BaseDB:
def __init__(self, filename, type):
self.type = type
self.filename = filename
if self.filename:
self.db = None
else:
self.db = {}
self.lock = thread.allocate_lock()
def create(self):
"""Create a new on-disk database.
@raise anydbm.error: If there's a problem creating the database.
"""
if self.filename:
self.db = anydbm.open(self.filename, "n") #raises anydbm.error
self.db["--Reserved--type"] = self.type
self.db.sync()
else:
self.db = {}
def open(self):
"""Open a pre-existing on-disk database.
@raise anydbm.error: If there's a problem opening the database.
@raise ValueError: If the database is not of the right type.
"""
if not self.filename:
raise ValueError("Can only open on-disk databases")
self.db = anydbm.open(self.filename, "w") #raises anydbm.error
try:
if self.db["--Reserved--type"] != self.type:
raise ValueError("Not a %s database" % self.type)
except KeyError:
raise ValueError("Not a recognized database")
def __getitem__(self, username):
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
valueStr = self.db[username]
finally:
self.lock.release()
return self._getItem(username, valueStr)
def __setitem__(self, username, value):
if self.db == None:
raise AssertionError("DB not open")
valueStr = self._setItem(username, value)
self.lock.acquire()
try:
self.db[username] = valueStr
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __delitem__(self, username):
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
del(self.db[username])
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __contains__(self, username):
"""Check if the database contains the specified username.
@type username: str
@param username: The username to check for.
@rtype: bool
@return: True if the database contains the username, False
otherwise.
"""
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
return self.db.has_key(username)
finally:
self.lock.release()
def check(self, username, param):
value = self.__getitem__(username)
return self._checkItem(value, username, param)
def keys(self):
"""Return a list of usernames in the database.
@rtype: list
@return: The usernames in the database.
"""
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
usernames = self.db.keys()
finally:
self.lock.release()
usernames = [u for u in usernames if not u.startswith("--Reserved--")]
return usernames | gpl-3.0 | 6,861,533,148,632,504,000 | 27.25 | 78 | 0.527081 | false |
dunkhong/grr/grr/server/grr_response_server/gui/api_plugins/artifact_test.py | 2 | 4766 | #!/usr/bin/env python
"""This module contains tests for the artifact API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from absl import app
from grr_response_core import config
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import artifact
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import artifact as artifact_plugin
from grr.test_lib import artifact_test_lib
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
@db_test_lib.TestDatabases()
class ApiListArtifactsHandlerTest(flow_test_lib.FlowTestsBaseclass):
"""Test for ApiListArtifactsHandler."""
def setUp(self):
super(ApiListArtifactsHandlerTest, self).setUp()
self.handler = artifact_plugin.ApiListArtifactsHandler()
@artifact_test_lib.PatchCleanArtifactRegistry
def testNoArtifacts(self, _):
result = self.handler.Handle(self.handler.args_type(), token=self.token)
self.assertEqual(result.total_count, 0)
self.assertEqual(result.items, [])
@artifact_test_lib.PatchDefaultArtifactRegistry
def testPrepackagedArtifacts(self, registry):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
registry.AddFileSource(test_artifacts_file)
result = self.handler.Handle(self.handler.args_type(), token=self.token)
# Some artifacts are guaranteed to be returned, as they're defined in
# the test_data/artifacts/test_artifacts.json.
self.assertTrue(result.total_count)
# Check that FakeArtifact artifact exists. It's guaranteed to exist, since
# it's defined in test_data/artifacts/test_artifacts.json.
for item in result.items:
if item.artifact.name == "FakeArtifact":
fake_artifact = item
self.assertTrue(fake_artifact)
self.assertTrue(fake_artifact.HasField("is_custom"))
self.assertFalse(fake_artifact.is_custom)
self.assertTrue(fake_artifact.artifact.doc)
self.assertTrue(fake_artifact.artifact.labels)
self.assertTrue(fake_artifact.artifact.supported_os)
@db_test_lib.TestDatabases()
class ApiUploadArtifactHandlerTest(api_test_lib.ApiCallHandlerTest):
def setUp(self):
super(ApiUploadArtifactHandlerTest, self).setUp()
self.handler = artifact_plugin.ApiUploadArtifactHandler()
@artifact_test_lib.PatchCleanArtifactRegistry
def testUpload(self, registry):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifact.json")
with open(test_artifacts_file, "rb") as fd:
args = self.handler.args_type(artifact=fd.read())
with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
registry.GetArtifact("TestDrivers")
self.handler.Handle(args, token=self.token)
registry.GetArtifact("TestDrivers")
@db_test_lib.TestDatabases()
@artifact_test_lib.PatchDefaultArtifactRegistry
class ApiDeleteArtifactsHandlerTest(api_test_lib.ApiCallHandlerTest):
def setUp(self):
super(ApiDeleteArtifactsHandlerTest, self).setUp()
self.handler = artifact_plugin.ApiDeleteArtifactsHandler()
def UploadTestArtifacts(self):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
with io.open(test_artifacts_file, mode="r", encoding="utf-8") as fd:
artifact.UploadArtifactYamlFile(fd.read())
def testDeletesArtifactsWithSpecifiedNames(self, registry):
self.UploadTestArtifacts()
count = len(registry.GetArtifacts(reload_datastore_artifacts=True))
args = self.handler.args_type(
names=["TestFilesArtifact", "WMIActiveScriptEventConsumer"])
self.handler.Handle(args, token=self.token)
new_count = len(registry.GetArtifacts())
# Check that we deleted exactly 2 artifacts.
self.assertEqual(new_count, count - 2)
def testDeleteDependency(self, registry):
self.UploadTestArtifacts()
args = self.handler.args_type(names=["TestAggregationArtifact"])
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testDeleteNonExistentArtifact(self, registry):
self.UploadTestArtifacts()
args = self.handler.args_type(names=["NonExistentArtifact"])
e = self.assertRaises(ValueError)
with e:
self.handler.Handle(args, token=self.token)
self.assertEqual(
str(e.exception),
"Artifact(s) to delete (NonExistentArtifact) not found.")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 9,059,644,636,355,025,000 | 34.044118 | 78 | 0.727235 | false |
ngageoint/voxel-globe | voxel_globe/tiepoint_registration/views.py | 2 | 2848 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
def tiepoint_registration_1(request):
from voxel_globe.meta import models
image_set_list = models.ImageSet.objects.all()
return render(request, 'tiepoint_registration/html/tiepoint_registration_1.html',
{'image_set_list':image_set_list})
def tiepoint_registration_2(request, image_set_id):
from voxel_globe.meta import models
camera_set_list = models.ImageSet.objects.get(id=image_set_id).cameras.all()
return render(request, 'tiepoint_registration/html/tiepoint_registration_2.html',
{'camera_set_list':camera_set_list,
'image_set_id':image_set_id})
def tiepoint_registration_3(request, image_set_id, camera_set_id):
from voxel_globe.tiepoint_registration import tasks
image_set_id = int(image_set_id)
t = tasks.tiepoint_registration.apply_async(args=(image_set_id,camera_set_id), user=request.user)
return render(request, 'tiepoint_registration/html/tiepoint_registration_3.html',
{'task_id': t.task_id})
def tiepoint_error_1(request):
from voxel_globe.meta import models
image_set_list = models.ImageSet.objects.all()
return render(request, 'tiepoint_registration/html/tiepoint_error_1.html',
{'image_set_list':image_set_list})
def tiepoint_error_2(request, image_set_id):
from voxel_globe.meta import models
camera_set_list = models.ImageSet.objects.get(id=image_set_id).cameras.all()
return render(request, 'tiepoint_registration/html/tiepoint_error_2.html',
{'camera_set_list':camera_set_list,
'image_set_id':image_set_id})
def tiepoint_error_3(request, image_set_id, camera_set_id):
from voxel_globe.meta import models
scene_list = models.Scene.objects.all()
return render(request, 'tiepoint_registration/html/tiepoint_error_3.html',
{'scene_list':scene_list,
'camera_set_id':camera_set_id,
'image_set_id':image_set_id})
def tiepoint_error_4(request, image_set_id, camera_set_id, scene_id):
from voxel_globe.tiepoint_registration import tasks
image_set_id = int(image_set_id)
t = tasks.tiepoint_error_calculation.apply_async(args=(image_set_id,
camera_set_id,
scene_id),
user=request.user)
return render(request, 'tiepoint_registration/html/tiepoint_error_4.html',
{'task_id': t.task_id})
def order_status(request, task_id):
from celery.result import AsyncResult
task = AsyncResult(task_id)
return render(request, 'task/html/task_3d_error_results.html',
{'task': task}) | mit | -6,116,839,462,472,354,000 | 40.289855 | 99 | 0.655548 | false |
freeflightsim/fg-flying-club | google_appengine/lib/webob/tests/test_response.py | 32 | 1407 | from webob import *
def simple_app(environ, start_response):
start_response('200 OK', [
('Content-Type', 'text/html; charset=utf8'),
])
return ['OK']
def test_response():
req = Request.blank('/')
res = req.get_response(simple_app)
assert res.status == '200 OK'
assert res.status_int == 200
assert res.body == "OK"
assert res.charset == 'utf8'
assert res.content_type == 'text/html'
res.status = 404
assert res.status == '404 Not Found'
assert res.status_int == 404
res.body = 'Not OK'
assert ''.join(res.app_iter) == 'Not OK'
res.charset = 'iso8859-1'
assert res.headers['content-type'] == 'text/html; charset=iso8859-1'
res.content_type = 'text/xml'
assert res.headers['content-type'] == 'text/xml; charset=iso8859-1'
res.headers = {'content-type': 'text/html'}
assert res.headers['content-type'] == 'text/html'
assert res.headerlist == [('content-type', 'text/html')]
res.set_cookie('x', 'y')
assert res.headers['set-cookie'].strip(';') == 'x=y; Path=/'
res = Response('a body', '200 OK', content_type='text/html')
res.encode_content()
assert res.content_encoding == 'gzip'
assert res.body == '\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xffKTH\xcaO\xa9\x04\x00\xf6\x86GI\x06\x00\x00\x00'
res.decode_content()
assert res.content_encoding is None
assert res.body == 'a body'
| gpl-2.0 | 2,741,876,406,248,880,000 | 37.027027 | 111 | 0.622601 | false |
wreckJ/intellij-community/python/helpers/pydev/pydevd_console.py | 42 | 7593 | '''A helper file for the pydev debugger (REPL) console
'''
from code import InteractiveConsole
import sys
import traceback
import _pydev_completer
from pydevd_tracing import GetExceptionTracebackStr
from pydevd_vars import makeValidXmlValue
from pydev_imports import Exec
from pydevd_io import IOBuf
from pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from pydev_override import overrides
import pydevd_save_locals
CONSOLE_OUTPUT = "output"
CONSOLE_ERROR = "error"
#=======================================================================================================================
# ConsoleMessage
#=======================================================================================================================
class ConsoleMessage:
"""Console Messages
"""
def __init__(self):
self.more = False
# List of tuple [('error', 'error_message'), ('message_list', 'output_message')]
self.console_messages = []
def add_console_message(self, message_type, message):
"""add messages in the console_messages list
"""
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m))
def update_more(self, more):
"""more is set to true if further input is required from the user
else more is set to false
"""
self.more = more
def toXML(self):
"""Create an XML for console message_list, error and more (true/false)
<xml>
<message_list>console message_list</message_list>
<error>console error</error>
<more>true/false</more>
</xml>
"""
makeValid = makeValidXmlValue
xml = '<xml><more>%s</more>' % (self.more)
for message_type, message in self.console_messages:
xml += '<%s message="%s"></%s>' % (message_type, makeValid(message), message_type)
xml += '</xml>'
return xml
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
    @overrides(BaseStdIn.readline)
def readline(self, *args, **kwargs):
sys.stderr.write('Warning: Reading from stdin is still not supported in this console.\n')
return '\n'
#=======================================================================================================================
# DebugConsole
#=======================================================================================================================
class DebugConsole(InteractiveConsole, BaseInterpreterInterface):
"""Wrapper around code.InteractiveConsole, in order to send
errors and outputs to the debug console
"""
    @overrides(BaseInterpreterInterface.createStdIn)
def createStdIn(self):
return DebugConsoleStdIn() #For now, raw_input is not supported in this console.
    @overrides(InteractiveConsole.push)
def push(self, line, frame):
"""Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
Return boolean (True if more input is required else False),
output_messages and input_messages
"""
more = False
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
try:
self.frame = frame
out = sys.stdout = IOBuf()
err = sys.stderr = IOBuf()
more = self.addExec(line)
except Exception:
exc = GetExceptionTracebackStr()
err.buflist.append("Internal Error: %s" % (exc,))
finally:
#Remove frame references.
self.frame = None
frame = None
sys.stdout = original_stdout
sys.stderr = original_stderr
return more, out.buflist, err.buflist
    @overrides(BaseInterpreterInterface.doAddExec)
def doAddExec(self, line):
return InteractiveConsole.push(self, line)
    @overrides(InteractiveConsole.runcode)
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
Exec(code, self.frame.f_globals, self.frame.f_locals)
pydevd_save_locals.save_locals(self.frame)
except SystemExit:
raise
except:
self.showtraceback()
#=======================================================================================================================
# InteractiveConsoleCache
#=======================================================================================================================
class InteractiveConsoleCache:
thread_id = None
frame_id = None
interactive_console_instance = None
#Note: On Jython 2.1 we can't use classmethod or staticmethod, so, just make the functions below free-functions.
def get_interactive_console(thread_id, frame_id, frame, console_message):
"""returns the global interactive console.
interactive console should have been initialized by this time
"""
if InteractiveConsoleCache.thread_id == thread_id and InteractiveConsoleCache.frame_id == frame_id:
return InteractiveConsoleCache.interactive_console_instance
InteractiveConsoleCache.interactive_console_instance = DebugConsole()
InteractiveConsoleCache.thread_id = thread_id
InteractiveConsoleCache.frame_id = frame_id
console_stacktrace = traceback.extract_stack(frame, limit=1)
if console_stacktrace:
current_context = console_stacktrace[0] # top entry from stacktrace
context_message = 'File "%s", line %s, in %s' % (current_context[0], current_context[1], current_context[2])
console_message.add_console_message(CONSOLE_OUTPUT, "[Current context]: %s" % (context_message,))
return InteractiveConsoleCache.interactive_console_instance
def clear_interactive_console():
InteractiveConsoleCache.thread_id = None
InteractiveConsoleCache.frame_id = None
InteractiveConsoleCache.interactive_console_instance = None
def execute_console_command(frame, thread_id, frame_id, line):
"""fetch an interactive console instance from the cache and
push the received command to the console.
create and return an instance of console_message
"""
console_message = ConsoleMessage()
interpreter = get_interactive_console(thread_id, frame_id, frame, console_message)
more, output_messages, error_messages = interpreter.push(line, frame)
console_message.update_more(more)
for message in output_messages:
console_message.add_console_message(CONSOLE_OUTPUT, message)
for message in error_messages:
console_message.add_console_message(CONSOLE_ERROR, message)
return console_message
def get_completions(frame, act_tok):
""" fetch all completions, create xml for the same
return the completions xml
"""
return _pydev_completer.GenerateCompletionsAsXML(frame, act_tok)
| apache-2.0 | 4,058,554,590,669,140,000 | 34.816038 | 120 | 0.582905 | false |
aringh/odl | odl/contrib/solvers/spdhg/misc.py | 1 | 22813 | # Copyright 2014-2018 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Functions for folders and files."""
from __future__ import print_function
from builtins import super
import numpy as np
import odl
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
from skimage.io import imsave
__all__ = ('total_variation', 'TotalVariationNonNegative', 'bregman',
'save_image', 'save_signal', 'divide_1Darray_equally', 'Blur2D',
'KullbackLeiblerSmooth')
def save_image(image, name, folder, fignum, cmap='gray', clim=None):
matplotlib.rc('text', usetex=False)
fig = plt.figure(fignum)
plt.clf()
image.show(name, cmap=cmap, fig=fig)
fig.savefig('{}/{}_fig.png'.format(folder, name), bbox_inches='tight')
if clim is None:
x = image - np.min(image)
if np.max(x) > 1e-4:
x /= np.max(x)
else:
x = (image - clim[0]) / (clim[1] - clim[0])
x = np.minimum(np.maximum(x, 0), 1)
imsave('{}/{}.png'.format(folder, name), np.rot90(x, 1))
def save_signal(signal, name, folder, fignum):
matplotlib.rc('text', usetex=False)
fig = plt.figure(fignum)
plt.clf()
signal.show(name, fig=fig)
fig.savefig('{}/{}_fig.png'.format(folder, name), bbox_inches='tight')
def bregman(f, v, subgrad):
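    # (Added note) Builds the Bregman-distance functional
    #     x -> f(x) - f(v) - <subgrad, x - v>
    # for the given subgradient of ``f`` at ``v``.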
return (odl.solvers.FunctionalQuadraticPerturb(f, linear_term=-subgrad) -
f(v) + subgrad.inner(v))
def partition_1d(arr, slices):
return tuple(arr[slc] for slc in slices)
def partition_equally_1d(arr, nparts, order='interlaced'):
if order == 'block':
stride = int(np.ceil(arr.size / nparts))
slc_list = [slice(i * stride, (i + 1) * stride) for i in range(nparts)]
elif order == 'interlaced':
slc_list = [slice(i, len(arr), nparts) for i in range(nparts)]
else:
raise ValueError
return partition_1d(arr, slc_list)
def divide_1Darray_equally(ind, nsub):
"""Divide an array into equal chunks to be used for instance in OSEM.
Parameters
----------
ind : ndarray
input array
    nsub : int
number of subsets to be divided into
Returns
-------
sub2ind : list
list of indices for each subset
ind2sub : list
list of subsets for each index
"""
n_ind = len(ind)
sub2ind = partition_equally_1d(ind, nsub, order='interlaced')
ind2sub = []
for i in range(n_ind):
ind2sub.append([])
for i in range(nsub):
for j in sub2ind[i]:
ind2sub[j].append(i)
return (sub2ind, ind2sub)
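# Illustrative example for ``divide_1Darray_equally`` (added; hypothetical call):
#   sub2ind, ind2sub = divide_1Darray_equally(np.arange(6), 2)
#   # sub2ind == (array([0, 2, 4]), array([1, 3, 5]))  (interlaced order)
#   # ind2sub == [[0], [1], [0], [1], [0], [1]]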
def total_variation(domain, grad=None):
"""Total variation functional.
Parameters
----------
domain : odlspace
domain of TV functional
grad : gradient operator, optional
Gradient operator of the total variation functional. This may be any
linear operator and thereby generalizing TV. default=forward
differences with Neumann boundary conditions
Examples
--------
Check that the total variation of a constant is zero
    >>> import odl.contrib.solvers.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tv = spdhg.total_variation(space)
>>> x = space.one()
    >>> tv(x) < 1e-10
    True
"""
if grad is None:
grad = odl.Gradient(domain, method='forward', pad_mode='symmetric')
grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))
else:
grad = grad
f = odl.solvers.GroupL1Norm(grad.range, exponent=2)
return f * grad
class TotalVariationNonNegative(odl.solvers.Functional):
"""Total variation function with nonnegativity constraint and strongly
convex relaxation.
In formulas, this functional may represent
alpha * |grad x|_1 + char_fun(x) + beta/2 |x|^2_2
with regularization parameter alpha and strong convexity beta. In addition,
the nonnegativity constraint is achieved with the characteristic function
char_fun(x) = 0 if x >= 0 and infty else.
Parameters
----------
domain : odlspace
domain of TV functional
alpha : scalar, optional
Regularization parameter, positive
prox_options : dict, optional
name: string, optional
name of the method to perform the prox operator, default=FGP
warmstart: boolean, optional
Do you want a warm start, i.e. start with the dual variable
from the last call? default=True
niter: int, optional
number of iterations per call, default=5
p: array, optional
initial dual variable, default=zeros
grad : gradient operator, optional
Gradient operator to be used within the total variation functional.
default=see TV
"""
    def __init__(self, domain, alpha=1, prox_options=None, grad=None,
                 strong_convexity=0):
        """Initialize a new instance.
        """
        if prox_options is None:
            prox_options = {}
        self.strong_convexity = strong_convexity
if 'name' not in prox_options:
prox_options['name'] = 'FGP'
if 'warmstart' not in prox_options:
prox_options['warmstart'] = True
if 'niter' not in prox_options:
prox_options['niter'] = 5
if 'p' not in prox_options:
prox_options['p'] = None
if 'tol' not in prox_options:
prox_options['tol'] = None
self.prox_options = prox_options
self.alpha = alpha
self.tv = total_variation(domain, grad=grad)
self.grad = self.tv.right
self.nn = odl.solvers.IndicatorBox(domain, 0, np.inf)
self.l2 = 0.5 * odl.solvers.L2NormSquared(domain)
self.proj_P = self.tv.left.convex_conj.proximal(0)
self.proj_C = self.nn.proximal(1)
super().__init__(space=domain, linear=False, grad_lipschitz=0)
def __call__(self, x):
"""Evaluate functional.
Examples
--------
Check that the total variation of a constant is zero
        >>> import odl.contrib.solvers.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = space.one()
        >>> tvnn(x) < 1e-10
        True
Check that negative functions are mapped to infty
        >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
        >>> np.isinf(tvnn(x))
        True
"""
nn = self.nn(x)
if nn is np.inf:
return nn
else:
out = self.alpha * self.tv(x) + nn
if self.strong_convexity > 0:
out += self.strong_convexity * self.l2(x)
return out
def proximal(self, sigma):
"""Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
        >>> (y-x).norm() < 1e-10
        True
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
        >>> y.norm() < 1e-10
        True
"""
if sigma == 0:
return odl.IdentityOperator(self.domain)
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero()
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= (1 + sigma * self.strong_convexity)
z_ /= (1 + sigma * self.strong_convexity)
if opts['name'] == 'FGP':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero()
p = opts['p']
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out
else:
raise NotImplementedError('Not yet implemented')
return tv_prox
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
"""Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
"""
# Callback object
callback = kwargs.pop('callback', None)
if callback is not None and not callable(callback):
raise TypeError('`callback` {} is not callable'.format(callback))
factr = 1 / (grad.norm**2 * alpha)
q = p.copy()
x = data.space.zero()
t = 1.
if tol is None:
def convergence_eval(p1, p2):
return False
else:
def convergence_eval(p1, p2):
return (p1 - p2).norm() / p1.norm() < tol
pnew = p.copy()
if callback is not None:
callback(p)
for k in range(niter):
t0 = t
grad.adjoint(q, out=x)
proj_C(data - alpha * x, out=x)
grad(x, out=pnew)
pnew *= factr
pnew += q
proj_P(pnew, out=pnew)
converged = convergence_eval(p, pnew)
if not converged:
# update step size
t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.
# calculate next iterate
q[:] = pnew + (t0 - 1) / t * (pnew - p)
p[:] = pnew
if converged:
t = None
break
if callback is not None:
callback(p)
# get current image estimate
x = proj_C(data - alpha * grad.adjoint(p))
return x
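# Minimal usage sketch for ``fgp_dual`` (added for illustration; ``noisy`` is a
# hypothetical element of ``space`` and the gradient norm follows the same
# convention as in ``total_variation`` above):
#   space = odl.uniform_discr([0, 0], [1, 1], [64, 64])
#   g = odl.Gradient(space, method='forward', pad_mode='symmetric')
#   g.norm = 2 * np.sqrt(sum(1 / g.domain.cell_sides**2))
#   proj_C = odl.solvers.IndicatorBox(space, 0, np.inf).proximal(1)
#   proj_P = odl.solvers.GroupL1Norm(g.range, exponent=2).convex_conj.proximal(0)
#   denoised = fgp_dual(g.range.zero(), noisy, 0.1, 20, g, proj_C, proj_P)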
class Blur2D(odl.Operator):
"""Blur operator"""
def __init__(self, domain, kernel, boundary_condition='wrap'):
"""Initialize a new instance.
"""
super().__init__(domain=domain, range=domain, linear=True)
self.__kernel = kernel
self.__boundary_condition = boundary_condition
@property
def kernel(self):
return self.__kernel
@property
def boundary_condition(self):
return self.__boundary_condition
def _call(self, x, out):
        out[:] = scipy.signal.convolve2d(x, self.kernel, mode='same',
                                         boundary=self.boundary_condition)
@property
def gradient(self):
        raise NotImplementedError('Not yet implemented')
@property
def adjoint(self):
adjoint_kernel = self.kernel.copy().conj()
adjoint_kernel = np.fliplr(np.flipud(adjoint_kernel))
return Blur2D(self.domain, adjoint_kernel, self.boundary_condition)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.kernel,
self.boundary_condition)
class KullbackLeiblerSmooth(odl.solvers.Functional):
"""The smooth Kullback-Leibler divergence functional.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
smooth Kullback-Leibler functional :math:`\\phi` is defined as
.. math::
\\phi(x) = \\sum_{i=1}^n \\begin{cases}
x + r - y + y * \\log(y / (x + r))
& \\text{if $x \geq 0$} \\
                (y / (2 * r^2)) * x^2 + (1 - y / r) * x + r - y +
                    y * \\log(y / r) & \\text{else}
\\end{cases}
where all variables on the right hand side of the equation have a subscript
i which is omitted for readability.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
def __init__(self, space, data, background):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
data : ``space`` `element-like`
Data vector which has to be non-negative.
background : ``space`` `element-like`
Background vector which has to be non-negative.
"""
self.strong_convexity = 0
if background.ufuncs.less_equal(0).ufuncs.sum() > 0:
raise NotImplementedError('Background must be positive')
super().__init__(space=space, linear=False,
grad_lipschitz=np.max(data / background ** 2))
if data not in self.domain:
            raise ValueError('`data` {!r} not in `domain` {!r}'
                             ''.format(data, self.domain))
self.__data = data
self.__background = background
@property
def data(self):
"""The data in the Kullback-Leibler functional."""
return self.__data
@property
def background(self):
"""The background in the Kullback-Leibler functional."""
return self.__background
def _call(self, x):
"""Return the KL-diveregnce in the point ``x``.
If any components of ``x`` is non-positive, the value is positive
infinity.
"""
y = self.data
r = self.background
obj = self.domain.zero()
# x + r - y + y * log(y / (x + r)) = x - y * log(x + r) + c1
# with c1 = r - y + y * log y
i = x.ufuncs.greater_equal(0)
obj[i] = x[i] + r[i] - y[i]
j = y.ufuncs.greater(0)
k = i.ufuncs.logical_and(j)
obj[k] += y[k] * (y[k] / (x[k] + r[k])).ufuncs.log()
# (y / (2 * r^2)) * x^2 + (1 - y / r) * x + r - b + b * log(b / r)
# = (y / (2 * r^2)) * x^2 + (1 - y / r) * x + c2
# with c2 = r - b + b * log(b / r)
i = i.ufuncs.logical_not()
obj[i] += (y[i] / (2 * r[i]**2) * x[i]**2 + (1 - y[i] / r[i]) * x[i] +
r[i] - y[i])
k = i.ufuncs.logical_and(j)
obj[k] += y[k] * (y[k] / r[k]).ufuncs.log()
return obj.inner(self.domain.one())
@property
def gradient(self):
"""Gradient operator of the functional.
"""
        raise NotImplementedError('Not yet implemented')
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
"""
        raise NotImplementedError('Not yet implemented')
@property
def convex_conj(self):
"""The convex conjugate functional of the KL-functional."""
return KullbackLeiblerSmoothConvexConj(self.domain, self.data,
self.background)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.data, self.background)
class KullbackLeiblerSmoothConvexConj(odl.solvers.Functional):
"""The convex conjugate of the smooth Kullback-Leibler divergence functional.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
convex conjugate of the smooth Kullback-Leibler functional :math:`\\phi^*`
is defined as
.. math::
\\phi^*(x) = \\sum_{i=1}^n \\begin{cases}
r^2 / (2 * y) * x^2 + (r - r^2 / y) * x + r^2 / (2 * y) +
3 / 2 * y - 2 * r - y * log(y / r)
& \\text{if $x < 1 - y / r$} \\
- r * x - y * log(1 - x)
                & \\text{if $1 - y / r <= x < 1$} \\
+ \infty
& \\text{else}
\\end{cases}
where all variables on the right hand side of the equation have a subscript
:math:`i` which is omitted for readability.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
def __init__(self, space, data, background):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
data : ``space`` `element-like`
Data vector which has to be non-negative.
background : ``space`` `element-like`
Background vector which has to be non-negative.
"""
if background.ufuncs.less_equal(0).ufuncs.sum() > 0:
raise NotImplementedError('Background must be positive')
super().__init__(space=space, linear=False,
grad_lipschitz=np.inf)
if data is not None and data not in self.domain:
            raise ValueError('`data` {!r} not in `domain` {!r}'
                             ''.format(data, self.domain))
self.__data = data
self.__background = background
if np.min(self.data) == 0:
self.strong_convexity = np.inf
else:
self.strong_convexity = np.min(self.background**2 / self.data)
@property
def data(self):
"""The data in the Kullback-Leibler functional."""
return self.__data
@property
def background(self):
"""The background in the Kullback-Leibler functional."""
return self.__background
def _call(self, x):
"""Return the value in the point ``x``.
        If any component of ``x`` is larger than or equal to 1, the value is
positive infinity.
"""
# TODO: cover properly the case data = 0
y = self.data
r = self.background
# if any element is greater or equal to one
if x.ufuncs.greater_equal(1).ufuncs.sum() > 0:
return np.inf
obj = self.domain.zero()
# out = sum(f)
# f =
# if x < 1 - y / r:
# r^2 / (2 * y) * x^2 + (r - r^2 / y) * x + r^2 / (2 * y) +
# 3 / 2 * y - 2 * r - y * log(y / r)
# if x >= 1 - y / r:
# - r * x - y * log(1 - x)
i = x.ufuncs.less(1 - y / r)
ry = r[i]**2 / y[i]
obj[i] += (ry / 2 * x[i]**2 + (r[i] - ry) * x[i] + ry / 2 +
3 / 2 * y[i] - 2 * r[i])
j = y.ufuncs.greater(0)
k = i.ufuncs.logical_and(j)
obj[k] -= y[k] * (y[k] / r[k]).ufuncs.log()
i = i.ufuncs.logical_not()
obj[i] -= r[i] * x[i]
k = i.ufuncs.logical_and(j)
obj[k] -= y[k] * (1 - x[k]).ufuncs.log()
return obj.inner(self.domain.one())
@property
def gradient(self):
"""Gradient operator of the functional."""
        raise NotImplementedError('Not yet implemented')
@property
def proximal(self):
space = self.domain
y = self.data
r = self.background
class ProxKullbackLeiblerSmoothConvexConj(odl.Operator):
"""Proximal operator of the convex conjugate of the smooth
Kullback-Leibler functional.
"""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter
"""
self.sigma = float(sigma)
self.background = r
self.data = y
super().__init__(domain=space, range=space, linear=False)
def _call(self, x, out):
s = self.sigma
y = self.data
r = self.background
sr = s * r
sy = s * y
# out =
# if x < 1 - y / r:
# (y * x - s * r * y + s * r**2) / (y + s * r**2)
# if x >= 1 - y / r:
# 0.5 * (x + s * r + 1 -
            #              sqrt((x + s * r - 1)**2 + 4 * s * y))
i = x.ufuncs.less(1 - y / r)
# TODO: This may be faster without indexing on the GPU?
out[i] = ((y[i] * x[i] - sr[i] * y[i] + sr[i] * r[i]) /
(y[i] + sr[i] * r[i]))
i.ufuncs.logical_not(out=i)
out[i] = (x[i] + sr[i] + 1 -
((x[i] + sr[i] - 1) ** 2 + 4 * sy[i]).ufuncs.sqrt())
out[i] /= 2
return out
return ProxKullbackLeiblerSmoothConvexConj
@property
def convex_conj(self):
"""The convex conjugate functional of the smooth KL-functional."""
return KullbackLeiblerSmooth(self.domain, self.data,
self.background)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.data, self.background)
| mpl-2.0 | -6,678,552,785,341,398,000 | 29.621477 | 81 | 0.528383 | false |
Jeff-Tian/mybnb | Python27/Lib/lib-tk/test/test_ttk/test_functions.py | 2 | 17810 | # -*- encoding: utf-8 -*-
import sys
import unittest
import ttk
class MockTkApp:
def splitlist(self, arg):
if isinstance(arg, tuple):
return arg
return arg.split(':')
def wantobjects(self):
return True
class MockTclObj(object):
typename = 'test'
def __init__(self, val):
self.val = val
def __str__(self):
return unicode(self.val)
class MockStateSpec(object):
typename = 'StateSpec'
def __init__(self, *args):
self.val = args
def __str__(self):
return ' '.join(self.val)
class InternalFunctionsTest(unittest.TestCase):
def test_format_optdict(self):
def check_against(fmt_opts, result):
for i in range(0, len(fmt_opts), 2):
self.assertEqual(result.pop(fmt_opts[i]), fmt_opts[i + 1])
if result:
self.fail("result still got elements: %s" % result)
# passing an empty dict should return an empty object (tuple here)
self.assertFalse(ttk._format_optdict({}))
# check list formatting
check_against(
ttk._format_optdict({'fg': 'blue', 'padding': [1, 2, 3, 4]}),
{'-fg': 'blue', '-padding': '1 2 3 4'})
# check tuple formatting (same as list)
check_against(
ttk._format_optdict({'test': (1, 2, '', 0)}),
{'-test': '1 2 {} 0'})
# check untouched values
check_against(
ttk._format_optdict({'test': {'left': 'as is'}}),
{'-test': {'left': 'as is'}})
# check script formatting
check_against(
ttk._format_optdict(
{'test': [1, -1, '', '2m', 0], 'test2': 3,
'test3': '', 'test4': 'abc def',
'test5': '"abc"', 'test6': '{}',
'test7': '} -spam {'}, script=True),
{'-test': '{1 -1 {} 2m 0}', '-test2': '3',
'-test3': '{}', '-test4': '{abc def}',
'-test5': '{"abc"}', '-test6': r'\{\}',
'-test7': r'\}\ -spam\ \{'})
opts = {u'αβγ': True, u'á': False}
orig_opts = opts.copy()
# check if giving unicode keys is fine
check_against(ttk._format_optdict(opts), {u'-αβγ': True, u'-á': False})
# opts should remain unchanged
self.assertEqual(opts, orig_opts)
# passing values with spaces inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('one two', 'three')}),
{'-option': '{one two} three'})
check_against(
ttk._format_optdict(
{'option': ('one\ttwo', 'three')}),
{'-option': '{one\ttwo} three'})
# passing empty strings inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('', 'one')}),
{'-option': '{} one'})
# passing values with braces inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('one} {two', 'three')}),
{'-option': r'one\}\ \{two three'})
# passing quoted strings inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('"one"', 'two')}),
{'-option': '{"one"} two'})
check_against(
ttk._format_optdict(
{'option': ('{one}', 'two')}),
{'-option': r'\{one\} two'})
# ignore an option
amount_opts = len(ttk._format_optdict(opts, ignore=(u'á'))) // 2
self.assertEqual(amount_opts, len(opts) - 1)
# ignore non-existing options
amount_opts = len(ttk._format_optdict(opts, ignore=(u'á', 'b'))) // 2
self.assertEqual(amount_opts, len(opts) - 1)
# ignore every option
self.assertFalse(ttk._format_optdict(opts, ignore=opts.keys()))
def test_format_mapdict(self):
opts = {'a': [('b', 'c', 'val'), ('d', 'otherval'), ('', 'single')]}
result = ttk._format_mapdict(opts)
self.assertEqual(len(result), len(opts.keys()) * 2)
self.assertEqual(result, ('-a', '{b c} val d otherval {} single'))
self.assertEqual(ttk._format_mapdict(opts, script=True),
('-a', '{{b c} val d otherval {} single}'))
self.assertEqual(ttk._format_mapdict({2: []}), ('-2', ''))
opts = {u'üñíćódè': [(u'á', u'vãl')]}
result = ttk._format_mapdict(opts)
self.assertEqual(result, (u'-üñíćódè', u'á vãl'))
# empty states
valid = {'opt': [('', u'', 'hi')]}
self.assertEqual(ttk._format_mapdict(valid), ('-opt', '{ } hi'))
# when passing multiple states, they all must be strings
invalid = {'opt': [(1, 2, 'valid val')]}
self.assertRaises(TypeError, ttk._format_mapdict, invalid)
invalid = {'opt': [([1], '2', 'valid val')]}
self.assertRaises(TypeError, ttk._format_mapdict, invalid)
# but when passing a single state, it can be anything
valid = {'opt': [[1, 'value']]}
self.assertEqual(ttk._format_mapdict(valid), ('-opt', '1 value'))
        # special attention to single states which evaluate to False
for stateval in (None, 0, False, '', set()): # just some samples
valid = {'opt': [(stateval, 'value')]}
self.assertEqual(ttk._format_mapdict(valid),
('-opt', '{} value'))
# values must be iterable
opts = {'a': None}
self.assertRaises(TypeError, ttk._format_mapdict, opts)
# items in the value must have size >= 2
self.assertRaises(IndexError, ttk._format_mapdict,
{'a': [('invalid', )]})
def test_format_elemcreate(self):
self.assertTrue(ttk._format_elemcreate(None), (None, ()))
## Testing type = image
# image type expects at least an image name, so this should raise
# IndexError since it tries to access the index 0 of an empty tuple
self.assertRaises(IndexError, ttk._format_elemcreate, 'image')
# don't format returned values as a tcl script
# minimum acceptable for image type
self.assertEqual(ttk._format_elemcreate('image', False, 'test'),
("test ", ()))
# specifying a state spec
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('', 'a')), ("test {} a", ()))
# state spec with multiple states
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('a', 'b', 'c')), ("test {a b} c", ()))
# state spec and options
res = ttk._format_elemcreate('image', False, 'test',
('a', 'b'), a='x', b='y')
self.assertEqual(res[0], "test a b")
self.assertEqual(set(res[1]), {"-a", "x", "-b", "y"})
# format returned values as a tcl script
# state spec with multiple states and an option with a multivalue
self.assertEqual(ttk._format_elemcreate('image', True, 'test',
('a', 'b', 'c', 'd'), x=[2, 3]), ("{test {a b c} d}", "-x {2 3}"))
## Testing type = vsapi
# vsapi type expects at least a class name and a part_id, so this
        # should raise a ValueError since it tries to get two elements from
# an empty tuple
self.assertRaises(ValueError, ttk._format_elemcreate, 'vsapi')
# don't format returned values as a tcl script
# minimum acceptable for vsapi
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b'),
("a b ", ()))
# now with a state spec with multiple states
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b',
('a', 'b', 'c')), ("a b {a b} c", ()))
# state spec and option
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b',
('a', 'b'), opt='x'), ("a b a b", ("-opt", "x")))
# format returned values as a tcl script
# state spec with a multivalue and an option
self.assertEqual(ttk._format_elemcreate('vsapi', True, 'a', 'b',
('a', 'b', [1, 2]), opt='x'), ("{a b {a b} {1 2}}", "-opt x"))
# Testing type = from
# from type expects at least a type name
self.assertRaises(IndexError, ttk._format_elemcreate, 'from')
self.assertEqual(ttk._format_elemcreate('from', False, 'a'),
('a', ()))
self.assertEqual(ttk._format_elemcreate('from', False, 'a', 'b'),
('a', ('b', )))
self.assertEqual(ttk._format_elemcreate('from', True, 'a', 'b'),
('{a}', 'b'))
def test_format_layoutlist(self):
def sample(indent=0, indent_size=2):
return ttk._format_layoutlist(
[('a', {'other': [1, 2, 3], 'children':
[('b', {'children':
[('c', {'children':
[('d', {'nice': 'opt'})], 'something': (1, 2)
})]
})]
})], indent=indent, indent_size=indent_size)[0]
def sample_expected(indent=0, indent_size=2):
spaces = lambda amount=0: ' ' * (amount + indent)
return (
"%sa -other {1 2 3} -children {\n"
"%sb -children {\n"
"%sc -something {1 2} -children {\n"
"%sd -nice opt\n"
"%s}\n"
"%s}\n"
"%s}" % (spaces(), spaces(indent_size),
spaces(2 * indent_size), spaces(3 * indent_size),
spaces(2 * indent_size), spaces(indent_size), spaces()))
# empty layout
self.assertEqual(ttk._format_layoutlist([])[0], '')
# smallest (after an empty one) acceptable layout
smallest = ttk._format_layoutlist([('a', None)], indent=0)
self.assertEqual(smallest,
ttk._format_layoutlist([('a', '')], indent=0))
self.assertEqual(smallest[0], 'a')
# testing indentation levels
self.assertEqual(sample(), sample_expected())
for i in range(4):
self.assertEqual(sample(i), sample_expected(i))
self.assertEqual(sample(i, i), sample_expected(i, i))
# invalid layout format, different kind of exceptions will be
# raised
# plain wrong format
self.assertRaises(ValueError, ttk._format_layoutlist,
['bad', 'format'])
self.assertRaises(TypeError, ttk._format_layoutlist, None)
# _format_layoutlist always expects the second item (in every item)
        # to act like a dict (except when the value evaluates to False).
self.assertRaises(AttributeError,
ttk._format_layoutlist, [('a', 'b')])
# bad children formatting
self.assertRaises(ValueError, ttk._format_layoutlist,
[('name', {'children': {'a': None}})])
def test_script_from_settings(self):
# empty options
self.assertFalse(ttk._script_from_settings({'name':
{'configure': None, 'map': None, 'element create': None}}))
# empty layout
self.assertEqual(
ttk._script_from_settings({'name': {'layout': None}}),
"ttk::style layout name {\nnull\n}")
configdict = {u'αβγ': True, u'á': False}
self.assertTrue(
ttk._script_from_settings({'name': {'configure': configdict}}))
mapdict = {u'üñíćódè': [(u'á', u'vãl')]}
self.assertTrue(
ttk._script_from_settings({'name': {'map': mapdict}}))
# invalid image element
self.assertRaises(IndexError,
ttk._script_from_settings, {'name': {'element create': ['image']}})
# minimal valid image
self.assertTrue(ttk._script_from_settings({'name':
{'element create': ['image', 'name']}}))
image = {'thing': {'element create':
['image', 'name', ('state1', 'state2', 'val')]}}
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} ")
image['thing']['element create'].append({'opt': 30})
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} "
"-opt 30")
image['thing']['element create'][-1]['opt'] = [MockTclObj(3),
MockTclObj('2m')]
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} "
"-opt {3 2m}")
def test_tclobj_to_py(self):
self.assertEqual(
ttk._tclobj_to_py((MockStateSpec('a', 'b'), 'val')),
[('a', 'b', 'val')])
self.assertEqual(
ttk._tclobj_to_py([MockTclObj('1'), 2, MockTclObj('3m')]),
[1, 2, '3m'])
def test_list_from_statespec(self):
def test_it(sspec, value, res_value, states):
self.assertEqual(ttk._list_from_statespec(
(sspec, value)), [states + (res_value, )])
states_even = tuple('state%d' % i for i in range(6))
statespec = MockStateSpec(*states_even)
test_it(statespec, 'val', 'val', states_even)
test_it(statespec, MockTclObj('val'), 'val', states_even)
states_odd = tuple('state%d' % i for i in range(5))
statespec = MockStateSpec(*states_odd)
test_it(statespec, 'val', 'val', states_odd)
test_it(('a', 'b', 'c'), MockTclObj('val'), 'val', ('a', 'b', 'c'))
def test_list_from_layouttuple(self):
tk = MockTkApp()
# empty layout tuple
self.assertFalse(ttk._list_from_layouttuple(tk, ()))
# shortest layout tuple
self.assertEqual(ttk._list_from_layouttuple(tk, ('name', )),
[('name', {})])
# not so interesting ltuple
sample_ltuple = ('name', '-option', 'value')
self.assertEqual(ttk._list_from_layouttuple(tk, sample_ltuple),
[('name', {'option': 'value'})])
# empty children
self.assertEqual(ttk._list_from_layouttuple(tk,
('something', '-children', ())),
[('something', {'children': []})]
)
# more interesting ltuple
ltuple = (
'name', '-option', 'niceone', '-children', (
('otherone', '-children', (
('child', )), '-otheropt', 'othervalue'
)
)
)
self.assertEqual(ttk._list_from_layouttuple(tk, ltuple),
[('name', {'option': 'niceone', 'children':
[('otherone', {'otheropt': 'othervalue', 'children':
[('child', {})]
})]
})]
)
# bad tuples
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus'))
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus', 'value'))
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('something', '-children')) # no children
def test_val_or_dict(self):
def func(res, opt=None, val=None):
if opt is None:
return res
if val is None:
return "test val"
return (opt, val)
tk = MockTkApp()
tk.call = func
self.assertEqual(ttk._val_or_dict(tk, {}, '-test:3'),
{'test': '3'})
self.assertEqual(ttk._val_or_dict(tk, {}, ('-test', 3)),
{'test': 3})
self.assertEqual(ttk._val_or_dict(tk, {'test': None}, 'x:y'),
'test val')
self.assertEqual(ttk._val_or_dict(tk, {'test': 3}, 'x:y'),
{'test': 3})
def test_convert_stringval(self):
tests = (
(0, 0), ('09', 9), ('a', 'a'), (u'áÚ', u'áÚ'), ([], '[]'),
(None, 'None')
)
for orig, expected in tests:
self.assertEqual(ttk._convert_stringval(orig), expected)
if sys.getdefaultencoding() == 'ascii':
self.assertRaises(UnicodeDecodeError,
ttk._convert_stringval, 'á')
class TclObjsToPyTest(unittest.TestCase):
def test_unicode(self):
adict = {'opt': u'välúè'}
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': u'välúè'})
adict['opt'] = MockTclObj(adict['opt'])
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': u'välúè'})
def test_multivalues(self):
adict = {'opt': [1, 2, 3, 4]}
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': [1, 2, 3, 4]})
adict['opt'] = [1, 'xm', 3]
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': [1, 'xm', 3]})
adict['opt'] = (MockStateSpec('a', 'b'), u'válũè')
self.assertEqual(ttk.tclobjs_to_py(adict),
{'opt': [('a', 'b', u'válũè')]})
self.assertEqual(ttk.tclobjs_to_py({'x': ['y z']}),
{'x': ['y z']})
def test_nosplit(self):
self.assertEqual(ttk.tclobjs_to_py({'text': 'some text'}),
{'text': 'some text'})
tests_nogui = (InternalFunctionsTest, TclObjsToPyTest)
if __name__ == "__main__":
from test.test_support import run_unittest
run_unittest(*tests_nogui)
| apache-2.0 | -6,791,793,834,562,789,000 | 36.094421 | 80 | 0.504619 | false |
laurentb/weboob | modules/journaldesfemmes/module.py | 1 | 2124 | # -*- coding: utf-8 -*-
# Copyright(C) 2018 Phyks (Lucas Verney)
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.recipe import CapRecipe, Recipe
from .browser import JournaldesfemmesBrowser
__all__ = ['JournaldesfemmesModule']
class JournaldesfemmesModule(Module, CapRecipe):
NAME = 'journaldesfemmes'
DESCRIPTION = 'journaldesfemmes website'
MAINTAINER = 'Phyks (Lucas Verney)'
EMAIL = '[email protected]'
LICENSE = 'AGPLv3+'
VERSION = '2.1'
BROWSER = JournaldesfemmesBrowser
def get_recipe(self, _id):
"""
Get a recipe object from an ID.
:param _id: ID of recipe
:type _id: str
:rtype: :class:`Recipe`
"""
return self.browser.get_recipe(_id)
def iter_recipes(self, pattern):
"""
Search recipes and iterate on results.
:param pattern: pattern to search
:type pattern: str
:rtype: iter[:class:`Recipe`]
"""
return self.browser.search_recipes(pattern)
def fill_recipe(self, recipe, fields):
if 'nb_person' in fields or 'instructions' in fields:
recipe = self.browser.get_recipe(recipe.id, recipe)
if 'comments' in fields:
recipe.comments = list(self.browser.get_comments(recipe.id))
return recipe
OBJECTS = {Recipe: fill_recipe}
| lgpl-3.0 | 1,661,880,022,513,466,600 | 28.915493 | 77 | 0.675612 | false |
RT-Thread/rt-thread | bsp/tm4c129x/rtconfig.py | 12 | 3663 | # BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LaunchPad (REV D)
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
PART_TYPE = 'PART_TM4C129XNCZAD'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'D:\ArdaArmTools\Sourcery_Lite\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:\Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# tool-chains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-tm4c129x.map,-cref,-u,Reset_Handler -T tm4c_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter tm4c_rom.sct --info sizes --info totals --info unused --info veneers --list rtthread-tm4c129x.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'
EXEC_PATH += '/arm/armcc/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm' # + ' -D' + PART_TYPE
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "tm4c_rom.icf"'
LFLAGS += ' --entry __iar_program_start'
#LFLAGS += ' --silent'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = '' | apache-2.0 | -1,989,079,979,853,010,700 | 26.548872 | 141 | 0.548185 | false |
nhr/openshift-ansible | roles/lib_openshift/src/lib/project.py | 82 | 2503 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class ProjectConfig(OpenShiftCLIConfig):
''' project config object '''
def __init__(self, rname, namespace, kubeconfig, project_options):
super(ProjectConfig, self).__init__(rname, None, kubeconfig, project_options)
class Project(Yedit):
''' Class to wrap the oc command line tools '''
annotations_path = "metadata.annotations"
kind = 'Project'
annotation_prefix = 'openshift.io/'
def __init__(self, content):
'''Project constructor'''
super(Project, self).__init__(content=content)
def get_annotations(self):
''' return the annotations'''
return self.get(Project.annotations_path) or {}
def add_annotations(self, inc_annos):
''' add an annotation to the other annotations'''
if not isinstance(inc_annos, list):
inc_annos = [inc_annos]
annos = self.get_annotations()
if not annos:
self.put(Project.annotations_path, inc_annos)
else:
for anno in inc_annos:
for key, value in anno.items():
annos[key] = value
return True
def find_annotation(self, key):
''' find an annotation'''
annotations = self.get_annotations()
for anno in annotations:
if Project.annotation_prefix + key == anno:
return annotations[anno]
return None
def delete_annotation(self, inc_anno_keys):
''' remove an annotation from a project'''
if not isinstance(inc_anno_keys, list):
inc_anno_keys = [inc_anno_keys]
annos = self.get(Project.annotations_path) or {}
if not annos:
return True
removed = False
for inc_anno in inc_anno_keys:
anno = self.find_annotation(inc_anno)
if anno:
                del annos[Project.annotation_prefix + inc_anno]
removed = True
return removed
def update_annotation(self, key, value):
        ''' update an annotation for a project'''
annos = self.get(Project.annotations_path) or {}
if not annos:
return True
updated = False
anno = self.find_annotation(key)
if anno:
annos[Project.annotation_prefix + key] = value
updated = True
else:
self.add_annotations({Project.annotation_prefix + key: value})
return updated
| apache-2.0 | -6,775,963,328,135,646,000 | 28.447059 | 85 | 0.584499 | false |
nicobot/electron | tools/mac/apply_locales.py | 202 | 1454 | #!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] format_string locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 3:
print 'ERROR: need string and list of locales'
return 1
str_template = arglist[1]
locales = arglist[2:]
results = []
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
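    # e.g. (illustrative): with -d, template 'ZZLOCALE.lproj' and locales
    # ['en-US', 'pt-BR'] produce 'en.lproj' 'pt_BR.lproj'.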
if options.dash_to_underscore:
if locale == 'en-US':
locale = 'en'
locale = locale.replace('-', '_')
results.append(str_template.replace('ZZLOCALE', locale))
# Quote each element so filename spaces don't mess up GYP's attempt to parse
# it into a list.
print ' '.join(["'%s'" % x for x in results])
if __name__ == '__main__':
sys.exit(main(sys.argv)) | mit | 7,979,353,858,805,241,000 | 31.333333 | 78 | 0.634801 | false |
klen/peewee_migrate | tests/test_cli.py | 1 | 3157 | from click.testing import CliRunner
import pytest
from peewee_migrate.cli import cli, get_router
runner = CliRunner()
@pytest.fixture
def dir_option(tmpdir):
return '--directory=%s' % tmpdir
@pytest.fixture
def db_url(tmpdir):
db_path = '%s/test_sqlite.db' % tmpdir
open(db_path, 'a').close()
return 'sqlite:///%s' % db_path
@pytest.fixture
def db_option(db_url):
return '--database=%s' % db_url
@pytest.fixture
def router(tmpdir, db_url):
return lambda: get_router(str(tmpdir), db_url)
@pytest.fixture
def migrations(router):
migrations_number = 5
name = 'test'
for i in range(migrations_number):
router().create(name)
return ['00%s_test' % i for i in range(1, migrations_number + 1)]
@pytest.fixture
def migrations_str(migrations):
return ', '.join(migrations)
def test_help():
result = runner.invoke(cli, ['--help'])
assert result.exit_code == 0
assert 'migrate' in result.output
assert 'create' in result.output
assert 'rollback' in result.output
def test_create(dir_option, db_option):
for i in range(2):
result = runner.invoke(cli, ['create', dir_option, db_option, '-vvv', 'test'])
assert result.exit_code == 0
def test_migrate(dir_option, db_option, migrations_str):
result = runner.invoke(cli, ['migrate', dir_option, db_option])
assert result.exit_code == 0
assert 'Migrations completed: %s' % migrations_str in result.output
def test_list(dir_option, db_option, migrations):
result = runner.invoke(cli, ['list', dir_option, db_option])
assert 'Migrations are done:\n' in result.output
assert 'Migrations are undone:\n%s' % '\n'.join(migrations) in result.output
def test_rollback(dir_option, db_option, router, migrations):
router().run()
count_overflow = len(migrations) + 1
result = runner.invoke(cli, ['rollback', dir_option, db_option, '--count=%s' % count_overflow])
assert result.exception
assert 'Unable to rollback %s migrations' % count_overflow in result.exception.args[0]
assert router().done == migrations
result = runner.invoke(cli, ['rollback', dir_option, db_option])
assert not result.exception
assert router().done == migrations[:-1]
result = runner.invoke(cli, ['rollback', dir_option, db_option, '004_test'])
assert not result.exception
assert router().done == migrations[:-2]
result = runner.invoke(cli, ['rollback', dir_option, db_option, '--count=2'])
assert not result.exception
assert router().done == migrations[:-4]
result = runner.invoke(cli, ['rollback', dir_option, db_option, '005_test'])
assert result.exception
assert result.exception.args[0] == 'Only last migration can be canceled.'
assert router().done == migrations[:-4]
def test_fake(dir_option, db_option, migrations_str, router):
result = runner.invoke(cli, ['migrate', dir_option, db_option, '-v', '--fake'])
assert result.exit_code == 0
assert 'Migrations completed: %s' % migrations_str in result.output
# TODO: Find a way of testing fake. This is unclear why the following fails.
# assert not router().done
| bsd-3-clause | 3,618,773,772,246,275,000 | 28.783019 | 99 | 0.670573 | false |
akretion/bank-statement-reconcile-simple | account_statement_completion_label_simple/wizard/account_statement_label_create.py | 1 | 1772 | # Copyright 2018-2019 Akretion France (http://www.akretion.com/)
# @author: Alexis de Lattre <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class AccountStatementLabelCreate(models.TransientModel):
_name = 'account.statement.label.create'
_description = 'Account Statement Label Create Wizard'
@api.model
def default_get(self, fields_list):
res = super(AccountStatementLabelCreate, self).default_get(
fields_list)
assert self._context.get('active_model') ==\
'account.bank.statement.line', 'Wrong active model'
assert self._context.get('active_id'), 'missing active_id in context'
line = self.env['account.bank.statement.line'].browse(
self._context['active_id'])
res.update({
'new_label': line.name,
'statement_line_id': line.id,
})
return res
statement_line_id = fields.Many2one(
'account.bank.statement.line', string='Bank Statement Line',
readonly=True)
current_label = fields.Char(
related='statement_line_id.name', readonly=True,
string='Statement Line Label')
new_label = fields.Char(string="New Label", required=True)
partner_id = fields.Many2one(
'res.partner', string='Partner', domain=[('parent_id', '=', False)],
required=True)
def run(self):
self.ensure_one()
self.env['account.statement.label'].create({
'partner_id': self.partner_id.id,
'label': self.new_label,
'company_id': self.statement_line_id.company_id.id,
})
self.statement_line_id.statement_id.update_statement_lines()
return True
| agpl-3.0 | -3,214,800,522,493,972,500 | 37.521739 | 77 | 0.630361 | false |
jumpserver/jumpserver | apps/ops/inventory.py | 2 | 5220 | # -*- coding: utf-8 -*-
#
from django.conf import settings
from .ansible.inventory import BaseInventory
from common.utils import get_logger
__all__ = [
'JMSInventory', 'JMSCustomInventory',
]
logger = get_logger(__file__)
class JMSBaseInventory(BaseInventory):
windows_ssh_default_shell = settings.WINDOWS_SSH_DEFAULT_SHELL
def convert_to_ansible(self, asset, run_as_admin=False):
info = {
'id': asset.id,
'hostname': asset.hostname,
'ip': asset.ip,
'port': asset.ssh_port,
'vars': dict(),
'groups': [],
}
if asset.domain and asset.domain.has_gateway():
info["vars"].update(self.make_proxy_command(asset))
if run_as_admin:
info.update(asset.get_auth_info())
if asset.is_unixlike():
info["become"] = asset.admin_user.become_info
if asset.is_windows():
info["vars"].update({
"ansible_connection": "ssh",
"ansible_shell_type": self.windows_ssh_default_shell,
})
for label in asset.labels.all():
info["vars"].update({
label.name: label.value
})
if asset.domain:
info["vars"].update({
"domain": asset.domain.name,
})
return info
@staticmethod
def make_proxy_command(asset):
gateway = asset.domain.random_gateway()
proxy_command_list = [
"ssh", "-o", "Port={}".format(gateway.port),
"-o", "StrictHostKeyChecking=no",
"{}@{}".format(gateway.username, gateway.ip),
"-W", "%h:%p", "-q",
]
if gateway.password:
proxy_command_list.insert(
0, "sshpass -p '{}'".format(gateway.password)
)
if gateway.private_key:
proxy_command_list.append("-i {}".format(gateway.private_key_file))
proxy_command = "'-o ProxyCommand={}'".format(
" ".join(proxy_command_list)
)
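        # Resulting value looks like (illustrative gateway values only):
        #   '-o ProxyCommand=ssh -o Port=2222 -o StrictHostKeyChecking=no
        #    admin@192.0.2.1 -W %h:%p -q'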
return {"ansible_ssh_common_args": proxy_command}
class JMSInventory(JMSBaseInventory):
"""
JMS Inventory is the inventory with jumpserver assets, so you can
    write your own inventory and construct your inventory;
user_info is obtained from admin_user or asset_user
"""
def __init__(self, assets, run_as_admin=False, run_as=None, become_info=None, system_user=None):
"""
:param assets: assets
        :param run_as_admin: whether to run using each asset's admin user;
            the admin user may differ from host to host
        :param run_as: username to run as (after the unified AssetUserManager
            was added, this refers to the asset user's username)
        :param become_info: whether to become a given user when executing
"""
self.assets = assets
self.using_admin = run_as_admin
self.run_as = run_as
self.system_user = system_user
self.become_info = become_info
host_list = []
for asset in assets:
host = self.convert_to_ansible(asset, run_as_admin=run_as_admin)
if run_as is not None:
run_user_info = self.get_run_user_info(host)
host.update(run_user_info)
if become_info and asset.is_unixlike():
host.update(become_info)
host_list.append(host)
super().__init__(host_list=host_list)
def get_run_user_info(self, host):
from assets.backends import AssetUserManager
if not self.run_as and not self.system_user:
return {}
asset_id = host.get('id', '')
asset = self.assets.filter(id=asset_id).first()
if not asset:
            logger.error('Host not found: %s', asset_id)
if self.system_user:
self.system_user.load_asset_special_auth(asset=asset, username=self.run_as)
return self.system_user._to_secret_json()
try:
manager = AssetUserManager()
run_user = manager.get_latest(username=self.run_as, asset=asset, prefer='system_user')
return run_user._to_secret_json()
except Exception as e:
logger.error(e, exc_info=True)
return {}
class JMSCustomInventory(JMSBaseInventory):
"""
JMS Custom Inventory is the inventory with jumpserver assets,
user_info is obtained from custom parameter
"""
def __init__(self, assets, username, password=None, public_key=None, private_key=None):
"""
"""
self.assets = assets
self.username = username
self.password = password
self.public_key = public_key
self.private_key = private_key
host_list = []
for asset in assets:
host = self.convert_to_ansible(asset)
run_user_info = self.get_run_user_info()
host.update(run_user_info)
host_list.append(host)
super().__init__(host_list=host_list)
def get_run_user_info(self):
return {
'username': self.username,
'password': self.password,
'public_key': self.public_key,
'private_key': self.private_key
}
| gpl-2.0 | 8,093,723,619,407,908,000 | 30.875 | 100 | 0.563922 | false |
Logan213/is210-week-05-warmup | tests/test_task_04.py | 28 | 1434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests Task 04."""
# Import Python libs
import unittest
import task_04
class Task04TestCase(unittest.TestCase):
"""Test cases for Task 04."""
testmap = {
'not_enough_litterboxes': [2, 1, True, True],
'plenty_of_litterboxes': [1, 2, True, False],
'no_catfood': [1, 2, False, True],
'same_litterboxes': [1, 1, True, False],
}
def test_positional_args(self):
"""
Tests that too_many_kittens has the correct positional arguments.
"""
for case, params in self.testmap.iteritems():
result = task_04.too_many_kittens(*params[:3])
msg = 'Tried {} kittens, {} litterboxes and {} food, expected {}'
msg = msg.format(*params)
self.assertIs(result, params[3], msg)
def test_keyword_args(self):
"""
Tests that too_many_kittens has the correct keyword arguments.
"""
for case, params in self.testmap.iteritems():
result = task_04.too_many_kittens(kittens=params[0],
litterboxes=params[1],
catfood=params[2])
msg = 'Tried {} kittens, {} litterboxes, and {} food, expected {}'
msg = msg.format(*params)
self.assertIs(result, params[3], msg)
if __name__ == '__main__':
unittest.main()
| mpl-2.0 | 8,147,212,527,192,394,000 | 30.173913 | 78 | 0.536262 | false |
tudorvio/nova | nova/cmd/dhcpbridge.py | 41 | 4778 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handle lease database updates from DHCP servers.
"""
from __future__ import print_function
import os
import sys
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
from nova import context
import nova.db.api
from nova import exception
from nova.i18n import _LE
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as objects_base
from nova import rpc
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
LOG = logging.getLogger(__name__)
def add_lease(mac, ip_address):
"""Set the IP that was assigned by the DHCP server."""
api = network_rpcapi.NetworkAPI()
api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host)
def old_lease(mac, ip_address):
"""Called when an old lease is recognized."""
# NOTE(vish): We assume we heard about this lease the first time.
# If not, we will get it the next time the lease is
# renewed.
pass
def del_lease(mac, ip_address):
"""Called when a lease expires."""
api = network_rpcapi.NetworkAPI()
api.release_fixed_ip(context.get_admin_context(), ip_address,
CONF.host, mac)
def init_leases(network_id):
"""Get the list of hosts for a network."""
ctxt = context.get_admin_context()
network = objects.Network.get_by_id(ctxt, network_id)
network_manager = importutils.import_object(CONF.network_manager)
return network_manager.get_dhcp_leases(ctxt, network)
def add_action_parsers(subparsers):
subparsers.add_parser('init')
# NOTE(cfb): dnsmasq always passes mac, and ip. hostname
# is passed if known. We don't care about
# hostname, but argparse will complain if we
# do not accept it.
for action in ['add', 'del', 'old']:
parser = subparsers.add_parser(action)
parser.add_argument('mac')
parser.add_argument('ip')
parser.add_argument('hostname', nargs='?', default='')
parser.set_defaults(func=globals()[action + '_lease'])
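# Illustrative invocation sketch (not part of the original source): dnsmasq
# calls this script through its --dhcp-script hook with an action plus the
# lease details, e.g. (MAC/IP values are made up, and the console-script
# name 'nova-dhcpbridge' is assumed):
#
#     nova-dhcpbridge add 52:54:00:12:34:56 192.0.2.10 myhost
#     nova-dhcpbridge del 52:54:00:12:34:56 192.0.2.10 myhost
#     nova-dhcpbridge old 52:54:00:12:34:56 192.0.2.10
#
# The 'init' action takes no lease arguments; main() below instead expects
# the CONFIG_FILE and NETWORK_ID environment variables to be set.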
CONF.register_cli_opt(
cfg.SubCommandOpt('action',
title='Action options',
help='Available dhcpbridge options',
handler=add_action_parsers))
def block_db_access():
class NoDB(object):
def __getattr__(self, attr):
return self
def __call__(self, *args, **kwargs):
stacktrace = "".join(traceback.format_stack())
LOG.error(_LE('No db access allowed in nova-dhcpbridge: %s'),
stacktrace)
raise exception.DBNotAllowed('nova-dhcpbridge')
nova.db.api.IMPL = NoDB()
def main():
"""Parse environment and arguments and call the appropriate action."""
config.parse_args(sys.argv,
default_config_files=jsonutils.loads(os.environ['CONFIG_FILE']))
logging.setup(CONF, "nova")
global LOG
LOG = logging.getLogger('nova.dhcpbridge')
objects.register_all()
if not CONF.conductor.use_local:
block_db_access()
objects_base.NovaObject.indirection_api = \
conductor_rpcapi.ConductorAPI()
if CONF.action.name in ['add', 'del', 'old']:
LOG.debug("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'",
{"action": CONF.action.name,
"mac": CONF.action.mac,
"ip": CONF.action.ip})
CONF.action.func(CONF.action.mac, CONF.action.ip)
else:
try:
network_id = int(os.environ.get('NETWORK_ID'))
except TypeError:
LOG.error(_LE("Environment variable 'NETWORK_ID' must be set."))
return(1)
print(init_leases(network_id))
rpc.cleanup()
| apache-2.0 | -5,825,263,893,857,775,000 | 31.951724 | 78 | 0.650481 | false |
ragnarstroberg/ragnar_imsrg | src/pybind11/tests/test_modules.py | 4 | 2208 |
def test_nested_modules():
import pybind11_tests
from pybind11_tests.submodule import submodule_func
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.submodule.__name__ == "pybind11_tests.submodule"
assert submodule_func() == "submodule_func()"
def test_reference_internal():
from pybind11_tests import ConstructorStats
from pybind11_tests.submodule import A, B
b = B()
assert str(b.get_a1()) == "A[1]"
assert str(b.a1) == "A[1]"
assert str(b.get_a2()) == "A[2]"
assert str(b.a2) == "A[2]"
b.a1 = A(42)
b.a2 = A(43)
assert str(b.get_a1()) == "A[42]"
assert str(b.a1) == "A[42]"
assert str(b.get_a2()) == "A[43]"
assert str(b.a2) == "A[43]"
astats, bstats = ConstructorStats.get(A), ConstructorStats.get(B)
assert astats.alive() == 2
assert bstats.alive() == 1
del b
assert astats.alive() == 0
assert bstats.alive() == 0
assert astats.values() == ['1', '2', '42', '43']
assert bstats.values() == []
assert astats.default_constructions == 0
assert bstats.default_constructions == 1
assert astats.copy_constructions == 0
assert bstats.copy_constructions == 0
# assert astats.move_constructions >= 0 # Don't invoke any
# assert bstats.move_constructions >= 0 # Don't invoke any
assert astats.copy_assignments == 2
assert bstats.copy_assignments == 0
assert astats.move_assignments == 0
assert bstats.move_assignments == 0
def test_importing():
from pybind11_tests import OD
from collections import OrderedDict
assert OD is OrderedDict
assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
def test_pydoc():
"""Pydoc needs to be able to provide help() for everything inside a pybind11 module"""
import pybind11_tests
import pydoc
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.__doc__ == "pybind11 test module"
assert pydoc.text.docmodule(pybind11_tests)
def test_duplicate_registration():
"""Registering two things with the same name"""
from pybind11_tests import duplicate_registration
assert duplicate_registration() == []
| gpl-2.0 | 5,341,537,252,236,190,000 | 30.098592 | 90 | 0.641757 | false |
brunobergher/dotfiles | sublime/pygments/all/pygments/lexers/textedit.py | 47 | 6057 | # -*- coding: utf-8 -*-
"""
pygments.lexers.textedit
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for languages related to text processing.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import RegexLexer, include, default, bygroups, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.python import PythonLexer
__all__ = ['AwkLexer', 'VimLexer']
class AwkLexer(RegexLexer):
"""
For Awk scripts.
.. versionadded:: 1.5
"""
name = 'Awk'
aliases = ['awk', 'gawk', 'mawk', 'nawk']
filenames = ['*.awk']
mimetypes = ['application/x-awk']
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'#.*$', Comment.Single)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'\B', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|\|\||&&|in\b|\$|!?~|'
r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(break|continue|do|while|exit|for|if|else|'
r'return)\b', Keyword, 'slashstartsregex'),
(r'function\b', Keyword.Declaration, 'slashstartsregex'),
(r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
r'length|match|split|sprintf|sub|substr|tolower|toupper|close|'
r'fflush|getline|next|nextfile|print|printf|strftime|systime|'
r'delete|system)\b', Keyword.Reserved),
(r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|'
r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|'
r'RSTART|RT|SUBSEP)\b', Name.Builtin),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
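# A minimal usage sketch (not part of the original module), assuming a
# standard Pygments installation:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('BEGIN { print "hello" }', AwkLexer(), TerminalFormatter()))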
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
.. versionadded:: 0.8
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
'_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
_python = r'py(?:t(?:h(?:o(?:n)?)?)?)?'
tokens = {
'root': [
(r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)',
bygroups(using(this), Keyword, Text, Operator, Text, Text,
using(PythonLexer), Text)),
(r'^([ \t:]*)(' + _python + r')([ \t])(.*)',
bygroups(using(this), Keyword, Text, using(PythonLexer))),
(r'^\s*".*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(''|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vim_builtins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w:
return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
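    # Worked example for the lookup above (the mapping shown is hypothetical,
    # not the real table from pygments.lexers._vim_builtins):
    #
    #     mapping = [('ab', 'abbreviate'), ('au', 'autocmd')]  # sorted pairs
    #     lexer.is_in('abbrev', mapping)  # True: starts with 'ab' and is a
    #                                     # prefix of 'abbreviate'
    #     lexer.is_in('abx', mapping)     # False: not a prefix of 'abbreviate'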
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
| mit | 828,015,412,117,578,400 | 34.840237 | 84 | 0.482252 | false |
tliber/scrapy | setup.py | 83 | 1605 | from os.path import dirname, join
from setuptools import setup, find_packages
with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
setup(
name='Scrapy',
version=version,
url='http://scrapy.org',
description='A high-level Web Crawling and Web Scraping framework',
long_description=open('README.rst').read(),
author='Scrapy developers',
maintainer='Pablo Hoffman',
maintainer_email='[email protected]',
license='BSD',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['scrapy = scrapy.cmdline:execute']
},
classifiers=[
'Framework :: Scrapy',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'Twisted>=10.0.0',
'w3lib>=1.8.0',
'queuelib',
'lxml',
'pyOpenSSL',
'cssselect>=0.9',
'six>=1.5.2',
'parsel>=0.9.3',
'PyDispatcher>=2.0.5',
'service_identity',
],
)
| bsd-3-clause | -827,700,076,703,634,400 | 30.470588 | 79 | 0.588785 | false |
oVirt/ovirt-hosted-engine-setup | src/plugins/gr-he-common/sanlock/lockspace.py | 1 | 2295 | #
# ovirt-hosted-engine-setup -- ovirt hosted engine setup
# Copyright (C) 2013-2017 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
sanlock lockspace initialization plugin.
"""
import gettext
from otopi import plugin
from otopi import util
from ovirt_hosted_engine_setup import constants as ohostedcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-hosted-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
sanlock lockspace initialization plugin.
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_INIT
)
def _init(self):
# TODO: check what's still in use and remove everything else from here
self.environment.setdefault(
ohostedcons.SanlockEnv.SANLOCK_SERVICE,
ohostedcons.Defaults.DEFAULT_SANLOCK_SERVICE
)
self.environment.setdefault(
ohostedcons.SanlockEnv.LOCKSPACE_NAME,
ohostedcons.Defaults.DEFAULT_LOCKSPACE_NAME
)
self.environment.setdefault(
ohostedcons.StorageEnv.METADATA_VOLUME_UUID,
None
)
self.environment.setdefault(
ohostedcons.StorageEnv.METADATA_IMAGE_UUID,
None
)
self.environment.setdefault(
ohostedcons.StorageEnv.LOCKSPACE_VOLUME_UUID,
None
)
self.environment.setdefault(
ohostedcons.StorageEnv.LOCKSPACE_IMAGE_UUID,
None
)
# vim: expandtab tabstop=4 shiftwidth=4
| lgpl-2.1 | 7,988,931,822,207,785,000 | 28.423077 | 78 | 0.681917 | false |
emd/boutanalysis | varyped.py | 1 | 4568 | '''Tools for interacting with VARYPED model equilibria'''
import numpy as np
from sys import argv
import string
import copy
def create_db(file_path):
'''Create a dictionary from a VARYPED results text file.
Parameters:
file_path -- string, path to the text file containing VARYPED results.
The first line of the file should begin with "VARYPED RESULTS...",
the second line should give the column titles, e.g. i, pres, cur,
and the remaining lines should give the model equilibria values.
Returns:
        a dictionary that will be keyed by the VARYPED model equilibria index
with values that are themselves dictionaries of the parameters of
the corresponding model equilibria.
'''
f = open(file_path)
    # v will be keyed by the model equilibria index, and its values will be
# dictionaries of the parameters of the corresponding model equilibria
v = dict()
# Create a temporary dictionary
d = dict()
# Read the lines from f into the dictionary v
for linenum, line in enumerate(f):
if linenum == 1:
# The titles of each column will be used as dictionary keys
keys = line.split()
elif linenum > 1:
for i, val in enumerate(line.split()):
if keys[i] == 'i':
d[keys[i]] = int(val)
else:
d[keys[i]] = float(val)
v[d['i']] = copy.copy(d)
f.close()
return v
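# Illustrative sketch (not from the original file): the text layout that
# create_db() expects and a typical call.  The file name and the numeric
# values below are made up.
#
#     VARYPED RESULTS for shot 123456
#     i  pres  cur  convrg
#     1  1.00  1.00  1
#     2  0.75  1.00  1
#
#     v = create_db('varyped_results.txt')
#     v[2]['pres']    # -> 0.75, and v[2]['i'] stays an int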
def find_eq(v, p = None, J = None):
    '''Find VARYPED model equilibria indices with the specified parameters.
Parameters:
v -- dictionary, the keys will be the VARYPED model equilibria
indices, and the value pairs will themselves be dictionaries
of the parameters of the corresponding equilibria
p -- scalar, specifying the fraction of the experimental pressure
gradient. To find the VARYPED model equilibria indices with
75% of the experimental pressure gradient, specify p = 0.75, etc
J -- scalar, specifying the fraction of the experimental edge current
density. To find the VARYPED model equilibria indices with
110% of the experimental edge current density, specify J = 1.10
Returns:
The indices of the VARYPED equilibria with the specified pressure
gradients and edge current densities.
'''
if p is None and J is None:
print 'No parameters specified. Returning NoneType object.'
return None
# Sort through the dictionary v for model equilibria that have the
# specified parameters and add their index to the list ind.
# NOTE: We also only count equilibria that converged (convrg = 1),
# as unconverged equilibria are useless to us.
ind = list()
for eq in v.keys():
if p is None:
if v[eq]['cur'] == J and v[eq]['convrg'] == 1:
ind.append(eq)
elif J is None:
if v[eq]['pres'] == p and v[eq]['convrg'] == 1:
ind.append(eq)
elif v[eq]['pres'] == p and v[eq]['cur'] == J and v[eq]['convrg'] == 1:
ind.append(eq)
return ind
def get_params(v, ind):
'''Get model parameters for specified VARYPED model equilibria.
Parameters:
v -- dictionary, the keys will be the VARYPED model equilibria
indices, and the value pairs will themselves be dictionaries
of the parameters of the corresponding equilibria
ind -- list, the index of equilibria to obtain parameters for
Returns:
params -- list, with each line giving an ordered pair (p, J), where
p is the fraction of the experimental pressure gradient and
J is the fraction of the experimental edge current density.
'''
params = list()
for i in ind:
params.append((v[i]['pres'], v[i]['cur']))
return params
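# Continuing the hypothetical sketch above, the helpers chain together as:
#
#     ind = find_eq(v, p=0.75, J=1.0)   # converged equilibria only
#     get_params(v, ind)                # -> [(0.75, 1.0)]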
if __name__ == '__main__':
file_path, p, J = argv[1:4]
if p == 'None':
p = None
else:
p = float(p)
if J == 'None':
J = None
else:
J = float(J)
v = create_db(file_path)
ind = find_eq(v, p, J)
params = get_params(v, ind)
if ind is not None:
column = '{:<6}'
print '\nVARYPED Equilibria:'
print column.format('i') + column.format('p') + column.format('J')
print '-'*15
for i in range(len(ind)):
out = (column.format(str(ind[i]))
+ column.format(str(params[i][0]))
+ column.format(str(params[i][1])))
print out
print ''
| lgpl-3.0 | -6,014,521,447,447,135,000 | 31.863309 | 79 | 0.607268 | false |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/tseries/tests/test_base.py | 9 | 82416 | from __future__ import print_function
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assertIsInstance
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
import pandas.tseries.offsets as offsets
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEqual(s.year,2000)
self.assertEqual(s.month,1)
self.assertEqual(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4, name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03','2012-01-04'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name', tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H', name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00', '2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00', '2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
pd.NaT, pd.Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='H')""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns, US/Eastern]', freq=None)""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00+00:00', '2011-01-01 10:00:00+00:00', 'NaT'], dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00
Freq: H"""
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng - delta
expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz='Asia/Tokyo', name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz='Asia/Tokyo', name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_take(self):
#GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', '-3D',
'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ ]
def test_ops_properties(self):
self.check_ops_properties(['days','hours','minutes','seconds','milliseconds'])
self.check_ops_properties(['microseconds','nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'),
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = """TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"""
exp3 = """TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"""
exp4 = """TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq='D')"""
exp5 = """TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', '3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"""
with pd.option_context('display.width',300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width',300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = """TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
rng = timedelta_range('1 days','10 days',name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda : rng * offset)
# divide
expected = Int64Index((np.arange(10)+1)*12,name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
        # don't allow division by NaT (maybe we could allow this in the future)
self.assertRaises(TypeError, lambda : rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda : tdi - dt)
self.assertRaises(TypeError, lambda : tdi - dti)
self.assertRaises(TypeError, lambda : td - dt)
self.assertRaises(TypeError, lambda : td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101',periods=3)
ts = Timestamp('20130101')
dt = ts.to_datetime()
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_datetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result,expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda : dt_tz - ts)
self.assertRaises(TypeError, lambda : dt_tz - dt)
self.assertRaises(TypeError, lambda : dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda : dt - dt_tz)
self.assertRaises(TypeError, lambda : ts - dt_tz)
self.assertRaises(TypeError, lambda : ts_tz2 - ts)
self.assertRaises(TypeError, lambda : ts_tz2 - dt)
self.assertRaises(TypeError, lambda : ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda : dti - ts_tz)
self.assertRaises(TypeError, lambda : dti_tz - ts)
self.assertRaises(TypeError, lambda : dti_tz - ts_tz2)
result = dti_tz-dt_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = dt_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = dti_tz-ts_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = ts_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(['20121231','20130101','20130102'],tz='US/Eastern')
tm.assert_index_equal(result,expected)
def test_dti_dti_deprecated_ops(self):
# deprecated in 0.16.0 (GH9094)
# change to return subtraction -> TimeDeltaIndex in 0.17.0
        # should move to the appropriate sections above
dti = date_range('20130101',periods=3)
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
with tm.assert_produces_warning(FutureWarning):
result = dti-dti
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti+dti
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti_tz
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz+dti_tz
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti-dti_tz
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti_tz+dti)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti+dti_tz)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda : tdi + dti[0:1])
self.assertRaises(ValueError, lambda : tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3]))
# this is a union!
#self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
'1 days 08:00:00', '1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
#GH 9680
tdi = pd.timedelta_range(start=0,periods=10,freq='1s')
ts = pd.Series(np.random.normal(size=10),index=tdi)
self.assertNotIn('foo',ts.__dict__.keys())
self.assertRaises(AttributeError,lambda : ts.foo)
def test_order(self):
#GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx')
idx2 = TimedeltaIndex(['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
'3 day', '5 day'], name='idx2')
idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
'2 minute', pd.NaT], name='idx3')
exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
'5 minute'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_take(self):
#GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
        self.check_ops_properties(['year', 'month', 'day', 'hour', 'minute',
                                   'second', 'weekofyear', 'week', 'dayofweek',
                                   'dayofyear', 'quarter'])
        self.check_ops_properties(['qyear'], lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
for i in [0, 1, 3]:
            self.assertEqual(result[i], expected[i])
        self.assertEqual(result[2].ordinal, pd.tslib.iNaT)
        self.assertEqual(result[2].freq, 'D')
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
            self.assertEqual(result_list[i], expected_list[i])
        self.assertEqual(result_list[2].ordinal, pd.tslib.iNaT)
        self.assertEqual(result_list[2].freq, 'D')
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex([], dtype='int64', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='int64', freq='D')"""
exp3 = """PeriodIndex(['2011-01-01', '2011-01-02'], dtype='int64', freq='D')"""
exp4 = """PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='int64', freq='D')"""
exp5 = """PeriodIndex(['2011', '2012', '2013'], dtype='int64', freq='A-DEC')"""
exp6 = """PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], dtype='int64', freq='H')"""
exp7 = """PeriodIndex(['2013Q1'], dtype='int64', freq='Q-DEC')"""
exp8 = """PeriodIndex(['2013Q1', '2013Q2'], dtype='int64', freq='Q-DEC')"""
exp9 = """PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='int64', freq='Q-DEC')"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH 6527
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3),
np.timedelta64(72, 'h'), Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120),
np.timedelta64(120, 'm'), Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(ValueError, msg):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
            with tm.assertRaisesRegexp(ValueError, msg):
                result = rng - delta
            with tm.assertRaisesRegexp(ValueError, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00',
'2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012', '2011'], name='pidx', freq='A')
pexpected = PeriodIndex(['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx', freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertEqual(ordered.freq, 'D')
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx[-1]
self.assertEqual(result, pd.Period('2011-01-31', freq='D'))
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_take(self):
#GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx.take([5])
self.assertEqual(result, pd.Period('2011-01-06', freq='D'))
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| gpl-2.0 | 5,401,510,452,408,450,000 | 42.9552 | 174 | 0.548267 | false |
maistrovas/My-Courses-Solutions | Coursera Algorithmic Thinking (Part 1)/Module 2/Application/Application2.py | 1 | 7494 | """
Provided code for Application portion of Module 2
Answers 4/6
Application Grade is 13 out of 15
Text Answers
- Question 2:
  All three graphs are resilient in this case.
- Question 5:
  UPA and ER graphs are still resilient under this type of targeted attack
  (UPA comes very close to violating the 25% threshold).
"""
# general imports
import urllib2
import random
import timeit
import time
import math
import UPA
from collections import deque
from random import shuffle
import BFS_project as project
import matplotlib.pyplot as plt
import numpy as np
# CodeSkulptor import
#import simpleplot
#import codeskulptor
#codeskulptor.set_timeout(60)
# Desktop imports
#import matplotlib.pyplot as plt
############################################
# Provided code
def copy_graph(graph):
"""
Make a copy of a graph
"""
new_graph = {}
for node in graph:
new_graph[node] = set(graph[node])
return new_graph
def delete_node(ugraph, node):
"""
Delete a node from an undirected graph
"""
neighbors = ugraph[node]
ugraph.pop(node)
for neighbor in neighbors:
ugraph[neighbor].remove(node)
def targeted_order(ugraph):
"""
Compute a targeted attack order consisting
of nodes of maximal degree
Returns:
A list of nodes
"""
# copy the graph
new_graph = copy_graph(ugraph)
order = []
while len(new_graph) > 0:
max_degree = -1
for node in new_graph:
if len(new_graph[node]) > max_degree:
max_degree = len(new_graph[node])
max_degree_node = node
neighbors = new_graph[max_degree_node]
new_graph.pop(max_degree_node)
for neighbor in neighbors:
new_graph[neighbor].remove(max_degree_node)
order.append(max_degree_node)
return order
##########################################################
# Code for loading computer network graph
NETWORK_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_rf7.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
counter = 0
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
counter +=1
answer_graph[node].add(int(neighbor))
print 'Number network edges = ', counter / 2
return answer_graph
def er_graph(n, p):
'''
implementation of ER algorithm
n - final number of nodes
p - probability
'''
graph = {key: set() for key in xrange(n)}
counter = 0
    for i in xrange(n):
        # consider each unordered pair {i, j} exactly once so that p is the
        # per-edge probability and the edge counter is not double-counted
        for j in xrange(i + 1, n):
            if random.random() < p:
                counter += 1
                graph[i].add(j)
                graph[j].add(i)
print 'Number of ER-edges=', counter
return graph
##UPA-Algorithm
def algorithm_upa(n, m):
'''
implementation of UPA algorithm
n - final number of nodes
m - number of existing nodes
p - probability for er_graph
'''
graph = er_graph(m, 1)
upa = UPA.UPATrial(m)
counter = 0
for i in xrange(m, n):
new_edges = upa.run_trial(m)
graph[i] = new_edges
for node in new_edges:
graph[node].add(i)
return graph
def random_order(graph):
'''
takes a graph and returns a list
of the nodes in the graph in some random order
'''
    result = list(graph)
    shuffle(result)
    return result
loaded_graph = load_graph(NETWORK_URL)
er_ggraph = er_graph(1239, 0.004)
upa_graph = algorithm_upa(1239, 3)
def count_Uedges(ugraph):
'''
count edges in the graph
'''
counter = 0
for i in ugraph:
for j in ugraph[i]:
counter +=1
return counter/2
# print 'UPA edges = ', count_Uedges(upa_graph)
# print 'ER edges =', count_Uedges(er_ggraph)
# print 'Network graph edges =', count_Uedges(loaded_graph)
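# Rough size check (assumption: the loaded network graph has on the order of
# 3000 undirected edges): with n = 1239 and p = 0.004 the ER graph averages
# about C(1239, 2) * 0.004 ~= 3.1e3 edges, and UPA with m = 3 adds at most 3
# edges per new node (~3.7e3 as an upper bound), so all three graphs are of
# comparable size, which keeps the resilience comparison fair.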
def plotting(net_g, er_g, upa_g, question):
"""
Plot an example with two curves with legends
x - number of nodes removed
y - size of the largest connect component
in the graphs resulting from the node removal.
"""
if question == 1:
print 'The function plots question 1'
network_order = random_order(net_g)
er_order = random_order(er_g)
upa_order = random_order(upa_g)
if question == 4:
print 'The function plots question 4'
network_order = targeted_order(net_g)
er_order = targeted_order(er_g)
upa_order = targeted_order(upa_g)
network_resil = project.compute_resilience(net_g, network_order)
er_resil = project.compute_resilience(er_g, er_order)
upa_resil = project.compute_resilience(upa_g, upa_order)
xvals_net = np.array([node for node in range(len(network_order) +1 )])
xvals_er = np.array([node for node in range(len(er_order) +1 )])
xvals_upa = np.array([node for node in range(len(upa_order) +1 )])
yvals_net = np.array(network_resil)
yvals_er = np.array(er_resil)
yvals_upa = np.array(upa_resil)
plt.figure('Application2 Plot')
plt.title('Resilience comparison')
plt.xlabel('Removed nodes')
plt.ylabel('Largest conected component')
plt.plot(xvals_net, yvals_net, '-b', label='Network-Data')
plt.plot(xvals_er, yvals_er, '-r', label='ER-Algorithm (p = 0.004)')
plt.plot(xvals_upa, yvals_upa, '-g', label='UPA-Algorithm (m = 3)')
plt.legend(loc='upper right')
plt.show()
'''
Questions 1,4
'''
plotting(loaded_graph, er_ggraph, upa_graph, 1)
#plotting(loaded_graph, er_ggraph, upa_graph, 4)
def measure_targeted_order(n, m, func):
    '''
    Build a UPA graph with n nodes (m new edges per node) and time a single
    call of func on it.
    '''
    graph = algorithm_upa(n, m)
    return timeit.timeit(lambda: func(graph), number=1)
def fast_targeted_order(ugraph):
    '''
    Compute a targeted attack order (highest-degree node first, degrees
    updated after each removal) in O(n + m) time by bucketing nodes by
    degree instead of rescanning the whole graph as targeted_order does.
    '''
ugraph = copy_graph(ugraph)
N = len(ugraph)
    # independent set per degree ([set()] * N would alias one shared set)
    degree_sets = [set() for _ in xrange(N)]
for node, neighbors in ugraph.iteritems():
degree = len(neighbors)
degree_sets[degree].add(node)
order = []
for k in range(N - 1, -1, -1):
while degree_sets[k]:
u = degree_sets[k].pop()
for neighbor in ugraph[u]:
d = len(ugraph[neighbor])
degree_sets[d].remove(neighbor)
degree_sets[d - 1].add(neighbor)
order.append(u)
delete_node(ugraph, u)
return order
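# Hedged sanity check (illustrative helper, not part of the original
# assignment; the name _targeted_orders_agree is made up): both orderings
# should be permutations of the same node set, so comparing the sorted node
# lists is a cheap consistency test between the O(n^2) and O(n + m) versions.
# Both functions copy the graph internally, so they can share one input graph.
def _targeted_orders_agree(n=50, m=3):
    graph = algorithm_upa(n, m)
    return sorted(targeted_order(graph)) == sorted(fast_targeted_order(graph))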
def question3():
'''
Function plotting Question 3
'''
xs = range(10, 1000, 10)
m = 5
ys_tagreted = [measure_targeted_order(n, m, targeted_order) for n in xs]
ys_fast_targeted = [measure_targeted_order(n, m, fast_targeted_order) for n in xs]
plt.plot(xs, ys_tagreted, '-r', label='targeted_order')
plt.plot(xs, ys_fast_targeted, '-b', label='fast_targeted_order')
plt.title('Targeted order functions performance (desktop Python)')
plt.xlabel('Number of nodes in the graph')
plt.ylabel('Execution time')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
'''
Question3
Include only plotting
'''
question3()
| mit | 4,006,435,012,310,974,500 | 25.387324 | 86 | 0.60822 | false |
blink1073/scikit-image | skimage/color/tests/test_adapt_rgb.py | 19 | 2591 | from functools import partial
import numpy as np
from skimage import img_as_float, img_as_uint
from skimage import color, data, filters
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage._shared._warnings import expected_warnings
# Down-sample image for quicker testing.
COLOR_IMAGE = data.astronaut()[::5, ::5]
GRAY_IMAGE = data.camera()[::5, ::5]
SIGMA = 3
smooth = partial(filters.gaussian, sigma=SIGMA)
assert_allclose = partial(np.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
with expected_warnings(['precision loss']):
return img_as_uint(filters.sobel(image))
def test_gray_scale_image():
# We don't need to test both `hsv_value` and `each_channel` since
# `adapt_rgb` is handling gray-scale inputs.
assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))
def test_each_channel():
filtered = edges_each(COLOR_IMAGE)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
assert_allclose(channel, expected)
def test_each_channel_with_filter_argument():
filtered = smooth_each(COLOR_IMAGE, SIGMA)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
def test_hsv_value():
filtered = edges_hsv(COLOR_IMAGE)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filters.sobel(value))
def test_hsv_value_with_filter_argument():
filtered = smooth_hsv(COLOR_IMAGE, SIGMA)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], smooth(value))
def test_hsv_value_with_non_float_output():
# Since `rgb2hsv` returns a float image and the result of the filtered
# result is inserted into the HSV image, we want to make sure there isn't
# a dtype mismatch.
filtered = edges_hsv_uint(COLOR_IMAGE)
filtered_value = color.rgb2hsv(filtered)[:, :, 2]
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
# Reduce tolerance because dtype conversion.
assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
| bsd-3-clause | 7,656,706,217,985,837,000 | 29.845238 | 79 | 0.700502 | false |