repo_name stringlengths 6 to 100 | path stringlengths 4 to 294 | copies stringlengths 1 to 5 | size stringlengths 4 to 6 | content stringlengths 606 to 896k | license stringclasses 15 values | var_hash int64 -9,223,186,179,200,150,000 to 9,223,291,175B | doc_hash int64 -9,223,304,365,658,930,000 to 9,223,309,051B | line_mean float64 3.5 to 99.8 | line_max int64 13 to 999 | alpha_frac float64 0.25 to 0.97 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---
smorante/continuous-goal-directed-actions | demonstration-feature-selection/src/alternatives/main_dtw_mds_norm.py | 2 | 3731 |
# -*- coding: utf-8 -*-
"""
Author: Santiago Morante
Robotics Lab. Universidad Carlos III de Madrid
"""
########################## DTW ####################################
import libmddtw
import matplotlib.pyplot as plt
from dtw import dtw
########################## MDS ####################################
import numpy as np
from sklearn.metrics import euclidean_distances
import libmds
########################## DBSCAN ####################################
import libdbscan
from sklearn.preprocessing import StandardScaler # to normalize
def normalize(X):
return StandardScaler().fit_transform(X)
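# main(): build synthetic sine-wave demonstrations, compute pairwise multi-dimensional
# DTW distances, embed them in 1-D with MDS, fit a normal distribution to the embedding
# and report each demonstration's tail probability.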
def main():
NUMBER_OF_DEMONSTRATIONS=5
##########################################################################
########################## DTW ####################################
##########################################################################
dist=np.zeros((NUMBER_OF_DEMONSTRATIONS,NUMBER_OF_DEMONSTRATIONS))
demons=[]
# fill demonstrations
for i in range(NUMBER_OF_DEMONSTRATIONS):
demons.append(np.matrix([ np.sin(np.arange(15+i)+i) , np.sin(np.arange(15+i)+i)]))
# fill distance matrix
for i in range(NUMBER_OF_DEMONSTRATIONS):
for j in range(NUMBER_OF_DEMONSTRATIONS):
mddtw = libmddtw.Mddtw()
x,y = mddtw.collapseRows(demons[i],demons[j])
#fig = plt.figure()
#plt.plot(x)
#plt.plot(y)
singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j])
dist[i][j]=singleDist
# print 'Minimum distance found:', singleDist
#fig = plt.figure()
# plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest')
# plt.plot(path[0], path[1], 'w')
# plt.xlim((-0.5, cost.shape[0]-0.5))
# plt.ylim((-0.5, cost.shape[1]-0.5))
# print "dist", dist
###########################################################################
########################### MDS ####################################
###########################################################################
md = libmds.Mds()
md.create(n_components=1, metric=False, max_iter=3000, eps=1e-9, random_state=None,
dissimilarity="precomputed", n_jobs=1)
points = md.compute(dist)
print "points", points.flatten()
# md.plot()
##########################################################################
########################## norm ####################################
##########################################################################
from scipy.stats import norm
from numpy import linspace
from pylab import plot,show,hist,figure,title
param = norm.fit(points.flatten()) # distribution fitting
x = linspace(np.min(points),np.max(points),NUMBER_OF_DEMONSTRATIONS)
pdf_fitted = norm.pdf(x, loc=param[0],scale=param[1])
fig = plt.figure()
title('Normal distribution')
plot(x,pdf_fitted,'r-')
hist(points.flatten(),normed=1,alpha=.3)
show()
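    # For each embedded point, report a one-sided tail probability under the
    # fitted normal: CDF for points at or below the mean, survival (1-CDF) above it.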
for elem in points:
if elem <= np.mean(points):
print "probability of point ", str(elem), ": ", norm.cdf(elem, loc=param[0],scale=param[1])
if elem > np.mean(points):
print "probability of point ", str(elem), ": ", 1-norm.cdf(elem, loc=param[0],scale=param[1])
##############################################################################
##############################################################################
if __name__ == "__main__":
main()
| mit | 4,829,986,098,484,328,000 | 4,377,367,046,809,680,000 | 33.555556 | 105 | 0.425355 | false |
brunosantos/Bsan-kodi-repo | plugin.video.kodi/dns/rdtypes/ANY/RRSIG.py | 15 | 5774 |
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import calendar
import struct
import time
import dns.dnssec
import dns.exception
import dns.rdata
import dns.rdatatype
class BadSigTime(dns.exception.DNSException):
"""Raised when a SIG or RRSIG RR's time cannot be parsed."""
pass
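# SIG/RRSIG times are presented as 14-digit YYYYMMDDHHMMSS strings in UTC; the two
# helpers below convert between that form and POSIX timestamps.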
def sigtime_to_posixtime(what):
if len(what) != 14:
raise BadSigTime
year = int(what[0:4])
month = int(what[4:6])
day = int(what[6:8])
hour = int(what[8:10])
minute = int(what[10:12])
second = int(what[12:14])
return calendar.timegm((year, month, day, hour, minute, second,
0, 0, 0))
def posixtime_to_sigtime(what):
return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
class RRSIG(dns.rdata.Rdata):
"""RRSIG record
@ivar type_covered: the rdata type this signature covers
@type type_covered: int
@ivar algorithm: the algorithm used for the sig
@type algorithm: int
@ivar labels: number of labels
@type labels: int
@ivar original_ttl: the original TTL
@type original_ttl: long
@ivar expiration: signature expiration time
@type expiration: long
@ivar inception: signature inception time
@type inception: long
@ivar key_tag: the key tag
@type key_tag: int
@ivar signer: the signer
@type signer: dns.name.Name object
@ivar signature: the signature
@type signature: string"""
__slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
'expiration', 'inception', 'key_tag', 'signer',
'signature']
def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
original_ttl, expiration, inception, key_tag, signer,
signature):
super(RRSIG, self).__init__(rdclass, rdtype)
self.type_covered = type_covered
self.algorithm = algorithm
self.labels = labels
self.original_ttl = original_ttl
self.expiration = expiration
self.inception = inception
self.key_tag = key_tag
self.signer = signer
self.signature = signature
def covers(self):
return self.type_covered
def to_text(self, origin=None, relativize=True, **kw):
return '%s %d %d %d %s %s %d %s %s' % (
dns.rdatatype.to_text(self.type_covered),
self.algorithm,
self.labels,
self.original_ttl,
posixtime_to_sigtime(self.expiration),
posixtime_to_sigtime(self.inception),
self.key_tag,
self.signer.choose_relativity(origin, relativize),
dns.rdata._base64ify(self.signature)
)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
type_covered = dns.rdatatype.from_text(tok.get_string())
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
labels = tok.get_int()
original_ttl = tok.get_ttl()
expiration = sigtime_to_posixtime(tok.get_string())
inception = sigtime_to_posixtime(tok.get_string())
key_tag = tok.get_int()
signer = tok.get_name()
signer = signer.choose_relativity(origin, relativize)
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
signature = b64.decode('base64_codec')
return cls(rdclass, rdtype, type_covered, algorithm, labels,
original_ttl, expiration, inception, key_tag, signer,
signature)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack('!HBBIIIH', self.type_covered,
self.algorithm, self.labels,
self.original_ttl, self.expiration,
self.inception, self.key_tag)
file.write(header)
self.signer.to_wire(file, None, origin)
file.write(self.signature)
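    # Wire format: an 18-byte fixed header (type covered, algorithm, labels, original
    # TTL, expiration, inception, key tag) followed by the signer name and signature.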
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
header = struct.unpack('!HBBIIIH', wire[current : current + 18])
current += 18
rdlen -= 18
(signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
if not origin is None:
signer = signer.relativize(origin)
signature = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], header[2],
header[3], header[4], header[5], header[6], signer,
signature)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.signer = self.signer.choose_relativity(origin, relativize)
def _cmp(self, other):
return self._wire_cmp(other)
| gpl-2.0 | 3,941,388,866,427,267,600 | 5,161,370,198,076,087,000 | 36.251613 | 79 | 0.619155 | false |
stargaser/astropy | astropy/coordinates/tests/accuracy/test_altaz_icrs.py | 4 | 8535 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for AltAz to ICRS coordinate transformations.
We use "known good" examples computed with other coordinate libraries.
Note that we use very low precision asserts because some people run tests on 32-bit
machines and we want the tests to pass there.
TODO: check if these tests pass on 32-bit machines and implement
higher-precision checks on 64-bit machines.
"""
import pytest
from astropy import units as u
from astropy.time import Time
from astropy.coordinates.builtin_frames import AltAz
from astropy.coordinates import EarthLocation
from astropy.coordinates import Angle, SkyCoord
@pytest.mark.remote_data
def test_against_hor2eq():
"""Check that Astropy gives consistent results with an IDL hor2eq example.
See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro
Test is against these run outputs, run at 2000-01-01T12:00:00:
# NORMAL ATMOSPHERE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 53 40 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 30.1 (hh:mm:ss)
Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000)
IDL> print, ra, dec
114.23004 15.418818
# NO PRESSURE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 54 41 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 26.4 (hh:mm:ss)
Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000)
IDL> print, ra, dec
114.24554 15.427022
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(lon=Angle('-111d36.0m'),
lat=Angle('31d57.8m'),
height=2120. * u.m)
obstime = Time(2451545.0, format='jd', scale='ut1')
altaz_frame = AltAz(obstime=obstime, location=location,
temperature=0 * u.deg_C, pressure=0.781 * u.bar)
altaz_frame_noatm = AltAz(obstime=obstime, location=location,
temperature=0 * u.deg_C, pressure=0.0 * u.bar)
altaz = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame)
altaz_noatm = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame_noatm)
radec_frame = 'icrs'
radec_actual = altaz.transform_to(radec_frame)
radec_actual_noatm = altaz_noatm.transform_to(radec_frame)
radec_expected = SkyCoord('07h36m55.2s +15d25m08s', frame=radec_frame)
distance = radec_actual.separation(radec_expected).to('arcsec')
# this comes from running the example hor2eq but with the pressure set to 0
radec_expected_noatm = SkyCoord('07h36m58.9s +15d25m37s', frame=radec_frame)
distance_noatm = radec_actual_noatm.separation(radec_expected_noatm).to('arcsec')
# The baseline difference is ~2.3 arcsec with one atm of pressure. The
# difference is mainly due to the somewhat different atmospheric model that
# hor2eq assumes. This is confirmed by the second test which has the
# atmosphere "off" - the residual difference is small enough to be embedded
# in the assumptions about "J2000" or rounding errors.
assert distance < 5 * u.arcsec
assert distance_noatm < 0.4 * u.arcsec
@pytest.mark.remote_data
def test_against_pyephem():
"""Check that Astropy gives consistent results with one PyEphem example.
PyEphem: http://rhodesmill.org/pyephem/
See example input and output here:
https://gist.github.com/zonca/1672906
https://github.com/phn/pytpm/issues/2#issuecomment-3698679
"""
obstime = Time('2011-09-18 08:50:00')
location = EarthLocation(lon=Angle('-109d24m53.1s'),
lat=Angle('33d41m46.0s'),
height=30000. * u.m)
# We are using the default pressure and temperature in PyEphem
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(obstime=obstime, location=location,
temperature=15 * u.deg_C, pressure=1.010 * u.bar)
altaz = SkyCoord('6.8927d -60.7665d', frame=altaz_frame)
radec_actual = altaz.transform_to('icrs')
radec_expected = SkyCoord('196.497518d -4.569323d', frame='icrs') # EPHEM
# radec_expected = SkyCoord('196.496220d -4.569390d', frame='icrs') # HORIZON
distance = radec_actual.separation(radec_expected).to('arcsec')
# TODO: why is this difference so large?
# It currently is: 31.45187984720655 arcsec
assert distance < 1e3 * u.arcsec
# Add assert on current Astropy result so that we notice if something changes
radec_expected = SkyCoord('196.495372d -4.560694d', frame='icrs')
distance = radec_actual.separation(radec_expected).to('arcsec')
# Current value: 0.0031402822944751997 arcsec
assert distance < 1 * u.arcsec
@pytest.mark.remote_data
def test_against_jpl_horizons():
"""Check that Astropy gives consistent results with the JPL Horizons example.
The input parameters and reference results are taken from this page:
(from the first row of the Results table at the bottom of that page)
http://ssd.jpl.nasa.gov/?horizons_tutorial
"""
obstime = Time('1998-07-28 03:00')
location = EarthLocation(lon=Angle('248.405300d'),
lat=Angle('31.9585d'),
height=2.06 * u.km)
# No atmosphere
altaz_frame = AltAz(obstime=obstime, location=location)
altaz = SkyCoord('143.2970d 2.6223d', frame=altaz_frame)
radec_actual = altaz.transform_to('icrs')
radec_expected = SkyCoord('19h24m55.01s -40d56m28.9s', frame='icrs')
distance = radec_actual.separation(radec_expected).to('arcsec')
# Current value: 0.238111 arcsec
assert distance < 1 * u.arcsec
@pytest.mark.remote_data
@pytest.mark.xfail(reason="Current output is completely incorrect")
def test_fk5_equinox_and_epoch_j2000_0_to_topocentric_observed():
"""
http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(lon=Angle('-111.598333d'),
lat=Angle('31.956389d'),
height=2093.093 * u.m) # TODO: height correct?
obstime = Time('2010-01-01 12:00:00')
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(obstime=obstime, location=location,
temperature=0 * u.deg_C, pressure=0.781 * u.bar)
radec = SkyCoord('12h22m54.899s 15d49m20.57s', frame='fk5')
altaz_actual = radec.transform_to(altaz_frame)
altaz_expected = SkyCoord('264d55m06s 37d54m41s', frame='altaz')
# altaz_expected = SkyCoord('343.586827647d 15.7683070508d', frame='altaz')
# altaz_expected = SkyCoord('133.498195532d 22.0162383595d', frame='altaz')
distance = altaz_actual.separation(altaz_expected)
# print(altaz_actual)
# print(altaz_expected)
# print(distance)
"""TODO: Current output is completely incorrect ... xfailing this test for now.
    <SkyCoord (AltAz: obstime=2010-01-01 12:00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=133.4869896371561 deg, alt=67.97857990957701 deg>
<SkyCoord (AltAz: obstime=None, location=None, pressure=0.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=264.91833333333335 deg, alt=37.91138888888889 deg>
68d02m45.732s
"""
assert distance < 1 * u.arcsec
| bsd-3-clause | -2,548,433,149,477,834,000 | -3,273,978,498,428,403,700 | 43.685864 | 431 | 0.660457 | false |
jfarrell/thrift | test/py/SerializationTest.py | 21 | 17161 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from ThriftTest.ttypes import (
Bonk,
Bools,
LargeDeltas,
ListBonks,
NestedListsBonk,
NestedListsI32x2,
NestedListsI32x3,
NestedMixedx2,
Numberz,
VersioningTestV1,
VersioningTestV2,
Xtruct,
Xtruct2,
)
from Recursive.ttypes import RecTree
from Recursive.ttypes import RecList
from Recursive.ttypes import CoRec
from Recursive.ttypes import CoRec2
from Recursive.ttypes import VectorTest
from DebugProtoTest.ttypes import CompactProtoTestStruct, Empty
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TCompactProtocol, TJSONProtocol
from thrift.TSerialization import serialize, deserialize
import sys
import unittest
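# AbstractTest defines the shared round-trip (serialize/deserialize) checks; the
# concrete subclasses near the bottom each bind a specific protocol factory.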
class AbstractTest(unittest.TestCase):
def setUp(self):
self.v1obj = VersioningTestV1(
begin_in_both=12345,
old_string='aaa',
end_in_both=54321,
)
self.v2obj = VersioningTestV2(
begin_in_both=12345,
newint=1,
newbyte=2,
newshort=3,
newlong=4,
newdouble=5.0,
newstruct=Bonk(message="Hello!", type=123),
newlist=[7, 8, 9],
newset=set([42, 1, 8]),
newmap={1: 2, 2: 3},
newstring="Hola!",
end_in_both=54321,
)
self.bools = Bools(im_true=True, im_false=False)
self.bools_flipped = Bools(im_true=False, im_false=True)
self.large_deltas = LargeDeltas(
b1=self.bools,
b10=self.bools_flipped,
b100=self.bools,
check_true=True,
b1000=self.bools_flipped,
check_false=False,
vertwo2000=VersioningTestV2(newstruct=Bonk(message='World!', type=314)),
a_set2500=set(['lazy', 'brown', 'cow']),
vertwo3000=VersioningTestV2(newset=set([2, 3, 5, 7, 11])),
big_numbers=[2 ** 8, 2 ** 16, 2 ** 31 - 1, -(2 ** 31 - 1)]
)
self.compact_struct = CompactProtoTestStruct(
a_byte=127,
a_i16=32000,
a_i32=1000000000,
a_i64=0xffffffffff,
a_double=5.6789,
a_string="my string",
true_field=True,
false_field=False,
empty_struct_field=Empty(),
byte_list=[-127, -1, 0, 1, 127],
i16_list=[-1, 0, 1, 0x7fff],
i32_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0x7fffffff],
i64_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff],
double_list=[0.1, 0.2, 0.3],
string_list=["first", "second", "third"],
boolean_list=[True, True, True, False, False, False],
struct_list=[Empty(), Empty()],
byte_set=set([-127, -1, 0, 1, 127]),
i16_set=set([-1, 0, 1, 0x7fff]),
i32_set=set([1, 2, 3]),
i64_set=set([-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff]),
double_set=set([0.1, 0.2, 0.3]),
string_set=set(["first", "second", "third"]),
boolean_set=set([True, False]),
# struct_set=set([Empty()]), # unhashable instance
byte_byte_map={1: 2},
i16_byte_map={1: 1, -1: 1, 0x7fff: 1},
i32_byte_map={1: 1, -1: 1, 0x7fffffff: 1},
i64_byte_map={0: 1, 1: 1, -1: 1, 0x7fffffffffffffff: 1},
double_byte_map={-1.1: 1, 1.1: 1},
string_byte_map={"first": 1, "second": 2, "third": 3, "": 0},
boolean_byte_map={True: 1, False: 0},
byte_i16_map={1: 1, 2: -1, 3: 0x7fff},
byte_i32_map={1: 1, 2: -1, 3: 0x7fffffff},
byte_i64_map={1: 1, 2: -1, 3: 0x7fffffffffffffff},
byte_double_map={1: 0.1, 2: -0.1, 3: 1000000.1},
byte_string_map={1: "", 2: "blah", 3: "loooooooooooooong string"},
byte_boolean_map={1: True, 2: False},
# list_byte_map # unhashable
# set_byte_map={set([1, 2, 3]) : 1, set([0, 1]) : 2, set([]) : 0}, # unhashable
# map_byte_map # unhashable
byte_map_map={0: {}, 1: {1: 1}, 2: {1: 1, 2: 2}},
byte_set_map={0: set([]), 1: set([1]), 2: set([1, 2])},
byte_list_map={0: [], 1: [1], 2: [1, 2]},
)
self.nested_lists_i32x2 = NestedListsI32x2(
[
[1, 1, 2],
[2, 7, 9],
[3, 5, 8]
]
)
self.nested_lists_i32x3 = NestedListsI32x3(
[
[
[2, 7, 9],
[3, 5, 8]
],
[
[1, 1, 2],
[1, 4, 9]
]
]
)
self.nested_mixedx2 = NestedMixedx2(int_set_list=[
set([1, 2, 3]),
set([1, 4, 9]),
set([1, 2, 3, 5, 8, 13, 21]),
set([-1, 0, 1])
],
# note, the sets below are sets of chars, since the strings are iterated
map_int_strset={10: set('abc'), 20: set('def'), 30: set('GHI')},
map_int_strset_list=[
{10: set('abc'), 20: set('def'), 30: set('GHI')},
{100: set('lmn'), 200: set('opq'), 300: set('RST')},
{1000: set('uvw'), 2000: set('wxy'), 3000: set('XYZ')}]
)
self.nested_lists_bonk = NestedListsBonk(
[
[
[
Bonk(message='inner A first', type=1),
Bonk(message='inner A second', type=1)
],
[
Bonk(message='inner B first', type=2),
Bonk(message='inner B second', type=2)
]
]
]
)
self.list_bonks = ListBonks(
[
Bonk(message='inner A', type=1),
Bonk(message='inner B', type=2),
Bonk(message='inner C', type=0)
]
)
def _serialize(self, obj):
trans = TTransport.TMemoryBuffer()
prot = self.protocol_factory.getProtocol(trans)
obj.write(prot)
return trans.getvalue()
def _deserialize(self, objtype, data):
prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
ret = objtype()
ret.read(prot)
return ret
def testForwards(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v1obj))
self.assertEquals(obj.begin_in_both, self.v1obj.begin_in_both)
self.assertEquals(obj.end_in_both, self.v1obj.end_in_both)
def testBackwards(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v2obj))
self.assertEquals(obj.begin_in_both, self.v2obj.begin_in_both)
self.assertEquals(obj.end_in_both, self.v2obj.end_in_both)
def testSerializeV1(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v1obj))
self.assertEquals(obj, self.v1obj)
def testSerializeV2(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v2obj))
self.assertEquals(obj, self.v2obj)
def testBools(self):
self.assertNotEquals(self.bools, self.bools_flipped)
self.assertNotEquals(self.bools, self.v1obj)
obj = self._deserialize(Bools, self._serialize(self.bools))
self.assertEquals(obj, self.bools)
obj = self._deserialize(Bools, self._serialize(self.bools_flipped))
self.assertEquals(obj, self.bools_flipped)
rep = repr(self.bools)
self.assertTrue(len(rep) > 0)
def testLargeDeltas(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(LargeDeltas, self._serialize(self.large_deltas))
self.assertEquals(obj, self.large_deltas)
rep = repr(self.large_deltas)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x2(self):
obj = self._deserialize(NestedListsI32x2, self._serialize(self.nested_lists_i32x2))
self.assertEquals(obj, self.nested_lists_i32x2)
rep = repr(self.nested_lists_i32x2)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x3(self):
obj = self._deserialize(NestedListsI32x3, self._serialize(self.nested_lists_i32x3))
self.assertEquals(obj, self.nested_lists_i32x3)
rep = repr(self.nested_lists_i32x3)
self.assertTrue(len(rep) > 0)
def testNestedMixedx2(self):
obj = self._deserialize(NestedMixedx2, self._serialize(self.nested_mixedx2))
self.assertEquals(obj, self.nested_mixedx2)
rep = repr(self.nested_mixedx2)
self.assertTrue(len(rep) > 0)
def testNestedListsBonk(self):
obj = self._deserialize(NestedListsBonk, self._serialize(self.nested_lists_bonk))
self.assertEquals(obj, self.nested_lists_bonk)
rep = repr(self.nested_lists_bonk)
self.assertTrue(len(rep) > 0)
def testListBonks(self):
obj = self._deserialize(ListBonks, self._serialize(self.list_bonks))
self.assertEquals(obj, self.list_bonks)
rep = repr(self.list_bonks)
self.assertTrue(len(rep) > 0)
def testCompactStruct(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(CompactProtoTestStruct, self._serialize(self.compact_struct))
self.assertEquals(obj, self.compact_struct)
rep = repr(self.compact_struct)
self.assertTrue(len(rep) > 0)
def testIntegerLimits(self):
if (sys.version_info[0] == 2 and sys.version_info[1] <= 6):
print('Skipping testIntegerLimits for Python 2.6')
return
bad_values = [CompactProtoTestStruct(a_byte=128), CompactProtoTestStruct(a_byte=-129),
CompactProtoTestStruct(a_i16=32768), CompactProtoTestStruct(a_i16=-32769),
CompactProtoTestStruct(a_i32=2147483648), CompactProtoTestStruct(a_i32=-2147483649),
CompactProtoTestStruct(a_i64=9223372036854775808), CompactProtoTestStruct(a_i64=-9223372036854775809)
]
for value in bad_values:
self.assertRaises(Exception, self._serialize, value)
def testRecTree(self):
"""Ensure recursive tree node can be created."""
children = []
for idx in range(1, 5):
node = RecTree(item=idx, children=None)
children.append(node)
parent = RecTree(item=0, children=children)
serde_parent = self._deserialize(RecTree, self._serialize(parent))
self.assertEquals(0, serde_parent.item)
self.assertEquals(4, len(serde_parent.children))
for child in serde_parent.children:
# Cannot use assertIsInstance in python 2.6?
self.assertTrue(isinstance(child, RecTree))
def _buildLinkedList(self):
head = cur = RecList(item=0)
for idx in range(1, 5):
node = RecList(item=idx)
cur.nextitem = node
cur = node
return head
def _collapseLinkedList(self, head):
out_list = []
cur = head
while cur is not None:
out_list.append(cur.item)
cur = cur.nextitem
return out_list
def testRecList(self):
"""Ensure recursive linked list can be created."""
rec_list = self._buildLinkedList()
serde_list = self._deserialize(RecList, self._serialize(rec_list))
out_list = self._collapseLinkedList(serde_list)
self.assertEquals([0, 1, 2, 3, 4], out_list)
def testCoRec(self):
"""Ensure co-recursive structures can be created."""
item1 = CoRec()
item2 = CoRec2()
item1.other = item2
item2.other = item1
# NOTE [econner724,2017-06-21]: These objects cannot be serialized as serialization
# results in an infinite loop. fbthrift also suffers from this
# problem.
def testRecVector(self):
"""Ensure a list of recursive nodes can be created."""
mylist = [self._buildLinkedList(), self._buildLinkedList()]
myvec = VectorTest(lister=mylist)
serde_vec = self._deserialize(VectorTest, self._serialize(myvec))
golden_list = [0, 1, 2, 3, 4]
for cur_list in serde_vec.lister:
out_list = self._collapseLinkedList(cur_list)
self.assertEqual(golden_list, out_list)
class NormalBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory(fallback=False)
class CompactProtocolTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class AcceleratedCompactTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False)
class JSONProtocolTest(AbstractTest):
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
class AcceleratedFramedTest(unittest.TestCase):
def testSplit(self):
"""Test FramedTransport and BinaryProtocolAccelerated
Tests that TBinaryProtocolAccelerated and TFramedTransport
play nicely together when a read spans a frame"""
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
bigstring = "".join(chr(byte) for byte in range(ord("a"), ord("z") + 1))
databuf = TTransport.TMemoryBuffer()
prot = protocol_factory.getProtocol(databuf)
prot.writeI32(42)
prot.writeString(bigstring)
prot.writeI16(24)
data = databuf.getvalue()
cutpoint = len(data) // 2
parts = [data[:cutpoint], data[cutpoint:]]
framed_buffer = TTransport.TMemoryBuffer()
framed_writer = TTransport.TFramedTransport(framed_buffer)
for part in parts:
framed_writer.write(part)
framed_writer.flush()
self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)
# Recreate framed_buffer so we can read from it.
framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())
framed_reader = TTransport.TFramedTransport(framed_buffer)
prot = protocol_factory.getProtocol(framed_reader)
self.assertEqual(prot.readI32(), 42)
self.assertEqual(prot.readString(), bigstring)
self.assertEqual(prot.readI16(), 24)
class SerializersTest(unittest.TestCase):
def testSerializeThenDeserialize(self):
obj = Xtruct2(i32_thing=1,
struct_thing=Xtruct(string_thing="foo"))
s1 = serialize(obj)
for i in range(10):
self.assertEquals(s1, serialize(obj))
objcopy = Xtruct2()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
obj = Xtruct(string_thing="bar")
objcopy = Xtruct()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
# test booleans
obj = Bools(im_true=True, im_false=False)
objcopy = Bools()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
# test enums
for num, name in Numberz._VALUES_TO_NAMES.items():
obj = Bonk(message='enum Numberz value %d is string %s' % (num, name), type=num)
objcopy = Bonk()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(NormalBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedCompactTest))
suite.addTest(loader.loadTestsFromTestCase(CompactProtocolTest))
suite.addTest(loader.loadTestsFromTestCase(JSONProtocolTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedFramedTest))
suite.addTest(loader.loadTestsFromTestCase(SerializersTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| apache-2.0 | 2,513,282,579,917,732,000 | -2,066,869,113,711,043,800 | 36.551422 | 137 | 0.60014 | false |
jonls/lpd-monitor | btcon.py | 1 | 16600 |
import socket
import struct
import random
import hashlib
import errno
from gi.repository import GLib
from gi.repository import GObject
from bencode import bencode, bdecode, bdecode_all
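# Bitfield tracks piece availability with one bit per piece, most-significant bit first.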
class Bitfield(object):
def __init__(self, size, data=None):
if size < 0:
raise ValueError('Bitfield size must be non-negative')
self._size = size
self._data = bytearray((size+7)//8)
if data is not None:
for i in range(self._size):
bi = i // 8
if ord(data[bi]) & (1 << (7 - (i % 8))):
self.set(i)
def set(self, index):
if index >= self._size or index < 0:
raise IndexError('Invalid Bitfield index: %d' % index)
bi = index // 8
self._data[bi] |= 1 << (7 - (index % 8))
def count(self):
return sum(self)
def __iter__(self):
for i in range(self._size):
bi = i // 8
yield bool(self._data[bi] & (1 << (7 - (i % 8))))
def __len__(self):
return self._size
def __repr__(self):
return 'Bitfield(%d, %r)' % (self._size, ''.join(chr(x) for x in self._data))
class BTConnectionError(Exception):
pass
class BTConnection(GObject.GObject):
__gsignals__ = {
'state-changed': (GObject.SIGNAL_RUN_LAST, None, (int,)),
'metadata-changed': (GObject.SIGNAL_RUN_LAST, None, ()),
'peer-progress-changed': (GObject.SIGNAL_RUN_LAST, None, ())
}
STATE_NOT_CONNECTED = 0
STATE_HEADERS = 1
STATE_EXT_HEADERS = 2
STATE_RUNNING = 3
STATE_CLOSED = 4
HEADERS_LENGTH = 68
BYTE_EXT_EXTENSION = 44
BYTE_EXT_FAST_PEERS = 62
MSG_TYPE_CHOKE = 0
MSG_TYPE_UNCHOKE = 1
MSG_TYPE_INTERESTED = 2
MSG_TYPE_NOT_INTERESTED = 3
MSG_TYPE_HAVE = 4
MSG_TYPE_BITFIELD = 5
MSG_TYPE_REQUEST = 6
MSG_TYPE_PIECE = 7
MSG_TYPE_CANCEL = 8
MSG_TYPE_HAVE_ALL = 14
MSG_TYPE_HAVE_NONE = 15
MSG_TYPE_EXTENDED = 20
def __init__(self, infohash, peer_id=None):
super(BTConnection, self).__init__()
self._infohash = infohash
self._my_id = peer_id or ''.join(chr(random.randint(0, 255)) for i in range(20))
self._my_exts = {1: 'ut_metadata'}
self._metadata = None
self._ut_metadata_size = None
self._ut_metadata_buffer = ''
self._ut_metadata_last_req = None
self._peer_id = None
self._peer_byte_exts = set()
self._peer_exts = {}
self._peer_have = None
self._peer_have_queue = []
self._packet_len = None
self._packet = ''
self._packet_timeout = None
self._packet_callback = None
self._msg_len = None
self._msg_callback = None
self._socket = None
self._socket_queue = []
self._state = self.STATE_NOT_CONNECTED
self._input_source = None
self._output_source = None
self._connect_source = None
self._hangup_source = None
def open(self, address):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setblocking(0)
self._socket.bind(('', 0))
self._connect_source = GLib.io_add_watch(self._socket, GLib.IO_OUT, self._socket_connect_cb)
self._hangup_source = GLib.io_add_watch(self._socket, GLib.IO_HUP, self._socket_hangup_cb)
self._packet_expect_input(self.HEADERS_LENGTH, self._handle_headers, 30)
err = self._socket.connect_ex(address)
if err not in (0, errno.EINPROGRESS):
raise BTConnectionError('Unable to connect: {}'.format(errno.errorcode[err]))
self._send_headers()
self._change_state(self.STATE_HEADERS)
def close(self):
self._close_sources()
self._socket.close()
self._change_state(self.STATE_CLOSED)
print('Closed')
@property
def metadata(self):
return self._metadata
@property
def peer_progress(self):
if self._peer_have is None:
return None
return self._peer_have.count()
@property
def piece_count(self):
if self._metadata is None:
return None
return (self.data_length + self._metadata['piece length'] - 1) // self._metadata['piece length']
@property
def data_length(self):
if self._metadata is None:
return None
if 'files' in self._metadata:
return sum(f['length'] for f in self._metadata['files'])
else:
return self._metadata['length']
def _change_state(self, state):
self._state = state
self.emit('state-changed', self._state)
def _close_sources(self):
for source in (self._hangup_source, self._connect_source,
self._input_source, self._output_source,
self._packet_timeout):
if source is not None:
GLib.source_remove(source)
def _socket_connect_cb(self, source, cond):
err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
print 'Unable to connect: {}'.format(errno.errorcode[err])
self.close()
return False
def _socket_hangup_cb(self, source, cond):
print('Hangup')
self.close()
return False
def _socket_input_cb(self, source, cond):
self._packet += self._socket.recv(self._packet_len-len(self._packet))
if len(self._packet) == self._packet_len:
GLib.source_remove(self._packet_timeout)
packet = self._packet
self._packet = ''
self._packet_callback(packet)
return False
return True
def _socket_output_cb(self, source, cond):
while len(self._socket_queue) > 0:
packet = self._socket_queue[0]
n = self._socket.send(packet)
if n < len(packet):
self._socket_queue[0] = packet[n:]
return True
else:
self._socket_queue.pop(0)
return False
def _packet_timeout_cb(self):
print('No activity')
self.close()
return False
def _packet_expect_input(self, length, callback, timeout):
self._packet_len = length
self._packet_callback = callback
self._packet_timeout = GLib.timeout_add_seconds(timeout, self._packet_timeout_cb)
self._input_source = GLib.io_add_watch(self._socket, GLib.IO_IN, self._socket_input_cb)
def _packet_send(self, packet):
self._socket_queue.append(packet)
if len(self._socket_queue) == 1:
GLib.io_add_watch(self._socket, GLib.IO_OUT, self._socket_output_cb)
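    # Handshake layout: <19><'BitTorrent protocol'><8 reserved extension bytes>
    # <20-byte infohash><20-byte peer id> -- 68 bytes in total (HEADERS_LENGTH).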
def _send_headers(self):
bt_header = chr(19) + 'BitTorrent protocol'
ext_bytes = '\x00\x00\x00\x00\x00\x10\x00\x04'
self._packet_send(bt_header + ext_bytes + self._infohash + self._my_id)
def _send_message(self, msg):
msg_len = struct.pack('>L', len(msg))
self._packet_send(msg_len + msg)
def _send_ext_headers(self):
msg = chr(20) + chr(0) + bencode({'m': dict((v, k) for k, v in self._my_exts.iteritems())})
self._send_message(msg)
def _send_initial_have(self):
if self.BYTE_EXT_FAST_PEERS in self._peer_byte_exts:
msg = chr(self.MSG_TYPE_HAVE_NONE)
self._send_message(msg)
def _ut_metadata_send_request(self, piece):
ext_id = self._peer_exts['ut_metadata']
msg = chr(20) + chr(ext_id) + bencode({'msg_type': 0, 'piece': piece})
self._ut_metadata_last_req = piece
self._send_message(msg)
def _ut_metadata_validate(self):
def validate_files_list(files):
if len(files) == 0:
return False
for f in files:
if not (type(f) is dict and
'length' in f and type(f['length']) is int and
'path' in f and type(f['path']) is list and
len(f['path']) > 0 and all(f['path'])):
return False
return True
if hashlib.sha1(self._ut_metadata_buffer).digest() == self._infohash:
info_dict = bdecode(self._ut_metadata_buffer)
if ('name' in info_dict and type(info_dict['name']) is str and
'piece length' in info_dict and type(info_dict['piece length']) is int and
'pieces' in info_dict and type(info_dict['pieces']) is str and
(('length' in info_dict and type(info_dict['length']) is int) or
('files' in info_dict and type(info_dict['files']) is list and
validate_files_list(info_dict['files'])))):
self._ut_metadata_buffer = None
self._metadata = info_dict
if len(self._metadata['pieces']) != 20*self.piece_count:
self._metadata = None
return False
self.emit('metadata-changed')
self._play_have_queue()
return True
return False
def _handle_headers(self, packet):
bt_header_len, packet = ord(packet[:1]), packet[1:]
if bt_header_len != 19:
self.close()
return
bt_header, packet = packet[:bt_header_len], packet[bt_header_len:]
if bt_header != 'BitTorrent protocol':
self.close()
return
print('Connected to {!r}'.format(self._socket.getpeername()))
ext_bytes, packet = packet[:8], packet[8:]
print('Extension bytes {!r}'.format(ext_bytes))
if ord(ext_bytes[7]) & 0x4:
self._peer_byte_exts.add(self.BYTE_EXT_FAST_PEERS)
if ord(ext_bytes[5]) & 0x10:
self._peer_byte_exts.add(self.BYTE_EXT_EXTENSION)
infohash, packet = packet[:20], packet[20:]
if infohash != self._infohash:
self.close()
return
self._peer_id = packet[:20]
print('Peer id {!r}'.format(self._peer_id))
if self.BYTE_EXT_EXTENSION in self._peer_byte_exts:
self._change_state(self.STATE_EXT_HEADERS)
self._msg_callback = self._handle_ext_headers
self._send_ext_headers()
else:
self._change_state(self.STATE_RUNNING)
self._msg_callback = self._handle_message
self._send_initial_have()
self._packet_expect_input(4, self._handle_message_input, 240)
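    # Peer-wire messages are length-prefixed: read a 4-byte big-endian length first,
    # then that many payload bytes (a length of 0 is a keep-alive).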
def _handle_message_input(self, packet):
if self._msg_len is None:
self._msg_len = struct.unpack('>L', packet)[0]
if self._msg_len == 0:
self._msg_len = None
self._packet_expect_input(4, self._handle_message_input, 240)
if self._msg_len > 64*1024*1024:
self.close()
return
else:
self._packet_expect_input(self._msg_len, self._handle_message_input, 60)
else:
self._msg_callback(packet)
self._msg_len = None
self._packet_expect_input(4, self._handle_message_input, 240)
def _handle_ext_headers(self, msg):
msg_type, msg = ord(msg[:1]), msg[1:]
if msg_type != self.MSG_TYPE_EXTENDED or len(msg) < 2:
self.close()
return
msg_ext_type, msg = ord(msg[:1]), msg[1:]
if msg_ext_type != 0:
self.close()
return
msg = bdecode(msg)
print('Extended handshake: {!r}'.format(msg))
if 'm' in msg and type(msg['m']) is dict:
for ext, ext_id in msg['m'].iteritems():
self._peer_exts[ext] = ext_id
if 'metadata_size' in msg and type(msg['metadata_size']) is int:
self._ut_metadata_size = msg['metadata_size']
self._change_state(self.STATE_RUNNING)
self._msg_callback = self._handle_message
self._send_initial_have()
if self._peer_exts.get('ut_metadata', 0) > 0:
self._ut_metadata_send_request(0)
def _play_have_queue(self):
if len(self._peer_have_queue) > 0:
msg_type, msg = self._peer_have_queue.pop(0)
self._handle_first_have_message(msg_type, msg)
while len(self._peer_have_queue) > 0:
msg_type, msg = self._peer_have_queue.pop(0)
self._handle_have_message(msg_type, msg)
def _handle_first_have_message(self, msg_type, msg):
def handle_bitfield(msg):
if 8*len(msg) < self.piece_count:
self.close()
return
self._peer_have = Bitfield(self.piece_count, msg)
def handle_have_all():
self._peer_have = Bitfield(self.piece_count)
for i in range(len(self._peer_have)):
self._peer_have.set(i)
def handle_have_none():
self._peer_have = Bitfield(self.piece_count)
if msg_type == self.MSG_TYPE_BITFIELD:
handle_bitfield(msg)
elif msg_type == self.MSG_TYPE_HAVE_ALL:
handle_have_all()
elif msg_type == self.MSG_TYPE_HAVE_NONE:
handle_have_none()
elif (msg_type == self.MSG_TYPE_HAVE and
not self.BYTE_EXT_FAST_PEERS in self._peer_byte_exts):
self._peer_have = Bitfield(self.piece_count)
self._handle_have_message(msg_type, msg)
else:
self.close()
return
self.emit('peer-progress-changed')
def _handle_have_message(self, msg_type, msg):
if msg_type == self.MSG_TYPE_HAVE:
index = struct.unpack('>L', msg)[0]
self._peer_have.set(index)
else:
self.close()
return
self.emit('peer-progress-changed')
def _handle_message(self, msg):
msg_type, msg = ord(msg[:1]), msg[1:]
def print_message():
print('Message: {}, {!r}'.format(msg_type, msg))
if ((msg_type == self.MSG_TYPE_HAVE and len(msg) == 4) or
(msg_type == self.MSG_TYPE_HAVE_ALL and len(msg) == 1) or
(msg_type == self.MSG_TYPE_HAVE_NONE and len(msg) == 1) or
msg_type == self.MSG_TYPE_BITFIELD):
if self.piece_count is None:
self._peer_have_queue.append((msg_type, msg))
elif self._peer_have is None:
self._handle_first_have_message(msg_type, msg)
else:
self._handle_have_message(msg_type, msg)
elif msg_type == self.MSG_TYPE_EXTENDED:
if len(msg) < 1:
self.close()
return
msg_ext_id, msg = ord(msg[:1]), msg[1:]
if msg_ext_id > 0 and msg_ext_id in self._my_exts:
msg_ext = self._my_exts[msg_ext_id]
if msg_ext == 'ut_metadata':
msg, rest = bdecode_all(msg)
total_pieces = (self._ut_metadata_size + (2**14-1)) / (2**14)
last_piece_size = self._ut_metadata_size - (2**14)*(total_pieces-1)
if 'msg_type' in msg and type(msg['msg_type']) is int:
if msg['msg_type'] == 0:
pass
elif msg['msg_type'] == 1:
if ('piece' in msg and type(msg['piece']) is int and
msg['piece'] == self._ut_metadata_last_req and
((msg['piece'] < total_pieces - 1 and
len(rest) == 2**14) or
(msg['piece'] == total_pieces - 1 and
len(rest) == last_piece_size))):
self._ut_metadata_buffer += rest
print('Metadata download: {}%'.format(int(100*float(self._ut_metadata_last_req+1)/total_pieces)))
if msg['piece'] == total_pieces - 1:
self._ut_metadata_last_req = None
self._ut_metadata_validate()
else:
self._ut_metadata_send_request(self._ut_metadata_last_req+1)
elif msg['msg_type'] == 2:
pass
else:
self.close()
return
elif msg_ext_id == 0:
print_message()
else:
self.close()
return
else:
print_message()
| mit | -7,686,328,247,688,158,000 | 3,118,681,562,662,043,000 | 33.016393 | 129 | 0.527169 | false |
brandond/ansible | lib/ansible/modules/windows/win_uri.py | 13 | 6899 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Corwin Brown <[email protected]>
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_uri
version_added: '2.1'
short_description: Interacts with webservices
description:
- Interacts with FTP, HTTP and HTTPS web services.
- Supports Digest, Basic and WSSE HTTP authentication mechanisms.
- For non-Windows targets, use the M(uri) module instead.
options:
url:
description:
- Supports FTP, HTTP or HTTPS URLs in the form of (ftp|http|https)://host.domain:port/path.
type: str
required: yes
method:
description:
- The HTTP Method of the request or response.
type: str
choices: [ CONNECT, DELETE, GET, HEAD, MERGE, OPTIONS, PATCH, POST, PUT, REFRESH, TRACE ]
default: GET
content_type:
description:
- Sets the "Content-Type" header.
type: str
body:
description:
- The body of the HTTP request/response to the web service.
type: raw
user:
description:
- Username to use for authentication.
type: str
version_added: '2.4'
password:
description:
- Password to use for authentication.
type: str
version_added: '2.4'
force_basic_auth:
description:
- By default the authentication information is only sent when a webservice
responds to an initial request with a 401 status. Since some basic auth
services do not properly send a 401, logins will fail.
- This option forces the sending of the Basic authentication header upon
the initial request.
type: bool
default: no
version_added: '2.5'
dest:
description:
- Output the response body to a file.
type: path
version_added: '2.3'
headers:
description:
- Extra headers to set on the request, see the examples for more details on
how to set this.
type: dict
creates:
description:
- A filename, when it already exists, this step will be skipped.
type: path
version_added: '2.4'
removes:
description:
- A filename, when it does not exist, this step will be skipped.
type: path
version_added: '2.4'
return_content:
description:
- Whether or not to return the body of the response as a "content" key in
the dictionary result. If the reported Content-type is
"application/json", then the JSON is additionally loaded into a key
called C(json) in the dictionary results.
type: bool
default: no
version_added: '2.4'
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the request.
- Can also be comma separated list of status codes.
type: list
default: 200
version_added: '2.4'
timeout:
description:
- Specifies how long the request can be pending before it times out (in seconds).
- The value 0 (zero) specifies an indefinite time-out.
- A Domain Name System (DNS) query can take up to 15 seconds to return or time out.
If your request contains a host name that requires resolution, and you set
C(timeout) to a value greater than zero, but less than 15 seconds, it can
take 15 seconds or more before your request times out.
type: int
default: 30
version_added: '2.4'
follow_redirects:
description:
- Whether or not the C(win_uri) module should follow redirects.
- C(all) will follow all redirects.
- C(none) will not follow any redirects.
- C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a C(GET) or C(HEAD) on the URI to which it is being redirected.
type: str
choices: [ all, none, safe ]
default: safe
version_added: '2.4'
maximum_redirection:
description:
- Specifies how many times C(win_uri) redirects a connection to an alternate
Uniform Resource Identifier (URI) before the connection fails.
- If C(maximum_redirection) is set to 0 (zero)
or C(follow_redirects) is set to C(none),
or set to C(safe) when not doing C(GET) or C(HEAD) it prevents all redirection.
type: int
default: 5
version_added: '2.4'
validate_certs:
description:
    - If C(no), SSL certificates will not be validated. This should only be
      set to C(no) on personally controlled sites using self-signed
      certificates.
type: bool
default: yes
version_added: '2.4'
client_cert:
description:
- Specifies the client certificate (.pfx) that is used for a secure web request.
- The WinRM connection must be authenticated with C(CredSSP) if the
certificate file is not password protected.
- Other authentication types can set I(client_cert_password) when the cert
is password protected.
type: path
version_added: '2.4'
client_cert_password:
description:
- The password for the client certificate (.pfx) file that is used for a
secure web request.
type: str
version_added: '2.5'
seealso:
- module: uri
- module: win_get_url
author:
- Corwin Brown (@blakfeld)
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Perform a GET and Store Output
win_uri:
url: http://example.com/endpoint
register: http_output
# Set a HOST header to hit an internal webserver:
- name: Hit a Specific Host on the Server
win_uri:
url: http://example.com/
method: GET
headers:
host: www.somesite.com
- name: Perform a HEAD on an Endpoint
win_uri:
url: http://www.example.com/
method: HEAD
- name: POST a Body to an Endpoint
win_uri:
url: http://www.somesite.com/
method: POST
body: "{ 'some': 'json' }"
'''
RETURN = r'''
elapsed:
description: The number of seconds that elapsed while performing the download.
returned: always
type: float
sample: 23.2
url:
description: The Target URL.
returned: always
type: str
sample: https://www.ansible.com
status_code:
description: The HTTP Status Code of the response.
returned: success
type: int
sample: 200
status_description:
description: A summary of the status.
returned: success
type: str
sample: OK
content:
description: The raw content of the HTTP response.
returned: success and return_content is True
type: str
sample: '{"foo": "bar"}'
content_length:
description: The byte size of the response.
returned: success
type: int
sample: 54447
json:
description: The json structure returned under content as a dictionary.
returned: success and Content-Type is "application/json" or "application/javascript" and return_content is True
type: dict
sample: {"this-is-dependent": "on the actual return content"}
'''
| gpl-3.0 | 6,385,540,270,731,466,000 | 2,338,802,119,717,146,000 | 30.076577 | 113 | 0.679809 | false |
akozumpl/yum | docs/sphinxdocs/rstgenerator.py | 2 | 7752 |
#! /usr/bin/python
import sys, re, os
def generateFile(input_directory, file_name, output_directory,
package_heirarchy=None, module_name=None):
"""Generate a rst file telling sphinx to just generate documentation
for the public interface automatically. Output will be written to
*file_name*.rst in the current directory.
:param input_directory: a string specifying the directory containing the
source code file
:param file_name: the name of the python source code file to generate
a sphinx rst file describing
    :param output_directory: a string specifying the directory where
the generated rst file should be placed. If *output_directory* does
not already exist, it will be created
:param package_heirarchy: a list of strings, where each name is
the name of a package, in the order of the hierarchy
:param module_name: the name of the module. If not given, the .py is
removed from *file_name* to produce the module_name
"""
#Stick all output into a list of strings, then just join it and output
#it all in on go.
output = []
# Create the output directory if it doesn't already exist. Note that
# if the directory is created between the check and the creation, it
    # might cause issues, but I don't think this is at all likely to happen
if not os.path.exists(output_directory):
try:
os.makedirs(output_directory)
except OSError as e:
print "Error creating the output directory"
print e.args
try:
#Open the file
f = open(os.path.join(input_directory, file_name), 'r')
#Do the module output
if not module_name:
module_name = re.search('(\w+).py$', file_name).group(1)
#Append the package names, if there are any
full_module_name = module_name
if package_heirarchy:
full_module_name = '.'.join(package_heirarchy) + '.' + module_name
output.append(full_module_name)
output.append('=' * len(full_module_name))
output.append('.. automodule:: %s\n' % full_module_name)
#Read the file, and do output for classes
class_reg = re.compile('^class (\w+)')
func_reg = re.compile('^def ((?:[a-zA-Z0-9]+_)*[a-zA-Z0-9]+)')
#We don't need a blank line between autofunction directives, but we do
#need one between autofunctions and headings etc. for classes. This
#keeps track if we're switching from autofunctions to classes, so we
#can add that blank line.
finding_functions = False
for line in iter(f):
#Search for classes
match = class_reg.match(line)
if match is not None:
if finding_functions:
output.append('')
finding_functions = False
class_name = match.group(1)
output.append(class_name)
output.append('-' * len(class_name))
output.append('''.. autoclass:: %s
:members:
:show-inheritance:
''' % class_name)
#Search for top level functions
else:
match = func_reg.match(line)
if match is not None:
func_name = match.group(1)
output.append('.. autofunction:: ' + func_name)
finding_functions = True
f.close()
except IOError as e:
print "Error opening the input file : ", os.path.join(input_directory, file_name)
print e.args[1]
else:
#Write the output
try:
output_file_name = os.path.join(output_directory, module_name) + '.rst'
f = open(output_file_name, 'w')
f.write('\n'.join(output))
except IOError as e:
print "Error opening the output file : ", output_file_name
print e.args[1]
def generateIndex(module_list, output_directory):
"""Create an index.rst file for sphinx in the given directory.
:param module_list: a list of the names of the modules to list in
the index file
:param output_directory: the directory to create the index file in
"""
#Sort the module_list
module_list.sort()
try:
#open the file
f = open(os.path.join(output_directory, 'index.rst'), 'w')
#Do the output
f.write(""".. Yum documentation master file, created by
sphinx-quickstart on Mon Jun 27 14:01:20 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Yum's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 2
""")
f.write('\n '.join(module_list))
f.write("""
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
""")
except IOError as e:
print "Error opening the output file."
print e.args[1]
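# Walk source_directory, mirroring its layout under output_directory, and emit one
# .rst file per Python module plus an index.rst that lists them all.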
def generateAll(source_directory, output_directory):
#Verify that both the source and output directories exist
# Keep a set of file names that are packages. This is
# useful so that later we will be able to figure out full
# module names.
packages = set()
# Keep a list of tuples containing python module names and
# relative paths, so that we can build the index file later
modules = []
# Walk the directory tree
for dirpath, dirnames, filenames in os.walk(source_directory, topdown=True):
# print dirpath
# print dirnames
# print filenames
# print
        # Add the current directory to packages if __init__.py exists
if '__init__.py' in filenames:
packages.add(dirpath)
# Find the heirarchy of packages that we are currently in
package_heirarchy = []
#Recurse up to the root
dirpath_i = dirpath
while dirpath_i != '/':
if dirpath_i in packages:
dirpath_i, tail = os.path.split(dirpath_i)
package_heirarchy.insert(0, tail)
else:
break
# Find the relative output directory, mirroring the input
# directory structure
relative_output_directory = ''
if not os.path.samefile(dirpath, source_directory):
relative_output_directory = os.path.relpath(dirpath, source_directory)
# Don't recurse into directories that are hidden, or for docs
for directory in dirnames:
if directory == "docs" or directory.startswith("."):
dirnames.remove(directory)
# Generate the rst for a file if it is a python source code file
for file_name in filenames:
# Skip file names that contain dashes, since they're not
# valid module names, so we won't be able to import them
# to generate the documentation anyway
if '-' in file_name:
continue
if file_name.endswith('.py'):
module_name = file_name.partition('.')[0]
modules.append(os.path.join(relative_output_directory,
module_name))
generateFile(dirpath, file_name,
os.path.join(output_directory, relative_output_directory),
package_heirarchy, module_name)
# Create the index.rst file
generateIndex(modules, output_directory)
if __name__ == "__main__":
generateAll(os.getcwd(), os.getcwd())
| gpl-2.0 | -1,806,236,975,406,009,000 | 539,283,290,078,075,900 | 33.918919 | 89 | 0.58372 | false |
cholokei/android_kernel_samsung_milletwifikx | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 |
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
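    # Illustrative sketch (not in the original script; PIDs are made up): starting
    # from a snapshot with tasks == (0, 101), sched_switch(prev=101, prev_state="S",
    # next=202) yields a snapshot with tasks (0, 202) and event
    # RunqueueEventSleep(101); with prev_state "R" task 101 stays runnable and the
    # resulting tasks are (0, 101, 202) with an unknown event.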
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
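    # Illustrative sketch (not in the original script): with three slices covering
    # roughly [0, 10], [10, 20] and [20, 30], find_time_slice(15) bisects on the
    # start/end bounds and returns index 1, while a timestamp beyond the last
    # slice (e.g. 35) leaves found at -1.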
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
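    # Descriptive note (added; not in the original script): load_rate maps linearly
    # to a shade of red -- a CPU holding none of the total load is painted
    # (0xff, 0xff, 0xff) (white) and a CPU holding all of it is painted
    # (0xff, 0, 0) (pure red); CPUs that triggered the event additionally get the
    # event's color as the top band.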
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
gpl-2.0
| 8,122,374,958,082,674,000 | -5,581,742,014,222,263,000 | 24.314534 | 88 | 0.675407 | false |
Livefyre/mongo-connector
|
mongo_connector/doc_managers/solr_doc_manager.py
|
7
|
11191
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import re
import json
from pysolr import Solr, SolrError
from mongo_connector import errors
from mongo_connector.constants import (DEFAULT_COMMIT_INTERVAL,
DEFAULT_MAX_BULK)
from mongo_connector.util import retry_until_ok
from mongo_connector.doc_managers import DocManagerBase, exception_wrapper
from mongo_connector.doc_managers.formatters import DocumentFlattener
# pysolr only has 1 exception: SolrError
wrap_exceptions = exception_wrapper({
SolrError: errors.OperationFailed})
ADMIN_URL = 'admin/luke?show=schema&wt=json'
decoder = json.JSONDecoder()
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
    The reason for storing id/doc pairs, as opposed to docs, is so that multiple
    updates to the same doc reflect the most up-to-date version, as opposed to
    multiple, slightly different versions of a doc.
"""
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
"""Verify Solr URL and establish a connection.
"""
self.solr = Solr(url)
self.unique_key = unique_key
# pysolr does things in milliseconds
if auto_commit_interval is not None:
self.auto_commit_interval = auto_commit_interval * 1000
else:
self.auto_commit_interval = None
self.chunk_size = chunk_size
self.field_list = []
self._build_fields()
self._formatter = DocumentFlattener()
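    # Minimal usage sketch (not part of mongo-connector; the URL and settings are
    # hypothetical): connect to a local Solr core and commit on every write.
    #
    #   dm = DocManager("http://localhost:8983/solr", auto_commit_interval=0,
    #                   unique_key='_id')
    #   dm.upsert({'_id': 1, 'title': 'hello world'})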
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
"""
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list
@wrap_exceptions
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
if wc_pattern[0] == "*":
self._dynamic_field_regexes.append(
re.compile(".*%s\Z" % wc_pattern[1:]))
elif wc_pattern[-1] == "*":
self._dynamic_field_regexes.append(
re.compile("\A%s.*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# Translate the _id field to whatever unique key we're using.
# _id may not exist in the doc, if we retrieved it from Solr
# as part of update.
if '_id' in doc:
doc[self.unique_key] = doc.pop("_id")
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
flat_doc = self._formatter.format_document(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc
def stop(self):
""" Stops the instance
"""
pass
def apply_update(self, doc, update_spec):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
        if '$set' not in update_spec and '$unset' not in update_spec:
# update spec contains the new document
update_spec['_ts'] = doc['_ts']
update_spec['ns'] = doc['ns']
update_spec['_id'] = doc['_id']
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
# MongoDB < 2.5.2 reports $unset for fields that don't exist within
# the document being updated.
keys_to_pop = []
for key in doc:
if key.startswith(to_unset):
if key == to_unset or key[len(to_unset)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
return doc
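    # Illustrative sketch (added; values are made up): given the flattened doc
    # {'_id': 1, 'a.b': 5, 'e.0': 6} and update_spec
    # {'$set': {'a': {'c': 7}}, '$unset': {'e': True}}, apply_update pops the old
    # dotted keys 'a.b' and 'e.0' and returns {'_id': 1, 'a': {'c': 7}}; the new
    # value is flattened again later when the document is upserted.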
@wrap_exceptions
def update(self, doc, update_spec):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
# Commit outstanding changes so that the document to be updated is the
# same version to which the changes apply.
self.commit()
query = "%s:%s" % (self.unique_key, str(doc['_id']))
results = self.solr.search(query)
if not len(results):
# Document may not be retrievable yet
self.commit()
results = self.solr.search(query)
# Results is an iterable containing only 1 result
for doc in results:
updated = self.apply_update(doc, update_spec)
# A _version_ of 0 will always apply the update
updated['_version_'] = 0
self.upsert(updated)
return updated
@wrap_exceptions
def upsert(self, doc):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc)],
commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc)], commit=False)
@wrap_exceptions
def bulk_upsert(self, docs):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
if self.auto_commit_interval is not None:
add_kwargs = {
"commit": (self.auto_commit_interval == 0),
"commitWithin": str(self.auto_commit_interval)
}
else:
add_kwargs = {"commit": False}
cleaned = (self._clean_doc(d) for d in docs)
if self.chunk_size > 0:
batch = list(next(cleaned) for i in range(self.chunk_size))
while batch:
self.solr.add(batch, **add_kwargs)
batch = list(next(cleaned)
for i in range(self.chunk_size))
else:
self.solr.add(cleaned, **add_kwargs)
@wrap_exceptions
def remove(self, doc):
"""Removes documents from Solr
The input is a python dictionary that represents a mongo document.
"""
self.solr.delete(id=str(doc["_id"]),
commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _remove(self):
"""Removes everything
"""
self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _stream_search(self, query):
"""Helper method for iterating over Solr search results."""
for doc in self.solr.search(query, rows=100000000):
if self.unique_key != "_id":
doc["_id"] = doc.pop(self.unique_key)
yield doc
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range."""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self._stream_search(query)
@wrap_exceptions
def _search(self, query):
"""For test purposes only. Performs search on Solr with given query
Does not have to be implemented.
"""
return self._stream_search(query)
def commit(self):
"""This function is used to force a commit.
"""
retry_until_ok(self.solr.commit)
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
#search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
for r in result:
r['_id'] = r.pop(self.unique_key)
return r
|
apache-2.0
| 3,558,653,287,738,158,000 | -6,737,511,519,091,860,000 | 36.680135 | 80 | 0.588151 | false |
culots/kernel_lge_madai
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
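# Illustrative invocation (a sketch, not output of this script): assuming the
# file is saved as tcm_mod_builder.py and run from a directory two levels below
# the kernel tree root (tcm_dir is derived from os.getcwd() + "/../../"), a
# hypothetical call would be
#   python tcm_mod_builder.py -m tcm_example -p iSCSI
# main() then creates drivers/target/tcm_example/, emits the include/configfs
# skeletons plus Makefile and Kconfig, and optionally wires the module into
# drivers/target/Makefile and drivers/target/Kconfig via the prompts above.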
|
gpl-2.0
| -4,793,507,202,238,396,000 | -4,194,426,033,606,114,300 | 37.930451 | 162 | 0.572498 | false |
ehirt/odoo
|
addons/email_template/__init__.py
|
381
|
1144
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import email_template
import wizard
import res_partner
import ir_actions
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -2,901,899,773,568,940,500 | -4,797,884,887,942,258,000 | 41.37037 | 78 | 0.628497 | false |
mrtnrdl/.macdots
|
scripts/bin/platform-tools/systrace/catapult/dependency_manager/dependency_manager/cloud_storage_info_unittest.py
|
4
|
10473
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import stat
import unittest
import mock
from pyfakefs import fake_filesystem_unittest
from py_utils import cloud_storage
from dependency_manager import archive_info
from dependency_manager import cloud_storage_info
from dependency_manager import exceptions
class CloudStorageInfoTest(unittest.TestCase):
def testInitCloudStorageInfoErrors(self):
# Must specify cloud storage information atomically.
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', None, None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, 'cs_hash', None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, 'download_path', None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, None, 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, 'cs_hash', 'download_path', 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', None, 'download_path', 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash', None, 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash', 'download_path', None)
def testInitWithVersion(self):
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
'cs_remote_path', version_in_cs='version_in_cs')
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, 'cs_hash',
'download_path', 'cs_remote_path', version_in_cs='version_in_cs')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
version_in_cs='version_in_cs')
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual('version_in_cs', cs_info._version_in_cs)
def testInitWithArchiveInfoErrors(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None, None,
archive_info=zip_info)
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
'cs_remote_path', archive_info=zip_info)
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, 'cs_bucket', 'cs_hash',
None, 'cs_remote_path', archive_info=zip_info)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash',
'cs_remote_path', None, version_in_cs='version',
archive_info=zip_info)
def testInitWithArchiveInfo(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
archive_info=zip_info)
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual(zip_info, cs_info._archive_info)
self.assertFalse(cs_info._version_in_cs)
def testInitWithVersionAndArchiveInfo(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path',
'cs_remote_path', version_in_cs='version_in_cs',
archive_info=zip_info)
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual(zip_info, cs_info._archive_info)
self.assertEqual('version_in_cs', cs_info._version_in_cs)
def testInitMinimumCloudStorageInfo(self):
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket',
'cs_hash', 'download_path',
'cs_remote_path')
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertFalse(cs_info._version_in_cs)
self.assertFalse(cs_info._archive_info)
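# Minimal construction sketch mirroring the tests above (all four positional
# arguments -- bucket, hash, download path and remote path -- must be supplied
# together; the values below are placeholders):
#   cs_info = cloud_storage_info.CloudStorageInfo(
#       'my_bucket', 'abc123hash', '/tmp/dep', 'remote/dep/path')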
class TestGetRemotePath(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.config_path = '/test/dep_config.json'
self.fs.CreateFile(self.config_path, contents='{}')
self.download_path = '/foo/download_path'
self.fs.CreateFile(
self.download_path, contents='1010110', st_mode=stat.S_IWOTH)
self.cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
version_in_cs='1.2.3.4',)
def tearDown(self):
self.tearDownPyfakefs()
@mock.patch(
'py_utils.cloud_storage.GetIfHashChanged')
def testGetRemotePathNoArchive(self, cs_get_mock):
def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
del cs_path, bucket, file_hash
if not os.path.exists(download_path):
self.fs.CreateFile(download_path, contents='1010001010101010110101')
cs_get_mock.side_effect = _GetIfHashChangedMock
# All of the needed information is given, and the downloaded path exists
# after calling cloud storage.
self.assertEqual(
os.path.abspath(self.download_path),
self.cs_info.GetRemotePath())
self.assertTrue(os.stat(self.download_path).st_mode & stat.S_IXUSR)
# All of the needed information is given, but the downloaded path doesn't
# exists after calling cloud storage.
self.fs.RemoveObject(self.download_path)
cs_get_mock.side_effect = [True]
self.assertRaises(
exceptions.FileNotFoundError, self.cs_info.GetRemotePath)
@mock.patch(
'dependency_manager.dependency_manager_util.UnzipArchive')
@mock.patch(
'dependency_manager.cloud_storage_info.cloud_storage.GetIfHashChanged') # pylint: disable=line-too-long
def testGetRemotePathWithArchive(self, cs_get_mock, unzip_mock):
def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
del cs_path, bucket, file_hash
if not os.path.exists(download_path):
self.fs.CreateFile(download_path, contents='1010001010101010110101')
cs_get_mock.side_effect = _GetIfHashChangedMock
unzip_path = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir')
path_within_archive = os.path.join('path', 'within', 'archive')
dep_path = os.path.join(unzip_path, path_within_archive)
def _UnzipFileMock(archive_file, unzip_location, tmp_location=None):
del archive_file, tmp_location
self.fs.CreateFile(dep_path)
self.fs.CreateFile(os.path.join(unzip_location, 'extra', 'path'))
self.fs.CreateFile(os.path.join(unzip_location, 'another_extra_path'))
unzip_mock.side_effect = _UnzipFileMock
# Create a stale directory that's expected to get deleted
stale_unzip_path_glob = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir_*')
stale_path = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir_stale')
self.fs.CreateDirectory(stale_path)
self.fs.CreateFile(os.path.join(stale_path, 'some_file'))
self.assertFalse(os.path.exists(dep_path))
zip_info = archive_info.ArchiveInfo(
self.download_path, unzip_path, path_within_archive,
stale_unzip_path_glob)
self.cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
version_in_cs='1.2.3.4', archive_info=zip_info)
self.assertFalse(unzip_mock.called)
self.assertEqual(
os.path.abspath(dep_path),
self.cs_info.GetRemotePath())
self.assertTrue(os.path.exists(dep_path))
self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
unzip_mock.assert_called_once_with(self.download_path, unzip_path)
# Stale directory should have been deleted
self.assertFalse(os.path.exists(stale_path))
# Should not need to unzip a second time, but should return the same path.
unzip_mock.reset_mock()
self.assertTrue(os.path.exists(dep_path))
self.assertEqual(
os.path.abspath(dep_path),
self.cs_info.GetRemotePath())
self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
self.assertFalse(unzip_mock.called)
@mock.patch(
'py_utils.cloud_storage.GetIfHashChanged')
def testGetRemotePathCloudStorageErrors(self, cs_get_mock):
cs_get_mock.side_effect = cloud_storage.CloudStorageError
self.assertRaises(cloud_storage.CloudStorageError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.ServerError
self.assertRaises(cloud_storage.ServerError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.NotFoundError
self.assertRaises(cloud_storage.NotFoundError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.PermissionError
self.assertRaises(cloud_storage.PermissionError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.CredentialsError
self.assertRaises(cloud_storage.CredentialsError,
self.cs_info.GetRemotePath)
|
unlicense
| -977,523,326,111,531,500 | -3,547,435,361,731,512,300 | 43.948498 | 109 | 0.683281 | false |
truongdq/chainer
|
tests/cupy_tests/test_ndarray_get.py
|
5
|
1384
|
import unittest
import cupy
from cupy import cuda
from cupy import testing
import numpy
from numpy import testing as np_testing
@testing.gpu
class TestArrayGet(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.stream = cuda.Stream()
def check_get(self, f, stream):
a_gpu = f(cupy)
a_cpu = f(numpy)
np_testing.assert_array_equal(a_gpu.get(stream), a_cpu)
@testing.for_all_dtypes()
def test_contiguous_array(self, dtype):
contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)
self.check_get(contiguous_array, None)
@testing.for_all_dtypes()
def test_non_contiguous_array(self, dtype):
non_contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)[0::2]
self.check_get(non_contiguous_array, None)
@testing.for_all_dtypes()
def test_contiguous_array_stream(self, dtype):
contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)
self.check_get(contiguous_array, self.stream.ptr)
@testing.for_all_dtypes()
def test_non_contiguous_array_stream(self, dtype):
non_contiguous_array = lambda xp: testing.shaped_arange(
(3,), xp=xp, dtype=dtype)[0::2]
self.check_get(non_contiguous_array, self.stream.ptr)
|
mit
| 4,518,072,569,227,188,700 | 5,887,489,470,099,084,000 | 29.755556 | 64 | 0.645231 | false |
muntasirsyed/intellij-community
|
python/lib/Lib/distutils/command/install.py
|
92
|
23567
|
"""distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install.py 43363 2006-03-27 21:55:21Z phillip.eby $"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.errors import DistutilsOptionError
from glob import glob
if sys.version < "2.2":
WINDOWS_SCHEME = {
'purelib': '$base',
'platlib': '$base',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
else:
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'nt': WINDOWS_SCHEME,
'mac': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'os2': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'java': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
}
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
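# Illustrative expansion sketch (not executed here; the values are made up):
# each scheme entry above is a template that finalize_options() later expands
# with distutils.util.subst_vars() against self.config_vars, e.g.
#   from distutils.util import subst_vars
#   subst_vars('$base/lib/python$py_version_short/site-packages',
#              {'base': '/usr/local', 'py_version_short': '2.6'})
#   # -> '/usr/local/lib/python2.6/site-packages'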
class install (Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
        # distribution are determined.  Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
}
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if self.negative_opt.has_key(opt_name):
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
def finalize_unix (self):
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError, \
("install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError, \
"must not supply exec-prefix without prefix"
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
# finalize_unix ()
def finalize_other (self): # Windows and Mac OS for now
if self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError, \
"I don't know how to install stuff on '%s'" % os.name
# finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs (self):
self._expand_attrs(['install_base',
'install_platbase',
'root'])
def expand_dirs (self):
self._expand_attrs(['install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path (self):
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if type(self.extra_path) is StringType:
self.extra_path = string.split(self.extra_path, ',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
(path_file, extra_dirs) = self.extra_path
else:
raise DistutilsOptionError, \
("'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
# handle_extra_path ()
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
# -- Command execution methods -------------------------------------
def run (self):
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
# run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib (self):
"""Return true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers (self):
return self.distribution.has_headers()
def has_scripts (self):
return self.distribution.has_scripts()
def has_data (self):
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
# class install
|
apache-2.0
| -2,134,758,447,552,868,900 | 596,546,051,634,723,600 | 37.382736 | 78 | 0.566003 | false |
BehavioralInsightsTeam/edx-platform
|
lms/djangoapps/static_template_view/urls.py
|
15
|
2320
|
"""
URLs for static_template_view app
"""
from django.conf import settings
from django.conf.urls import url
from static_template_view import views
urlpatterns = [
# Semi-static views (these need to be rendered and have the login bar, but don't change)
url(r'^404$', views.render, {'template': '404.html'}, name="404"),
# display error page templates, for testing purposes
url(r'^404$', views.render_404, name='static_template_view.views.render_404'),
url(r'^500$', views.render_500, name='static_template_view.views.render_500'),
url(r'^blog$', views.render, {'template': 'blog.html'}, name="blog"),
url(r'^contact$', views.render, {'template': 'contact.html'}, name="contact"),
url(r'^donate$', views.render, {'template': 'donate.html'}, name="donate"),
url(r'^faq$', views.render, {'template': 'faq.html'}, name="faq"),
url(r'^help$', views.render, {'template': 'help.html'}, name="help_edx"),
url(r'^jobs$', views.render, {'template': 'jobs.html'}, name="jobs"),
url(r'^news$', views.render, {'template': 'news.html'}, name="news"),
url(r'^press$', views.render, {'template': 'press.html'}, name="press"),
url(r'^media-kit$', views.render, {'template': 'media-kit.html'}, name="media-kit"),
url(r'^copyright$', views.render, {'template': 'copyright.html'}, name="copyright"),
# Press releases
url(r'^press/([_a-zA-Z0-9-]+)$', views.render_press_release, name='press_release'),
]
# Only enable URLs for those marketing links actually enabled in the
# settings. Disable URLs by marking them as None.
for key, value in settings.MKTG_URL_LINK_MAP.items():
# Skip disabled URLs
if value is None:
continue
# These urls are enabled separately
if key == "ROOT" or key == "COURSES":
continue
# The MKTG_URL_LINK_MAP key specifies the template filename
template = key.lower()
if '.' not in template:
# Append STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION if
# no file extension was specified in the key
template = "%s.%s" % (template, settings.STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION)
# Make the assumption that the URL we want is the lowercased
# version of the map key
urlpatterns.append(url(r'^%s$' % key.lower(), views.render, {'template': template}, name=value))
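# Illustrative result (hypothetical map entry): given
# MKTG_URL_LINK_MAP = {'ABOUT': 'about_edx'} and a default file extension of
# 'html', the loop above appends the equivalent of
#   url(r'^about$', views.render, {'template': 'about.html'}, name='about_edx')
# i.e. the key is lowercased for both the URL and the template name, and the
# default extension is only added when the key contains no '.'.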
|
agpl-3.0
| 9,079,719,172,782,650,000 | 4,935,277,237,324,638,000 | 43.615385 | 100 | 0.65431 | false |
KshitijKarthick/tvecs
|
tvecs/visualization/server.py
|
1
|
12380
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""CherryPy Server to provide recommendations of semantic similarity."""
import os
import json
import codecs
import cherrypy
import argparse
import configparser
from gensim.models import Word2Vec
from nltk.tokenize import word_tokenize
from jinja2 import Environment, FileSystemLoader
from cherrypy.lib.static import serve_file
from functools import reduce
from tvecs.preprocessor import yandex_api as yandex
from tvecs.vector_space_mapper.vector_space_mapper import VectorSpaceMapper
class Server(object):
"""
Server Configuration for t-vex.
.. seealso::
* :mod:`cherrypy`
"""
def __init__(self):
"""Initialization the Language and Model."""
self.model = {
"english": Server._load_model("english"),
"hindi": Server._load_model("hindi"),
}
self.cross_lang_vm = {
("english", "hindi"): self._create_vector_space_mapper("english", "hindi"),
("hindi", "english"): self._create_vector_space_mapper("hindi", "english"),
}
self.cache_file_path = os.path.join(
"tvecs", "visualization", "cached_dictionary"
)
if not os.path.exists(self.cache_file_path):
json.dump({}, codecs.open(self.cache_file_path, "w", encoding="utf-8"))
self.cached_dictionary = {}
with codecs.open(self.cache_file_path, "r", encoding="utf-8") as f:
self.cached_dictionary = json.load(f)
@cherrypy.expose
def index(self):
"""Semantic spac visualization html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "index.html")
)
)
@cherrypy.expose
def multivariate_analysis(self):
"""Parallel Coordinates for multivariate analysis html page return."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "multivariate.html")
)
)
@cherrypy.expose
def cross_lingual(self):
"""Cross Lingual recommender html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "cross_lingual.html")
)
)
@cherrypy.expose
def distances(self):
"""Visualization with distances html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "distances.html")
)
)
@cherrypy.expose
def lingual_semantics(self):
"""Semantically related words in same language returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "intra_language.html")
)
)
def retrieve_meaning(self, language, word):
"""
Optional: Translate the word.
        Retrieve English definition(s) of a word from the cached file or PyDictionary.
        API Documentation
        :param language: Language for which the definition is needed
        :param word: Word whose definition needs to be retrieved
:type language: String
:type word: String
:return: word and definition
:rtype: :class:`String`
"""
from PyDictionary import PyDictionary
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
word = word.lower()
trword = word
if word in self.cached_dictionary:
return json.dumps(self.cached_dictionary[word])
else:
if language == "hindi":
trword = yandex.get_translation(word, "hi-en")
dictionary = PyDictionary(trword)
meanings = [trword, dictionary.meaning(trword)]
if meanings[1]:
self.cached_dictionary[word] = meanings
with codecs.open(self.cache_file_path, "w", encoding="utf-8") as f:
f.write(json.dumps(self.cached_dictionary))
return json.dumps(meanings)
@cherrypy.expose
def get_distance(self, word1, word2, language1, language2):
"""
Retrieve cosine distance between word1 and word2.
- word1 and word2 have to be in the vocabulary
of language1 and language2, respectively.
API Documentation
:param word1: A word in language1's vocabulary
:param language1: Language of word1
:param word2: A word in language2's vocabulary
:param language2: Language of word2
:type word1: String
:type language1: String
:type word2: String
:type language2: String
:return: Dictionary with keys 'word1', 'word2', and 'distance'
:rtype: :class:`Dictionary`
.. py:currentmodule:: tvecs.vector_space_mapper.vector_space_mapper
.. seealso::
* :func:`VectorSpaceMapper.obtain_cosine_similarity`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
word1 = word1.lower()
word2 = word2.lower()
vm = self.cross_lang_vm.get((language1, language2))
similarity = None
if vm is not None:
similarity = vm.obtain_cosine_similarity(word1, word2)
distance = 1 - similarity if similarity is not None else None
return json.dumps({"word1": word1, "word2": word2, "distance": distance})
@cherrypy.expose
def retrieve_recommendations(self, language, word, limit=10):
"""
Retrieve number of semantically similar recommendations.
        - For the specified word in the given language, retrieve the requested number of recommendations
        API Documentation
        :param language: Language for which recommendations are required
        :param word: Word for which semantically similar words are recommended
        :param limit: No of words to be recommended [ Default 10 ]
:type language: String
:type word: String
:type limit: Integer
:return: List of recommendations
:rtype: :class:`List`
.. seealso::
* :class:`gensim.models.Word2Vec`
"""
word = word.lower()
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
model = self.model.get(language)
if model is not None:
data = Server._recommend(word, int(limit), fn=model.most_similar)
else:
data = json.dumps(None)
return data
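    # Illustrative response shape (hypothetical words and weights):
    #   GET /retrieve_recommendations?language=english&word=king&limit=2
    # returns the JSON built by _recommend(), e.g.
    #   [{"word": "queen", "weight": 0.71}, {"word": "prince", "weight": 0.68}]
    # or null when no model is loaded for the language or the word is out of
    # vocabulary.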
@cherrypy.expose
def get_cross_lingual_recommendations(self, lang1, lang2, word, topn=10):
"""
Provide cross lingual recommendations.
API Documentation
:param lang1: Language 1 for cross lingual recommendations.
:param lang2: Language 2 for cross lingual recommendations.
:param word: Word utilised for cross lingual recommendations.
:param topn: No of recommendations provided.
:type lang1: String
:type lang2: String
:type word: String
:type topn: Integer
:return: List of recommendations
:rtype: :class:`List`
.. seealso::
* :mod:`tvecs.vector_space_mapper.vector_space_mapper`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
sentence = word_tokenize(word.lower())
vm = self.cross_lang_vm.get((lang1, lang2))
data = None
if vm is not None:
result_vec = reduce(
lambda x, y: x + y, [self.model[lang1][word] for word in sentence]
)
data = Server._recommend(
result_vec, int(topn), fn=vm.get_recommendations_from_vec
)
return data
@cherrypy.expose
def _create_vector_space_mapper(self, lang1, lang2):
"""
Create Vector Space Mapper between Languages.
API Documentation
:param lang1: Language 1 used for building
:class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
object
:param lang2: Language 2 used for building
:class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
object
:return: JSON with successful/failure message
:rtype: JSON
.. seealso::
:mod:`tvecs.vector_space_mapper.vector_space_mapper`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
vm = None
with codecs.open(
os.path.join(
"data", "bilingual_dictionary", "%s_%s_train_bd" % (lang1, lang2)
),
"r",
encoding="utf-8",
) as file:
data = file.read().split("\n")
bilingual_dict = [(line.split(" ")[0], line.split(" ")[1]) for line in data]
if (self.model.get(lang1) is not None) and (
self.model.get(lang2) is not None
):
vm = VectorSpaceMapper(
self.model[lang1], self.model[lang2], bilingual_dict
)
vm.map_vector_spaces()
return vm
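    # Illustrative bilingual dictionary format assumed by the parsing above:
    # one pair per line, source word and target word separated by a single
    # space (the entries below are made up):
    #   dog kutta
    #   house ghar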
@staticmethod
def _recommend(word, limit, fn):
"""Vector Space Mapper recommend functionality."""
try:
vec_list = fn(word, topn=limit)
except KeyError:
vec_list = None
if vec_list is not None:
data = json.dumps([{"word": tup[0], "weight": tup[1]} for tup in vec_list])
else:
data = json.dumps(None)
return data
@staticmethod
def _load_model(language):
"""Used to load Word2Vec Model."""
return Word2Vec.load(
os.path.join("data", "models", "t-vex-%s-model" % language)
)
if __name__ == "__main__":
"""Setting up the Server with Specified Configuration"""
parser = argparse.ArgumentParser(description="Obtain Server Configuration")
parser.add_argument(
"-c",
"--config",
dest="config",
help="Config File Path",
action="store",
type=str,
default=os.path.join("tvecs", "visualization", "server.conf"),
)
parser.add_argument(
"-p", "--port", dest="port", help="Port", action="store", type=int, default=None
)
parser.add_argument(
"-s",
"--host",
dest="host",
help="Host Name",
action="store",
type=str,
default=None,
)
args = parser.parse_args()
server_config = configparser.RawConfigParser()
env = Environment(loader=FileSystemLoader("static"))
conf = {
"/": {"tools.staticdir.root": os.path.abspath(os.getcwd())},
"/js": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "js"
),
},
"/css": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "css"
),
},
"/images": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "images"
),
},
"/resources": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "resources"
),
},
}
server_port = args.port
server_host = args.host
server_config.read(args.config)
if args.port is None:
server_port = server_config.get("Server", "port")
if args.host is None:
server_host = server_config.get("Server", "host")
thread_pool = server_config.get("Server", "thread_pool")
queue_size = server_config.get("Server", "queue_size")
cherrypy.config.update({"server.socket_host": server_host})
cherrypy.config.update({"server.thread_pool": int(thread_pool)})
cherrypy.config.update({"server.socket_queue_size": int(queue_size)})
cherrypy.config.update(
{"server.socket_port": int(os.environ.get("PORT", server_port))}
)
cherrypy.quickstart(Server(), "/", conf)
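# Illustrative client-side check once the server is running (host and port come
# from server.conf or the -s/-p flags, so the values here are assumptions):
#   import requests
#   base = "http://127.0.0.1:5000"
#   resp = requests.get(base + "/retrieve_recommendations",
#                       params={"language": "english", "word": "king", "limit": 5})
#   print(resp.json())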
|
mit
| -7,473,930,900,086,082,000 | 1,459,250,055,526,372,600 | 33.873239 | 88 | 0.569386 | false |
rdipietro/tensorflow
|
tensorflow/python/summary/event_accumulator_test.py
|
3
|
30392
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator as ea
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer
from tensorflow.python.training import saver
class _EventGenerator(object):
"""Class that can add_events and then yield them back.
Satisfies the EventGenerator API required for the EventAccumulator.
Satisfies the EventWriter API required to create a SummaryWriter.
Has additional convenience methods for adding test events.
"""
def __init__(self, zero_out_timestamps=False):
self.items = []
self.zero_out_timestamps = zero_out_timestamps
def Load(self):
while self.items:
yield self.items.pop(0)
def AddScalar(self, tag, wall_time=0, step=0, value=0):
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(
value=[tf.Summary.Value(
tag=tag, simple_value=value)]))
self.AddEvent(event)
def AddHistogram(self,
tag,
wall_time=0,
step=0,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=None,
hbucket=None):
histo = tf.HistogramProto(min=hmin,
max=hmax,
num=hnum,
sum=hsum,
sum_squares=hsum_squares,
bucket_limit=hbucket_limit,
bucket=hbucket)
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(
tag=tag, histo=histo)]))
self.AddEvent(event)
def AddImage(self,
tag,
wall_time=0,
step=0,
encoded_image_string=b'imgstr',
width=150,
height=100):
image = tf.Summary.Image(encoded_image_string=encoded_image_string,
width=width,
height=height)
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(
tag=tag, image=image)]))
self.AddEvent(event)
def AddAudio(self,
tag,
wall_time=0,
step=0,
encoded_audio_string=b'sndstr',
content_type='audio/wav',
sample_rate=44100,
length_frames=22050):
audio = tf.Summary.Audio(encoded_audio_string=encoded_audio_string,
content_type=content_type,
sample_rate=sample_rate,
length_frames=length_frames)
event = tf.summary.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(
tag=tag, audio=audio)]))
self.AddEvent(event)
def AddEvent(self, event):
if self.zero_out_timestamps:
event.wall_time = 0
self.items.append(event)
def add_event(self, event): # pylint: disable=invalid-name
"""Match the EventWriter API."""
self.AddEvent(event)
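# Minimal usage sketch for the fake generator, mirroring testScalars below
# (inside MockingEventAccumulatorTest, where EventAccumulator is stubbed so a
# generator can be passed in directly; the tag and values are arbitrary):
#   gen = _EventGenerator()
#   acc = ea.EventAccumulator(gen)
#   gen.AddScalar('loss', wall_time=1, step=10, value=0.5)
#   acc.Reload()
#   acc.Scalars('loss')  # -> [ea.ScalarEvent(wall_time=1, step=10, value=0.5)]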
class EventAccumulatorTest(tf.test.TestCase):
def assertTagsEqual(self, tags1, tags2):
# Make sure the two dictionaries have the same keys.
self.assertItemsEqual(tags1, tags2)
# Additionally, make sure each key in the dictionary maps to the same value.
for key in tags1:
if isinstance(tags1[key], list):
# We don't care about the order of the values in lists, thus asserting
# only if the items are equal.
self.assertItemsEqual(tags1[key], tags2[key])
else:
# Make sure the values are equal.
self.assertEqual(tags1[key], tags2[key])
class MockingEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(MockingEventAccumulatorTest, self).setUp()
self.stubs = googletest.StubOutForTesting()
self.empty = {ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []}
self._real_constructor = ea.EventAccumulator
self._real_generator = ea._GeneratorFromPath
def _FakeAccumulatorConstructor(generator, *args, **kwargs):
ea._GeneratorFromPath = lambda x: generator
return self._real_constructor(generator, *args, **kwargs)
ea.EventAccumulator = _FakeAccumulatorConstructor
def tearDown(self):
self.stubs.CleanUp()
ea.EventAccumulator = self._real_constructor
ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator()
x = ea.EventAccumulator(gen)
x.Reload()
self.assertEqual(x.Tags(), self.empty)
def testTags(self):
gen = _EventGenerator()
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
def testReload(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertEqual(acc.Tags(), self.empty)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
def testScalars(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def testHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(min=1,
max=2,
num=3,
sum=4,
sum_squares=5,
bucket_limit=[1, 2, 3],
bucket=[0, 3, 0])
val2 = ea.HistogramValue(min=-2,
max=3,
num=4,
sum=5,
sum_squares=6,
bucket_limit=[2, 3, 4],
bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5), (
7500, 1.75), (10000, 2.0)]]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1,
step=10,
compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3), (7500, 2 + 2 / 3
), (10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2,
step=12,
compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testCompressHistogram_uglyHistogram(self):
bps = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
histogram_values = ea.HistogramValue(
min=0.0,
max=1.0,
num=960.0,
sum=64.0,
sum_squares=64.0,
bucket_limit=[
0.0, 1e-12, 0.917246389039776, 1.0089710279437536,
1.7976931348623157e+308
],
bucket=[0.0, 896.0, 0.0, 64.0, 0.0])
histogram_event = ea.HistogramEvent(0, 0, histogram_values)
compressed_event = ea._CompressHistogram(histogram_event, bps)
vals = compressed_event.compressed_histogram_values
self.assertEquals(tuple(v.basis_point for v in vals), bps)
self.assertAlmostEqual(vals[0].value, 0.0)
self.assertAlmostEqual(vals[1].value, 7.157142857142856e-14)
self.assertAlmostEqual(vals[2].value, 1.7003571428571426e-13)
self.assertAlmostEqual(vals[3].value, 3.305357142857143e-13)
self.assertAlmostEqual(vals[4].value, 5.357142857142857e-13)
self.assertAlmostEqual(vals[5].value, 7.408928571428571e-13)
self.assertAlmostEqual(vals[6].value, 9.013928571428571e-13)
self.assertAlmostEqual(vals[7].value, 9.998571428571429e-13)
self.assertAlmostEqual(vals[8].value, 1.0)
def testImages(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
im2 = ea.ImageEvent(wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
gen.AddImage('im1',
wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
gen.AddImage('im2',
wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testAudio(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
snd1 = ea.AudioEvent(wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
snd2 = ea.AudioEvent(wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
gen.AddAudio('snd1',
wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
gen.AddAudio('snd2',
wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
acc.Reload()
self.assertEqual(acc.Audio('snd1'), [snd1])
self.assertEqual(acc.Audio('snd2'), [snd2])
def testKeyError(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
with self.assertRaises(KeyError):
acc.Audio('s1')
with self.assertRaises(KeyError):
acc.Audio('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(tf.summary.Event(wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
gen.AddAudio('snd1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.AUDIO: ['snd1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items with the same tag
that are outdated.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
def testOrphanedDataNotDiscardedIfFlagUnset(self):
"""Tests that events are not discarded if purge_orphaned_data is false.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300, 101,
201, 301])
def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
"""Tests that event discards after restart, only affect the misordered tag.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated, but
only for the out of order tag. Other tags should remain unaffected.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
gen.AddScalar('s2', wall_time=1, step=101, value=20)
gen.AddScalar('s2', wall_time=1, step=201, value=20)
gen.AddScalar('s2', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
## Check that s1 discards do not affect s2
## i.e. check that only events from the out of order tag are discarded
self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])
def testOnlySummaryEventsTriggerDiscards(self):
"""Test that file version event does not trigger data purge."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
ev1 = tf.summary.Event(wall_time=2, step=0, file_version='brain.Event:1')
graph_bytes = graph_pb2.GraphDef().SerializeToString()
ev2 = tf.summary.Event(wall_time=3, step=0, graph_def=graph_bytes)
gen.AddEvent(ev1)
gen.AddEvent(ev2)
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100])
def testSessionLogStartMessageDiscardsExpiredEvents(self):
"""Test that SessionLog.START message discards expired events.
This discard logic is preferred over the out-of-order step discard logic,
but this logic can only be used for event protos which have the SessionLog
enum, which was introduced to event.proto for file_version >= brain.Event:2.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=0, step=1, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=400, value=20)
gen.AddScalar('s2', wall_time=1, step=202, value=20)
gen.AddScalar('s2', wall_time=1, step=203, value=20)
slog = tf.summary.SessionLog(status=tf.summary.SessionLog.START)
gen.AddEvent(tf.summary.Event(wall_time=2, step=201, session_log=slog))
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
self.assertEqual([x.step for x in acc.Scalars('s2')], [])
def testFirstEventTimestamp(self):
"""Test that FirstEventTimestamp() returns wall_time of the first event."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=10, step=20, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=30, step=40, value=20)
self.assertEqual(acc.FirstEventTimestamp(), 10)
def testReloadPopulatesFirstEventTimestamp(self):
"""Test that Reload() means FirstEventTimestamp() won't load events."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=1, step=2, file_version='brain.Event:2'))
acc.Reload()
def _Die(*args, **kwargs): # pylint: disable=unused-argument
raise RuntimeError('Load() should not be called')
self.stubs.Set(gen, 'Load', _Die)
self.assertEqual(acc.FirstEventTimestamp(), 1)
def testFirstEventTimestampLoadsEvent(self):
"""Test that FirstEventTimestamp() doesn't discard the loaded event."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(
tf.summary.Event(
wall_time=1, step=2, file_version='brain.Event:2'))
self.assertEqual(acc.FirstEventTimestamp(), 1)
acc.Reload()
self.assertEqual(acc.file_version, 2.0)
def testTFSummaryScalar(self):
"""Verify processing of tf.summary.scalar, which uses TensorSummary op."""
event_sink = _EventGenerator(zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = tf.placeholder(tf.float32)
tf.summary.scalar('scalar1', ipt)
tf.summary.scalar('scalar2', ipt * ipt)
merged = tf.contrib.deprecated.merge_all_summaries()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged, feed_dict={ipt: i})
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
seq1 = [ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)]
seq2 = [
ea.ScalarEvent(
wall_time=0, step=i, value=i * i) for i in xrange(10)
]
self.assertTagsEqual(accumulator.Tags(), {
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: ['scalar1', 'scalar2'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
self.assertEqual(accumulator.Scalars('scalar1'), seq1)
self.assertEqual(accumulator.Scalars('scalar2'), seq2)
first_value = accumulator.Scalars('scalar1')[0].value
self.assertTrue(isinstance(first_value, float))
def testTFSummaryImage(self):
"""Verify processing of tf.summary.image."""
event_sink = _EventGenerator(zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, since it would be tag reuse.
      # Using the tf node name instead allows the same arguments to be re-used
      # across the image summaries.
with tf.name_scope('1'):
tf.summary.image('images', ipt, max_outputs=1)
with tf.name_scope('2'):
tf.summary.image('images', ipt, max_outputs=2)
with tf.name_scope('3'):
tf.summary.image('images', ipt, max_outputs=3)
merged = tf.contrib.deprecated.merge_all_summaries()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged)
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
tags = [
u'1/images/image', u'2/images/image/0', u'2/images/image/1',
u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
]
self.assertTagsEqual(accumulator.Tags(), {
ea.IMAGES: tags,
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
class RealisticEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(RealisticEventAccumulatorTest, self).setUp()
def testScalarsRealistically(self):
"""Test accumulator by writing values and then reading them."""
def FakeScalarSummary(tag, value):
value = tf.Summary.Value(tag=tag, simple_value=value)
summary = tf.Summary(value=[value])
return summary
directory = os.path.join(self.get_temp_dir(), 'values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.summary.FileWriter(directory, max_queue=100)
with tf.Graph().as_default() as graph:
_ = tf.constant([2.0, 1.0])
# Add a graph to the summary writer.
writer.add_graph(graph)
meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True))
writer.add_meta_graph(meta_graph_def)
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# Write a bunch of events using the writer.
for i in xrange(30):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(
acc.Tags(),
{
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: ['id', 'sq'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: True,
ea.RUN_METADATA: ['test run']
})
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(30, len(id_events))
self.assertEqual(30, len(sq_events))
for i in xrange(30):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
# Write a few more events to test incremental reloading
for i in xrange(30, 40):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify we can now see all of the data
acc.Reload()
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(40, len(id_events))
self.assertEqual(40, len(sq_events))
for i in xrange(40):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
def testGraphFromMetaGraphBecomesAvailable(self):
"""Test accumulator by writing values and then reading them."""
directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.summary.FileWriter(directory, max_queue=100)
with tf.Graph().as_default() as graph:
_ = tf.constant([2.0, 1.0])
# Add a graph to the summary writer.
meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True))
writer.add_meta_graph(meta_graph_def)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(
acc.Tags(),
{
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: True,
ea.RUN_METADATA: []
})
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
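# Illustrative sketch (editor's addition, not part of the original test file):
# outside of these tests, the accumulator is normally pointed at a directory of
# event files rather than a fake generator.  The path and the 'loss' tag below
# are made-up examples.
#
#     acc = ea.EventAccumulator('/tmp/my_run')
#     acc.Reload()
#     acc.Tags()           # dict of available tags per category
#     acc.Scalars('loss')  # list of ScalarEvent(wall_time, step, value) entries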
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -5,680,844,792,792,706,000 | -2,787,011,587,783,670,000 | 34.839623 | 80 | 0.592261 | false |
bonitadecker77/python-for-android
|
python3-alpha/extra_modules/gdata/notebook/data.py
|
125
|
1426
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Notebook Data API"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.opensearch.data
NB_TEMPLATE = '{http://schemas.google.com/notes/2008/}%s'
class ComesAfter(atom.core.XmlElement):
"""Preceding element."""
_qname = NB_TEMPLATE % 'comesAfter'
id = 'id'
class NoteEntry(gdata.data.GDEntry):
"""Describes a note entry in the feed of a user's notebook."""
class NotebookFeed(gdata.data.GDFeed):
"""Describes a notebook feed."""
entry = [NoteEntry]
class NotebookListEntry(gdata.data.GDEntry):
"""Describes a note list entry in the feed of a user's list of public notebooks."""
class NotebookListFeed(gdata.data.GDFeed):
"""Describes a notebook list feed."""
entry = [NotebookListEntry]
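# Illustrative usage sketch (editor's addition, not part of the gdata API docs).
# The classes above are thin atom.core.XmlElement / gdata.data wrappers, so the
# qname mapping can be exercised directly; the id value below is a made-up
# example.
#
#     marker = ComesAfter(id='note-123')
#     marker._qname   # '{http://schemas.google.com/notes/2008/}comesAfter'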
|
apache-2.0
| 4,938,240,878,973,071,000 | 1,472,493,929,778,915,600 | 24.927273 | 85 | 0.72791 | false |
albertz/music-player
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsimage.py
|
3
|
9049
|
from PyObjCTools.TestSupport import *
import AppKit
from AppKit import *
try:
unicode
except NameError:
unicode = str
class TestNSImageHelper (NSObject):
def image_didLoadRepresentation_withStatus_(self, i, r, s): pass
def image_didLoadPartOfRepresentation_withValidRows_(self, i, r, c): pass
class TestNSImage (TestCase):
def test_compositePoint(self):
# comes straight from ReSTedit. Works on PPC, not on Intel (as of r1791)
ws = AppKit.NSWorkspace.sharedWorkspace()
txtIcon = ws.iconForFileType_("txt")
txtIcon.setSize_( (16,16) )
htmlIcon = ws.iconForFileType_("html")
htmlIcon.setSize_( (16,16) )
comboIcon = AppKit.NSImage.alloc().initWithSize_( (100,100) )
comboIcon.lockFocus()
txtIcon.compositeToPoint_fromRect_operation_((0,0), ((0,0),(16,16)), AppKit.NSCompositeCopy)
htmlIcon.compositeToPoint_fromRect_operation_((8,0), ((8,0),(8,16)), AppKit.NSCompositeCopy)
comboIcon.unlockFocus()
def testConstants(self):
self.assertEqual(NSImageLoadStatusCompleted, 0)
self.assertEqual(NSImageLoadStatusCancelled, 1)
self.assertEqual(NSImageLoadStatusInvalidData, 2)
self.assertEqual(NSImageLoadStatusUnexpectedEOF, 3)
self.assertEqual(NSImageLoadStatusReadError, 4)
self.assertEqual(NSImageCacheDefault, 0)
self.assertEqual(NSImageCacheAlways, 1)
self.assertEqual(NSImageCacheBySize, 2)
self.assertEqual(NSImageCacheNever, 3)
@min_os_level("10.5")
def testConstants10_5(self):
self.assertIsInstance( NSImageNameQuickLookTemplate, unicode)
self.assertIsInstance( NSImageNameBluetoothTemplate, unicode)
self.assertIsInstance( NSImageNameIChatTheaterTemplate, unicode)
self.assertIsInstance( NSImageNameSlideshowTemplate, unicode)
self.assertIsInstance( NSImageNameActionTemplate, unicode)
self.assertIsInstance( NSImageNameSmartBadgeTemplate, unicode)
self.assertIsInstance( NSImageNameIconViewTemplate, unicode)
self.assertIsInstance( NSImageNameListViewTemplate, unicode)
self.assertIsInstance( NSImageNameColumnViewTemplate, unicode)
self.assertIsInstance( NSImageNameFlowViewTemplate, unicode)
self.assertIsInstance( NSImageNamePathTemplate, unicode)
self.assertIsInstance( NSImageNameInvalidDataFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameLockLockedTemplate, unicode)
self.assertIsInstance( NSImageNameLockUnlockedTemplate, unicode)
self.assertIsInstance( NSImageNameGoRightTemplate, unicode)
self.assertIsInstance( NSImageNameGoLeftTemplate, unicode)
self.assertIsInstance( NSImageNameRightFacingTriangleTemplate, unicode)
self.assertIsInstance( NSImageNameLeftFacingTriangleTemplate, unicode)
self.assertIsInstance( NSImageNameAddTemplate, unicode)
self.assertIsInstance( NSImageNameRemoveTemplate, unicode)
self.assertIsInstance( NSImageNameRevealFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameFollowLinkFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameEnterFullScreenTemplate, unicode)
self.assertIsInstance( NSImageNameExitFullScreenTemplate, unicode)
self.assertIsInstance( NSImageNameStopProgressTemplate, unicode)
self.assertIsInstance( NSImageNameStopProgressFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameRefreshTemplate, unicode)
self.assertIsInstance( NSImageNameRefreshFreestandingTemplate, unicode)
self.assertIsInstance( NSImageNameBonjour, unicode)
self.assertIsInstance( NSImageNameDotMac, unicode)
self.assertIsInstance( NSImageNameComputer, unicode)
self.assertIsInstance( NSImageNameFolderBurnable, unicode)
self.assertIsInstance( NSImageNameFolderSmart, unicode)
self.assertIsInstance( NSImageNameNetwork, unicode)
self.assertIsInstance( NSImageNameMultipleDocuments, unicode)
self.assertIsInstance( NSImageNameUserAccounts, unicode)
self.assertIsInstance( NSImageNamePreferencesGeneral, unicode)
self.assertIsInstance( NSImageNameAdvanced, unicode)
self.assertIsInstance( NSImageNameInfo, unicode)
self.assertIsInstance( NSImageNameFontPanel, unicode)
self.assertIsInstance( NSImageNameColorPanel, unicode)
self.assertIsInstance( NSImageNameUser, unicode)
self.assertIsInstance( NSImageNameUserGroup, unicode)
self.assertIsInstance( NSImageNameEveryone, unicode)
def testMethods(self):
self.assertResultIsBOOL(NSImage.setName_)
self.assertArgIsBOOL(NSImage.setScalesWhenResized_, 0)
self.assertResultIsBOOL(NSImage.scalesWhenResized)
self.assertArgIsBOOL(NSImage.setDataRetained_, 0)
self.assertResultIsBOOL(NSImage.isDataRetained)
self.assertArgIsBOOL(NSImage.setCachedSeparately_, 0)
self.assertResultIsBOOL(NSImage.isCachedSeparately)
self.assertArgIsBOOL(NSImage.setCacheDepthMatchesImageDepth_, 0)
self.assertResultIsBOOL(NSImage.cacheDepthMatchesImageDepth)
self.assertArgIsBOOL(NSImage.setUsesEPSOnResolutionMismatch_, 0)
self.assertResultIsBOOL(NSImage.usesEPSOnResolutionMismatch)
self.assertArgIsBOOL(NSImage.setPrefersColorMatch_, 0)
self.assertResultIsBOOL(NSImage.prefersColorMatch)
self.assertArgIsBOOL(NSImage.setMatchesOnMultipleResolution_, 0)
self.assertResultIsBOOL(NSImage.matchesOnMultipleResolution)
self.assertResultIsBOOL(NSImage.drawRepresentation_inRect_)
self.assertResultIsBOOL(NSImage.isValid)
self.assertResultIsBOOL(NSImage.canInitWithPasteboard_)
self.assertResultIsBOOL(NSImage.isFlipped)
self.assertArgIsBOOL(NSImage.setFlipped_, 0)
self.assertResultIsBOOL(NSImage.isTemplate)
self.assertArgIsBOOL(NSImage.setTemplate_, 0)
def testProtocols(self):
self.assertArgHasType(TestNSImageHelper.image_didLoadPartOfRepresentation_withValidRows_, 2, objc._C_NSInteger)
self.assertArgHasType(TestNSImageHelper.image_didLoadRepresentation_withStatus_, 2, objc._C_NSUInteger)
@min_os_level('10.6')
def testMethods10_6(self):
self.assertArgHasType(NSImage.drawInRect_fromRect_operation_fraction_respectFlipped_hints_,
0, NSRect.__typestr__)
self.assertArgIsBOOL(NSImage.drawInRect_fromRect_operation_fraction_respectFlipped_hints_, 4)
self.assertArgIsBOOL(NSImage.lockFocusFlipped_, 0)
self.assertArgHasType(NSImage.initWithCGImage_size_, 1, NSSize.__typestr__)
self.assertArgHasType(NSImage.CGImageForProposedRect_context_hints_, 0, b'o^' + NSRect.__typestr__)
self.assertArgHasType(NSImage.bestRepresentationForRect_context_hints_, 0, NSRect.__typestr__)
self.assertResultIsBOOL(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_)
self.assertArgHasType(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_, 0, NSRect.__typestr__)
self.assertArgHasType(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_, 1, NSRect.__typestr__)
@min_os_level('10.7')
def testMethods10_7(self):
self.assertResultIsBOOL(NSImage.matchesOnlyOnBestFittingAxis)
self.assertArgIsBOOL(NSImage.setMatchesOnlyOnBestFittingAxis_, 0)
@min_os_level('10.8')
def testMethods10_8(self):
self.assertArgIsBOOL(NSImage.imageWithSize_flipped_drawingHandler_, 1)
self.assertArgIsBlock(NSImage.imageWithSize_flipped_drawingHandler_, 2,
objc._C_NSBOOL + NSRect.__typestr__)
@min_os_level('10.6')
def testConstants10_6(self):
self.assertIsInstance(NSImageHintCTM, unicode)
self.assertIsInstance(NSImageHintInterpolation, unicode)
self.assertIsInstance(NSImageNameFolder, unicode)
self.assertIsInstance(NSImageNameMobileMe, unicode)
self.assertIsInstance(NSImageNameUserGuest, unicode)
self.assertIsInstance(NSImageNameMenuOnStateTemplate, unicode)
self.assertIsInstance(NSImageNameMenuMixedStateTemplate, unicode)
self.assertIsInstance(NSImageNameApplicationIcon, unicode)
self.assertIsInstance(NSImageNameTrashEmpty, unicode)
self.assertIsInstance(NSImageNameTrashFull, unicode)
self.assertIsInstance(NSImageNameHomeTemplate, unicode)
self.assertIsInstance(NSImageNameBookmarksTemplate, unicode)
self.assertIsInstance(NSImageNameCaution, unicode)
self.assertIsInstance(NSImageNameStatusAvailable, unicode)
self.assertIsInstance(NSImageNameStatusPartiallyAvailable, unicode)
self.assertIsInstance(NSImageNameStatusUnavailable, unicode)
self.assertIsInstance(NSImageNameStatusNone, unicode)
@min_os_level('10.8')
def testConstants10_8(self):
self.assertIsInstance(NSImageNameShareTemplate, unicode)
if __name__ == "__main__":
main()
|
bsd-2-clause
| 3,166,195,322,770,168,000 | 6,648,218,525,737,417,000 | 52.544379 | 121 | 0.750691 | false |
xuyuhan/depot_tools
|
third_party/logilab/common/sphinx_ext.py
|
117
|
3329
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
from logilab.common.decorators import monkeypatch
from sphinx.ext import autodoc
class DocstringOnlyModuleDocumenter(autodoc.ModuleDocumenter):
objtype = 'docstring'
def format_signature(self):
pass
def add_directive_header(self, sig):
pass
def document_members(self, all_members=False):
pass
def resolve_name(self, modname, parents, path, base):
if modname is not None:
return modname, parents + [base]
return (path or '') + base, []
#autodoc.add_documenter(DocstringOnlyModuleDocumenter)
def setup(app):
app.add_autodocumenter(DocstringOnlyModuleDocumenter)
from sphinx.ext.autodoc import (ViewList, Options, AutodocReporter, nodes,
assemble_option_dict, nested_parse_with_titles)
@monkeypatch(autodoc.AutoDirective)
def run(self):
self.filename_set = set() # a set of dependent filenames
self.reporter = self.state.document.reporter
self.env = self.state.document.settings.env
self.warnings = []
self.result = ViewList()
# find out what documenter to call
objtype = self.name[4:]
doc_class = self._registry[objtype]
# process the options with the selected documenter's option_spec
self.genopt = Options(assemble_option_dict(
self.options.items(), doc_class.option_spec))
# generate the output
documenter = doc_class(self, self.arguments[0])
documenter.generate(more_content=self.content)
if not self.result:
return self.warnings
# record all filenames as dependencies -- this will at least
# partially make automatic invalidation possible
for fn in self.filename_set:
self.env.note_dependency(fn)
# use a custom reporter that correctly assigns lines to source
# filename/description and lineno
old_reporter = self.state.memo.reporter
self.state.memo.reporter = AutodocReporter(self.result,
self.state.memo.reporter)
if self.name in ('automodule', 'autodocstring'):
node = nodes.section()
# necessary so that the child nodes get the right source/line set
node.document = self.state.document
nested_parse_with_titles(self.state, self.result, node)
else:
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
self.state.memo.reporter = old_reporter
return self.warnings + node.children
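# Illustrative configuration sketch (editor's addition): to use the
# ``autodocstring`` directive registered by ``setup()`` above, this module is
# listed as a normal Sphinx extension in a project's conf.py; the project
# itself is hypothetical.
#
#     # conf.py
#     extensions = ['logilab.common.sphinx_ext']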
|
bsd-3-clause
| -7,921,418,954,781,783,000 | 3,166,252,672,385,482,000 | 37.264368 | 81 | 0.699009 | false |
mer-hybris/android_kernel_lge_hammerhead
|
scripts/gcc-wrapper.py
|
1276
|
3382
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, so we can find it later.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
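# Illustrative invocation sketch (editor's addition): the script is meant to be
# used in place of the compiler, with the real compiler and its arguments passed
# straight through (sys.argv[1:] above).  The command line below is a
# hypothetical example, not the kernel build's exact CC definition.
#
#     scripts/gcc-wrapper.py gcc -Wall -c foo.c -o foo.o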
|
gpl-2.0
| 8,401,279,440,210,688,000 | -95,649,248,591,068,260 | 34.229167 | 97 | 0.668539 | false |
hazelcast/hazelcast-python-client
|
tests/unit/discovery/hazelcast_cloud_discovery_test.py
|
1
|
5900
|
import ssl
import os
import threading
from hazelcast.six.moves import BaseHTTPServer
from hazelcast import six
from unittest import TestCase
from hazelcast.core import Address
from hazelcast.errors import HazelcastCertificationError
from hazelcast.discovery import HazelcastCloudDiscovery
from hazelcast.client import HazelcastClient
from tests.util import get_abs_path
TOKEN = "123abc456"
PRIVATE_LINK_TOKEN = "abc123def"
CLOUD_URL = HazelcastCloudDiscovery._CLOUD_URL_PATH
RESPONSE = """[
{"private-address":"10.47.0.8","public-address":"54.213.63.142:32298"},
{"private-address":"10.47.0.9","public-address":"54.245.77.185:32298"},
{"private-address":"10.47.0.10","public-address":"54.186.232.37:32298"}
]"""
PRIVATE_LINK_RESPONSE = """[
{"private-address":"100.96.5.1:5701","public-address":"10.113.44.139:31115"},
{"private-address":"100.96.4.2:5701","public-address":"10.113.44.130:31115"}
]"""
HOST = "localhost"
ADDRESSES = {
Address("10.47.0.8", 32298): Address("54.213.63.142", 32298),
Address("10.47.0.9", 32298): Address("54.245.77.185", 32298),
Address("10.47.0.10", 32298): Address("54.186.232.37", 32298),
}
PRIVATE_LINK_ADDRESSES = {
Address("100.96.5.1", 5701): Address("10.113.44.139", 31115),
Address("100.96.4.2", 5701): Address("10.113.44.130", 31115),
}
class CloudHTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
idx = self.path.find("=")
if idx > 0:
if self.path[: idx + 1] == CLOUD_URL:
# Found a cluster with the given token
token = self.path[idx + 1 :]
if token == TOKEN:
self._set_response(200, RESPONSE)
elif token == PRIVATE_LINK_TOKEN:
self._set_response(200, PRIVATE_LINK_RESPONSE)
                # Cannot find a cluster with the given token
else:
self._set_response(
404,
'{"message":"Cluster with token: ' + self.path[idx + 1 :] + ' not found."}',
)
else:
# Wrong URL
self._set_response(404, "default backend - 404")
def _set_response(self, status, message):
self.send_response(status)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(six.b(message))
class Server(object):
cur_dir = os.path.dirname(__file__)
def __init__(self):
self.server = BaseHTTPServer.HTTPServer((HOST, 0), CloudHTTPHandler)
self.server.socket = ssl.wrap_socket(
self.server.socket,
get_abs_path(self.cur_dir, "key.pem"),
get_abs_path(self.cur_dir, "cert.pem"),
server_side=True,
)
self.port = self.server.socket.getsockname()[1]
def start_server(self):
self.server.serve_forever()
def close_server(self):
self.server.shutdown()
class TestClient(HazelcastClient):
def _start(self):
        # Let the client initialize the cloud address provider and translator, but don't actually start it.
pass
class HazelcastCloudDiscoveryTest(TestCase):
cur_dir = os.path.dirname(__file__)
@classmethod
def setUpClass(cls):
cls.ctx = ssl.create_default_context(cafile=get_abs_path(cls.cur_dir, "cert.pem"))
cls.server = Server()
cls.server_thread = threading.Thread(target=cls.server.start_server)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.server.close_server()
def test_found_response(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, TOKEN)
discovery._ctx = self.ctx
addresses = discovery.discover_nodes()
six.assertCountEqual(self, ADDRESSES, addresses)
def test_private_link_response(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, PRIVATE_LINK_TOKEN)
discovery._ctx = self.ctx
addresses = discovery.discover_nodes()
six.assertCountEqual(self, PRIVATE_LINK_ADDRESSES, addresses)
def test_not_found_response(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, "INVALID_TOKEN")
discovery._ctx = self.ctx
with self.assertRaises(IOError):
discovery.discover_nodes()
def test_invalid_url(self):
discovery = create_discovery(HOST, self.server.port, "/INVALID_URL", "")
discovery._ctx = self.ctx
with self.assertRaises(IOError):
discovery.discover_nodes()
def test_invalid_certificates(self):
discovery = create_discovery(HOST, self.server.port, CLOUD_URL, TOKEN)
with self.assertRaises(HazelcastCertificationError):
discovery.discover_nodes()
def test_client_with_cloud_discovery(self):
old = HazelcastCloudDiscovery._CLOUD_URL_BASE
try:
HazelcastCloudDiscovery._CLOUD_URL_BASE = "%s:%s" % (HOST, self.server.port)
client = TestClient(cloud_discovery_token=TOKEN)
client._address_provider.cloud_discovery._ctx = self.ctx
private_addresses, secondaries = client._address_provider.load_addresses()
six.assertCountEqual(self, list(ADDRESSES.keys()), private_addresses)
six.assertCountEqual(self, secondaries, [])
for private_address in private_addresses:
translated_address = client._address_provider.translate(private_address)
self.assertEqual(ADDRESSES[private_address], translated_address)
finally:
HazelcastCloudDiscovery._CLOUD_URL_BASE = old
def create_discovery(host, port, url, token, timeout=5.0):
discovery = HazelcastCloudDiscovery(token, timeout)
discovery._CLOUD_URL_BASE = "%s:%s" % (host, port)
discovery._CLOUD_URL_PATH = url
return discovery
|
apache-2.0
| -8,160,329,542,020,613,000 | -1,927,133,886,347,608,300 | 34.542169 | 106 | 0.634576 | false |
40223234/40223234
|
static/Brython3.1.1-20150328-091302/Lib/posixpath.py
|
722
|
14212
|
"""Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
# TODO: on Mac OS X, this should really return s.lower().
if not isinstance(s, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(s.__class__.__name__))
return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
"""Test whether a path is absolute"""
sep = _get_sep(s)
return s.startswith(sep)
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
sep = _get_sep(a)
path = a
try:
for b in p:
if b.startswith(sep):
path = b
elif not path or path.endswith(sep):
path += b
else:
path += sep + b
except TypeError:
valid_types = all(isinstance(s, (str, bytes, bytearray))
for s in (a, ) + p)
if valid_types:
# Must have a mixture of text and binary data
raise TypeError("Can't mix strings and bytes in path "
"components.") from None
raise
return path
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
"""Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty."""
sep = _get_sep(p)
i = p.rfind(sep) + 1
head, tail = p[:i], p[i:]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
if isinstance(p, bytes):
sep = b'/'
extsep = b'.'
else:
sep = '/'
extsep = '.'
return genericpath._splitext(p, sep, None, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
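# Example (editor's illustration): the extension is everything from the last dot
# of the final component, and a leading dot does not count as an extension.
#
#     splitext('/tmp/archive.tar.gz')  ->  ('/tmp/archive.tar', '.gz')
#     splitext('.bashrc')              ->  ('.bashrc', '')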
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
"""Split a pathname into drive and path. On Posix, drive is always
empty."""
return p[:0], p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
"""Returns the final component of a pathname"""
sep = _get_sep(p)
i = p.rfind(sep) + 1
return p[i:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
"""Returns the directory component of a pathname"""
sep = _get_sep(p)
i = p.rfind(sep) + 1
head = p[:i]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
"""Test whether a path is a symbolic link"""
try:
st = os.lstat(path)
except (os.error, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
os.lstat(path)
except os.error:
return False
return True
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return s1.st_ino == s2.st_ino and \
s1.st_dev == s2.st_dev
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
"""Test whether a path is a mount point"""
if islink(path):
# A symlink can never be a mount point
return False
try:
s1 = os.lstat(path)
if isinstance(path, bytes):
parent = join(path, b'..')
else:
parent = join(path, '..')
s2 = os.lstat(parent)
except os.error:
return False # It doesn't exist -- so not a mount point :-)
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if isinstance(path, bytes):
tilde = b'~'
else:
tilde = '~'
if not path.startswith(tilde):
return path
sep = _get_sep(path)
i = path.find(sep, 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = os.environ['HOME']
else:
import pwd
name = path[1:i]
if isinstance(name, bytes):
name = str(name, 'ASCII')
try:
pwent = pwd.getpwnam(name)
except KeyError:
return path
userhome = pwent.pw_dir
if isinstance(path, bytes):
userhome = os.fsencode(userhome)
root = b'/'
else:
root = '/'
userhome = userhome.rstrip(root)
return (userhome + path[i:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None
_varprogb = None
def expandvars(path):
"""Expand shell variables of form $var and ${var}. Unknown variables
are left unchanged."""
global _varprog, _varprogb
if isinstance(path, bytes):
if b'$' not in path:
return path
if not _varprogb:
import re
_varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprogb.search
start = b'{'
end = b'}'
else:
if '$' not in path:
return path
if not _varprog:
import re
_varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprog.search
start = '{'
end = '}'
i = 0
while True:
m = search(path, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith(start) and name.endswith(end):
name = name[1:-1]
if isinstance(name, bytes):
name = str(name, 'ASCII')
if name in os.environ:
tail = path[j:]
value = os.environ[name]
if isinstance(path, bytes):
value = value.encode('ASCII')
path = path[:i] + value
i = len(path)
path += tail
else:
i = j
return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
if isinstance(path, bytes):
sep = b'/'
empty = b''
dot = b'.'
dotdot = b'..'
else:
sep = '/'
empty = ''
dot = '.'
dotdot = '..'
if path == empty:
return dot
initial_slashes = path.startswith(sep)
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith(sep*2) and not path.startswith(sep*3)):
initial_slashes = 2
comps = path.split(sep)
new_comps = []
for comp in comps:
if comp in (empty, dot):
continue
if (comp != dotdot or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == dotdot)):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = sep.join(comps)
if initial_slashes:
path = sep*initial_slashes + path
return path or dot
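# Example (editor's illustration) of the normalizations performed above:
#
#     normpath('A//B/./C/../D')  ->  'A/B/D'
#     normpath('//usr/lib')      ->  '//usr/lib'   (two leading slashes are kept)
#     normpath('///usr/lib')     ->  '/usr/lib'    (three or more become one)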
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
path, ok = _joinrealpath(filename[:0], filename, {})
return abspath(path)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
if isinstance(path, bytes):
sep = b'/'
curdir = b'.'
pardir = b'..'
else:
sep = '/'
curdir = '.'
pardir = '..'
if isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
# current dir
continue
if name == pardir:
# parent dir
if path:
path, name = split(path)
if name == pardir:
path = join(path, pardir, pardir)
else:
path = pardir
continue
newpath = join(path, name)
if not islink(newpath):
path = newpath
continue
# Resolve the symbolic link
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
# Return already resolved part + rest of the path unchanged.
return join(newpath, rest), False
seen[newpath] = None # not resolved symlink
path, ok = _joinrealpath(path, os.readlink(newpath), seen)
if not ok:
return join(path, rest), False
seen[newpath] = path # resolved symlink
return path, True
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=None):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
if isinstance(path, bytes):
curdir = b'.'
sep = b'/'
pardir = b'..'
else:
curdir = '.'
sep = '/'
pardir = '..'
if start is None:
start = curdir
start_list = [x for x in abspath(start).split(sep) if x]
path_list = [x for x in abspath(path).split(sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
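# Example (editor's illustration):
#
#     relpath('/a/b/c', '/a')      ->  'b/c'
#     relpath('/a/b/c', '/a/x/y')  ->  '../../b/c'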
|
gpl-3.0
| -2,796,775,229,446,515,700 | 2,250,619,471,957,790,000 | 28.670146 | 78 | 0.585913 | false |
tensorflow/tensorflow
|
tensorflow/python/keras/initializers/initializers_v1.py
|
6
|
4404
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 1."""
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import keras_export
_v1_zeros_initializer = init_ops.Zeros
_v1_ones_initializer = init_ops.Ones
_v1_constant_initializer = init_ops.Constant
_v1_variance_scaling_initializer = init_ops.VarianceScaling
_v1_orthogonal_initializer = init_ops.Orthogonal
_v1_identity = init_ops.Identity
_v1_glorot_uniform_initializer = init_ops.GlorotUniform
_v1_glorot_normal_initializer = init_ops.GlorotNormal
keras_export(v1=['keras.initializers.Zeros', 'keras.initializers.zeros'])(
_v1_zeros_initializer)
keras_export(v1=['keras.initializers.Ones', 'keras.initializers.ones'])(
_v1_ones_initializer)
keras_export(v1=['keras.initializers.Constant', 'keras.initializers.constant'])(
_v1_constant_initializer)
keras_export(v1=['keras.initializers.VarianceScaling'])(
_v1_variance_scaling_initializer)
keras_export(v1=['keras.initializers.Orthogonal',
'keras.initializers.orthogonal'])(_v1_orthogonal_initializer)
keras_export(v1=['keras.initializers.Identity',
'keras.initializers.identity'])(_v1_identity)
keras_export(v1=['keras.initializers.glorot_uniform'])(
_v1_glorot_uniform_initializer)
keras_export(v1=['keras.initializers.glorot_normal'])(
_v1_glorot_normal_initializer)
@keras_export(v1=['keras.initializers.RandomNormal',
'keras.initializers.random_normal',
'keras.initializers.normal'])
class RandomNormal(init_ops.RandomNormal):
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(RandomNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.RandomUniform',
'keras.initializers.random_uniform',
'keras.initializers.uniform'])
class RandomUniform(init_ops.RandomUniform):
def __init__(self, minval=-0.05, maxval=0.05, seed=None,
dtype=dtypes.float32):
super(RandomUniform, self).__init__(
minval=minval, maxval=maxval, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.TruncatedNormal',
'keras.initializers.truncated_normal'])
class TruncatedNormal(init_ops.TruncatedNormal):
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(TruncatedNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.lecun_normal'])
class LecunNormal(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(LecunNormal, self).__init__(
scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export(v1=['keras.initializers.lecun_uniform'])
class LecunUniform(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(LecunUniform, self).__init__(
scale=1., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export(v1=['keras.initializers.he_normal'])
class HeNormal(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(HeNormal, self).__init__(
scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export(v1=['keras.initializers.he_uniform'])
class HeUniform(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(HeUniform, self).__init__(
scale=2., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
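# Illustrative usage sketch (editor's addition; the layer below is a
# hypothetical example, not part of this module).  These classes are TF1-style
# initializers, so a typical use is passing an instance as a layer's
# kernel_initializer:
#
#     layer = tf.compat.v1.layers.Dense(10, kernel_initializer=HeNormal(seed=0))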
|
apache-2.0
| -5,711,245,338,143,268,000 | 7,977,520,015,345,552,000 | 35.7 | 80 | 0.693233 | false |
mic4ael/indico
|
indico/core/db/sqlalchemy/searchable_titles.py
|
1
|
1794
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import escape_like, preprocess_ts_string
from indico.util.decorators import strict_classproperty
class SearchableTitleMixin(object):
"""Mixin to add a fulltext-searchable title column."""
#: Whether the title column may not be empty
title_required = True
@strict_classproperty
@classmethod
def __auto_table_args(cls):
args = [
db.Index('ix_{}_title_fts'.format(cls.__tablename__), db.func.to_tsvector('simple', cls.title),
postgresql_using='gin')
]
if cls.title_required:
args.append(db.CheckConstraint("title != ''", 'valid_title'))
return tuple(args)
@declared_attr
def title(cls):
return db.Column(
db.String,
nullable=False
)
@classmethod
def title_matches(cls, search_string, exact=False):
"""Check whether the title matches a search string.
To be used in a SQLAlchemy `filter` call.
:param search_string: A string to search for
:param exact: Whether to search for the exact string
"""
crit = db.func.to_tsvector('simple', cls.title).match(preprocess_ts_string(search_string),
postgresql_regconfig='simple')
if exact:
crit = crit & cls.title.ilike('%{}%'.format(escape_like(search_string)))
return crit
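# Illustrative usage sketch (editor's addition; ``Event`` is a hypothetical
# model that uses SearchableTitleMixin, not something defined in this module):
#
#     Event.query.filter(Event.title_matches('quantum computing')).all()
#     Event.query.filter(Event.title_matches('quantum computing', exact=True)).all()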
|
mit
| 3,132,638,551,013,628,400 | -4,763,128,694,348,741,000 | 32.222222 | 107 | 0.628205 | false |
kubeflow/kfserving
|
python/kfserving/test/test_v1alpha2_tensorflow_spec.py
|
1
|
1476
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kfserving
from kfserving.models.v1alpha2_tensorflow_spec import V1alpha2TensorflowSpec # noqa: E501
from kfserving.rest import ApiException
class TestV1alpha2TensorflowSpec(unittest.TestCase):
"""V1alpha2TensorflowSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha2TensorflowSpec(self):
"""Test V1alpha2TensorflowSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kfserving.models.v1alpha2_tensorflow_spec.V1alpha2TensorflowSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 4,911,751,256,830,537,000 | 2,805,388,470,212,248,600 | 26.333333 | 98 | 0.722222 | false |
AlbertoPeon/invenio
|
modules/webaccess/lib/external_authentication_cern_unit_tests.py
|
5
|
2815
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the user handling library."""
__revision__ = "$Id$"
import unittest
from invenio.config import CFG_CERN_SITE
from invenio.testutils import make_test_suite, run_test_suite, nottest
class ExternalAuthenticationCernTest(unittest.TestCase):
"""Test functions related to the CERN authentication."""
def setUp(self):
# pylint: disable=C0103
"""setting up helper variables for tests"""
from invenio import external_authentication_cern as cern
self.username, self.userpwd, self.useremail = \
open('demopwd.cfg', 'r').readline().strip().split(':', 2)
self.cern = cern.ExternalAuthCern()
@nottest
def test_auth_user_ok(self):
"""external authentication CERN - authorizing user through CERN system: should pass"""
self.assertEqual(self.cern.auth_user(self.username, self.userpwd), \
self.useremail)
@nottest
def test_auth_user_fail(self):
"""external authentication CERN - authorizing user through CERN system: should fail"""
self.assertEqual(self.cern.auth_user('patata', 'patata'), None)
@nottest
def test_fetch_user_groups_membership(self):
"""external authentication CERN - fetching user group membership at CERN"""
self.assertNotEqual(self.cern.fetch_user_groups_membership(self.useremail, self.userpwd), 0)
self.assertEqual(self.cern.fetch_user_groups_membership('patata', 'patata'), {})
@nottest
def test_fetch_user_preferences(self):
"""external authentication CERN - fetching user setting from CERN"""
self.assertEqual(self.cern.fetch_user_preferences(self.username, self.userpwd)['email'], self.useremail)
#self.assertRaises(KeyError, self.cern.fetch_user_preferences('patata', 'patata')['email'])
if CFG_CERN_SITE:
TEST_SUITE = make_test_suite(ExternalAuthenticationCernTest,)
else:
TEST_SUITE = make_test_suite()
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
gpl-2.0
| -2,364,728,739,518,304,000 | -7,221,902,300,306,730,000 | 39.797101 | 112 | 0.69556 | false |
alekz112/statsmodels
|
statsmodels/datasets/tests/test_utils.py
|
26
|
1697
|
import os
import sys
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
# smoke test
if sys.version_info[0] >= 3:
#NOTE: there's no way to test both since the cached files were
#created with Python 2.x, they're strings, but Python 3 expects
#bytes and the index file path is hard-coded so both can't live
#side by side
pass
#duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir)
else:
duncan = get_rdataset("Duncan", "car", cache=cur_dir)
assert_(duncan.from_cache)
#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
# test copied and adjusted from iolib/tests/test_foreign
from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
#base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
base_gh = "http://statsmodels.sourceforge.net/devel/_static/"
res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
assert_array_equal(res1 == res2, True)
#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
# test copied and adjusted from iolib/tests/test_foreign
from pandas.util.testing import assert_frame_equal
from statsmodels.datasets import macrodata
dta = macrodata.load_pandas().data
base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
res1 = webuse('macrodata', baseurl=base_gh)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
|
bsd-3-clause
| 6,082,375,528,818,919,000 | -8,215,799,423,457,892,000 | 41.425 | 101 | 0.705952 | false |
carlos-ferras/Sequence-ToolKit
|
pyqtgraph/graphicsItems/GraphicsObject.py
|
44
|
1720
|
from ..Qt import QtGui, QtCore, USE_PYSIDE
if not USE_PYSIDE:
import sip
from .GraphicsItem import GraphicsItem
__all__ = ['GraphicsObject']
class GraphicsObject(GraphicsItem, QtGui.QGraphicsObject):
"""
**Bases:** :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`, :class:`QtGui.QGraphicsObject`
Extension of QGraphicsObject with some useful methods (provided by :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`)
"""
_qtBaseClass = QtGui.QGraphicsObject
def __init__(self, *args):
self.__inform_view_on_changes = True
QtGui.QGraphicsObject.__init__(self, *args)
self.setFlag(self.ItemSendsGeometryChanges)
GraphicsItem.__init__(self)
def itemChange(self, change, value):
ret = QtGui.QGraphicsObject.itemChange(self, change, value)
if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
self.parentChanged()
try:
inform_view_on_change = self.__inform_view_on_changes
except AttributeError:
# It's possible that the attribute was already collected when the itemChange happened
# (if it was triggered during the gc of the object).
pass
else:
if inform_view_on_change and change in [self.ItemPositionHasChanged, self.ItemTransformHasChanged]:
self.informViewBoundsChanged()
## workaround for pyqt bug:
## http://www.riverbankcomputing.com/pipermail/pyqt/2012-August/031818.html
if not USE_PYSIDE and change == self.ItemParentChange and isinstance(ret, QtGui.QGraphicsItem):
ret = sip.cast(ret, QtGui.QGraphicsItem)
return ret
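# --- Usage sketch (not part of the original module) ---
# A hedged illustration of how GraphicsObject is typically subclassed: a
# concrete item supplies boundingRect() and paint(), while the view-bounds
# and parent-change bookkeeping comes from the bases above. The class name
# and geometry are assumptions made for the example.
class _ExampleCrossHair(GraphicsObject):
    def boundingRect(self):
        # Region the item occupies in local coordinates.
        return QtCore.QRectF(-1.0, -1.0, 2.0, 2.0)

    def paint(self, painter, option, widget=None):
        # Draw a simple cross centred on the item's local origin.
        painter.drawLine(QtCore.QPointF(-1.0, 0.0), QtCore.QPointF(1.0, 0.0))
        painter.drawLine(QtCore.QPointF(0.0, -1.0), QtCore.QPointF(0.0, 1.0))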
|
gpl-3.0
| -2,896,618,104,139,773,400 | -5,076,695,781,178,992,000 | 43.102564 | 132 | 0.668605 | false |
gtko/CouchPotatoServer
|
libs/guessit/transfo/guess_bonus_features.py
|
150
|
2155
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import found_property
import logging
log = logging.getLogger(__name__)
def process(mtree):
def previous_group(g):
for leaf in mtree.unidentified_leaves()[::-1]:
if leaf.node_idx < g.node_idx:
return leaf
def next_group(g):
for leaf in mtree.unidentified_leaves():
if leaf.node_idx > g.node_idx:
return leaf
def same_group(g1, g2):
return g1.node_idx[:2] == g2.node_idx[:2]
bonus = [ node for node in mtree.leaves() if 'bonusNumber' in node.guess ]
if bonus:
bonusTitle = next_group(bonus[0])
if same_group(bonusTitle, bonus[0]):
found_property(bonusTitle, 'bonusTitle', 0.8)
filmNumber = [ node for node in mtree.leaves()
if 'filmNumber' in node.guess ]
if filmNumber:
filmSeries = previous_group(filmNumber[0])
found_property(filmSeries, 'filmSeries', 0.9)
title = next_group(filmNumber[0])
found_property(title, 'title', 0.9)
season = [ node for node in mtree.leaves() if 'season' in node.guess ]
if season and 'bonusNumber' in mtree.info:
series = previous_group(season[0])
if same_group(series, season[0]):
found_property(series, 'series', 0.9)
|
gpl-3.0
| -992,742,349,119,161,100 | 1,395,819,338,709,934,000 | 34.327869 | 78 | 0.659861 | false |
Mibou/django-memcache-status
|
memcache_status/templatetags/memcache_status_tags.py
|
1
|
3249
|
from django import template
from django.conf import settings
try:
from django.core.cache import caches
except:
from django.core.cache import get_cache as caches
if caches.__module__.startswith('debug_toolbar'):
from debug_toolbar.panels.cache import original_get_cache as caches
get_cache = lambda cache_name: caches(cache_name) if hasattr(caches, '__call__') else caches[cache_name]
register = template.Library()
class CacheStats(template.Node):
"""
Reads the cache stats out of the memcached cache backend. Returns `None`
    if no cache stats are supported.
"""
def render(self, context):
cache_stats = []
for cache_backend_nm, cache_backend_attrs in settings.CACHES.iteritems():
try:
cache_backend = get_cache(cache_backend_nm)
this_backend_stats = cache_backend._cache.get_stats()
# returns list of (name, stats) tuples
for server_name, server_stats in this_backend_stats:
cache_stats.append(("%s: %s" % (
cache_backend_nm, server_name), server_stats))
except AttributeError: # this backend probably doesn't support that
continue
context['cache_stats'] = cache_stats
return ''
@register.tag
def get_cache_stats(parser, token):
return CacheStats()
@register.filter
def prettyname(name):
return ' '.join([word.capitalize() for word in name.split('_')])
@register.filter
def prettyvalue(value, key):
return PrettyValue().format(key, value)
class PrettyValue(object):
"""
Helper class that reformats the value. Looks for a method named
``format_<key>_value`` and returns that value. Returns the value
as is, if no format method is found.
"""
def format(self, key, value):
try:
func = getattr(self, 'format_%s_value' % key.lower())
return func(value)
except AttributeError:
return value
def format_limit_maxbytes_value(self, value):
return "%s (%s)" % (value, self.human_bytes(value))
def format_bytes_read_value(self, value):
return "%s (%s)" % (value, self.human_bytes(value))
def format_bytes_written_value(self, value):
return "%s (%s)" % (value, self.human_bytes(value))
def format_uptime_value(self, value):
return self.fract_timestamp(int(value))
def format_time_value(self, value):
from datetime import datetime
return datetime.fromtimestamp(int(value)).strftime('%x %X')
def fract_timestamp(self, s):
years, s = divmod(s, 31556952)
min, s = divmod(s, 60)
h, min = divmod(min, 60)
d, h = divmod(h, 24)
return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, min, s)
def human_bytes(self, bytes):
bytes = float(bytes)
if bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '%.2fGB' % gigabytes
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '%.2fMB' % megabytes
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '%.2fKB' % kilobytes
else:
size = '%.2fB' % bytes
return size
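# --- Usage sketch (not part of the original module) ---
# A hedged example of how the tag and filters above could be combined in a
# template; the surrounding markup is invented for illustration.
EXAMPLE_TEMPLATE = """
{% load memcache_status_tags %}
{% get_cache_stats %}
{% for server, stats in cache_stats %}
  <h3>{{ server }}</h3>
  <ul>
  {% for key, value in stats.items %}
    <li>{{ key|prettyname }}: {{ value|prettyvalue:key }}</li>
  {% endfor %}
  </ul>
{% endfor %}
"""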
|
bsd-3-clause
| -6,607,954,822,406,276,000 | -3,214,422,139,393,917,400 | 32.84375 | 104 | 0.602647 | false |
alexteodor/odoo
|
addons/sale/edi/sale_order.py
|
403
|
10861
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from openerp.tools.translate import _
from werkzeug import url_encode
SALE_ORDER_LINE_EDI_STRUCT = {
'sequence': True,
'name': True,
#custom: 'date_planned'
'product_id': True,
'product_uom': True,
'price_unit': True,
#custom: 'product_qty'
'discount': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
SALE_ORDER_EDI_STRUCT = {
'name': True,
'origin': True,
'company_id': True, # -> to be changed into partner
#custom: 'partner_ref'
'date_order': True,
'partner_id': True,
#custom: 'partner_address'
#custom: 'notes'
'order_line': SALE_ORDER_LINE_EDI_STRUCT,
# fields used for web preview only - discarded on import
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
'payment_term': True,
'order_policy': True,
'user_id': True,
'state': True,
}
class sale_order(osv.osv, EDIMixin):
_inherit = 'sale.order'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a Sale order"""
edi_struct = dict(edi_struct or SALE_ORDER_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner_obj = self.pool.get('res.partner')
edi_doc_list = []
for order in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, order, context=context)
# Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
edi_doc = super(sale_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
edi_doc.update({
# force trans-typing to purchase.order upon import
'__import_model': 'purchase.order',
'__import_module': 'purchase',
'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
context=context)[0],
'partner_ref': order.client_order_ref or False,
'notes': order.note or False,
})
edi_doc_list.append(edi_doc)
return edi_doc_list
def _edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
company_address_edi['customer'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document['partner_invoice_id'] = partner_edi_m2o
edi_document['partner_shipping_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
# TODO: refactor into common place for purchase/sale, e.g. into product module
partner_model = self.pool.get('res.partner')
partner = partner_model.browse(cr, uid, partner_id, context=context)
pricelist = partner.property_product_pricelist
if not pricelist:
pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'list0', context=context)
if not pricelist.currency_id == currency:
# look for a pricelist with the right type and currency, or make a new one
pricelist_type = 'sale'
product_pricelist = self.pool.get('product.pricelist')
match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
('currency_id','=',currency.id)])
if match_pricelist_ids:
pricelist_id = match_pricelist_ids[0]
else:
pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
'type': pricelist_type,
'currency_id': currency.id,
})
self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
'pricelist_id': pricelist_id})
pricelist = product_pricelist.browse(cr, uid, pricelist_id)
return self.edi_m2o(cr, uid, pricelist, context=context)
def edi_import(self, cr, uid, edi_document, context=None):
self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)
#import company as a new partner
partner_id = self._edi_import_company(cr, uid, edi_document, context=context)
# currency for rounding the discount calculations and for the pricelist
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
order_currency = res_currency.browse(cr, uid, currency_id)
partner_ref = edi_document.pop('partner_ref', False)
edi_document['client_order_ref'] = edi_document['name']
edi_document['name'] = partner_ref or edi_document['name']
edi_document['note'] = edi_document.pop('notes', False)
edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
# discard web preview fields, if present
edi_document.pop('amount_total', None)
edi_document.pop('amount_tax', None)
edi_document.pop('amount_untaxed', None)
order_lines = edi_document['order_line']
for order_line in order_lines:
self._edi_requires_attributes(('product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
order_line['product_uom_qty'] = order_line['product_qty']
del order_line['product_qty']
# discard web preview fields, if present
order_line.pop('price_subtotal', None)
return super(sale_order,self).edi_import(cr, uid, edi_document, context=context)
def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
res = dict.fromkeys(ids, False)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy in ('prepaid', 'manual') and \
order.company_id.paypal_account and order.state != 'draft':
params = {
"cmd": "_xclick",
"business": order.company_id.paypal_account,
"item_name": order.company_id.name + " Order " + order.name,
"invoice": order.name,
"amount": order.amount_total,
"currency_code": order.pricelist_id.currency_id.name,
"button_subtype": "services",
"no_note": "1",
"bn": "OpenERP_Order_PayNow_" + order.pricelist_id.currency_id.name,
}
res[order.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
return res
_columns = {
'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
}
class sale_order_line(osv.osv, EDIMixin):
_inherit='sale.order.line'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Overridden to provide sale order line fields with the expected names
(sale and purchase orders have different column names)"""
edi_struct = dict(edi_struct or SALE_ORDER_LINE_EDI_STRUCT)
edi_doc_list = []
for line in records:
edi_doc = super(sale_order_line,self).edi_export(cr, uid, [line], edi_struct, context)[0]
edi_doc['__import_model'] = 'purchase.order.line'
edi_doc['product_qty'] = line.product_uom_qty
if line.product_uos:
edi_doc.update(product_uom=line.product_uos,
product_qty=line.product_uos_qty)
edi_doc_list.append(edi_doc)
return edi_doc_list
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 1,404,109,992,368,409,900 | 1,382,669,157,351,739,000 | 46.845815 | 122 | 0.589448 | false |
gcd0318/django
|
django/contrib/auth/backends.py
|
468
|
6114
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
class ModelBackend(object):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, username=None, password=None, **kwargs):
UserModel = get_user_model()
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
try:
user = UserModel._default_manager.get_by_natural_key(username)
if user.check_password(password):
return user
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a non-existing user (#20760).
UserModel().set_password(password)
def _get_user_permissions(self, user_obj):
return user_obj.user_permissions.all()
def _get_group_permissions(self, user_obj):
user_groups_field = get_user_model()._meta.get_field('groups')
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
return Permission.objects.filter(**{user_groups_query: user_obj})
def _get_permissions(self, user_obj, obj, from_name):
"""
Returns the permissions of `user_obj` from `from_name`. `from_name` can
be either "group" or "user" to return permissions from
`_get_group_permissions` or `_get_user_permissions` respectively.
"""
if not user_obj.is_active or user_obj.is_anonymous() or obj is not None:
return set()
perm_cache_name = '_%s_perm_cache' % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms))
return getattr(user_obj, perm_cache_name)
def get_user_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings the user `user_obj` has from their
`user_permissions`.
"""
return self._get_permissions(user_obj, obj, 'user')
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings the user `user_obj` has from the
groups they belong.
"""
return self._get_permissions(user_obj, obj, 'group')
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = self.get_user_permissions(user_obj)
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
UserModel = get_user_model()
try:
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
UserModel = get_user_model()
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(**{
UserModel.USERNAME_FIELD: username
})
if created:
user = self.configure_user(user)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
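# --- Usage sketch (not part of the original module) ---
# A hedged example of customising RemoteUserBackend as described in its
# docstring: the subclass below would be referenced from a project's
# AUTHENTICATION_BACKENDS setting. The class name and the domain-stripping
# rule are assumptions made for the example.
class ExampleDomainRemoteUserBackend(RemoteUserBackend):
    # Only authenticate users that already exist in the database.
    create_unknown_user = False

    def clean_username(self, username):
        # Reduce 'CORP\\alice' or '[email protected]' to 'alice'.
        return username.rpartition('\\')[2].partition('@')[0]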
|
bsd-3-clause
| -1,320,028,264,850,444,000 | 8,356,519,026,543,775,000 | 37.2125 | 95 | 0.616454 | false |
twitter-forks/bazel
|
src/test/py/bazel/bazel_clean_test.py
|
6
|
4484
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
import unittest
from src.test.py.bazel import test_base
class BazelCleanTest(test_base.TestBase):
def testBazelClean(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'genrule(',
' name = "x",',
' outs = ["x.out"],',
' cmd = "touch $@",',
')',
])
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-genfiles'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_genfiles = stdout[0]
exit_code, stdout, stderr = self.RunBazel(['info', 'output_base'])
self.AssertExitCode(exit_code, 0, stderr)
output_base = stdout[0]
    # Repeat 10 times to ensure flaky errors like
# https://github.com/bazelbuild/bazel/issues/5907 are caught.
for _ in range(0, 10):
exit_code, _, stderr = self.RunBazel(['build', '//foo:x'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertTrue(os.path.exists(
os.path.join(bazel_genfiles, 'foo/x.out')))
exit_code, _, stderr = self.RunBazel(['clean'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertFalse(os.path.exists(
os.path.join(bazel_genfiles, 'foo/x.out')))
self.assertTrue(os.path.exists(output_base))
exit_code, _, stderr = self.RunBazel(['build', '//foo:x'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertTrue(os.path.exists(os.path.join(bazel_genfiles, 'foo/x.out')))
exit_code, _, stderr = self.RunBazel(['clean', '--expunge'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertFalse(os.path.exists(
os.path.join(bazel_genfiles, 'foo/x.out')))
self.assertFalse(os.path.exists(output_base))
@unittest.skipIf(not test_base.TestBase.IsLinux(),
'Async clean only supported on Linux')
def testBazelAsyncClean(self):
self.ScratchFile('WORKSPACE')
exit_code, _, stderr = self.RunBazel(['clean', '--async'])
self.AssertExitCode(exit_code, 0, stderr)
matcher = self._findMatch(' moved to (.*) for deletion', stderr)
self.assertTrue(matcher, stderr)
first_temp = matcher.group(1)
self.assertTrue(first_temp, stderr)
# Now do it again (we need to build to recreate exec root).
self.RunBazel(['build'])
exit_code, _, stderr = self.RunBazel(['clean', '--async'])
self.AssertExitCode(exit_code, 0, stderr)
matcher = self._findMatch(' moved to (.*) for deletion', stderr)
self.assertTrue(matcher, stderr)
second_temp = matcher.group(1)
self.assertTrue(second_temp, stderr)
# Two directories should be different.
self.assertNotEqual(second_temp, first_temp, stderr)
@unittest.skipIf(not test_base.TestBase.IsLinux(),
'Async clean only supported on Linux')
def testBazelAsyncCleanWithReadonlyDirectories(self):
self.ScratchFile('WORKSPACE')
exit_code, _, stderr = self.RunBazel(['build'])
self.AssertExitCode(exit_code, 0, stderr)
exit_code, stdout, stderr = self.RunBazel(['info', 'execution_root'])
self.AssertExitCode(exit_code, 0, stderr)
execroot = stdout[0]
readonly_dir = os.path.join(execroot, 'readonly')
os.mkdir(readonly_dir)
open(os.path.join(readonly_dir, 'somefile'), 'wb').close()
os.chmod(readonly_dir, 0o555)
exit_code, _, stderr = self.RunBazel(['clean', '--async'])
matcher = self._findMatch(' moved to (.*) for deletion', stderr)
self.assertTrue(matcher, stderr)
temp = matcher.group(1)
for _ in range(50):
if not os.path.isdir(temp):
break
time.sleep(.1)
else:
self.fail('temporary directory not removed: {!r}'.format(stderr))
def _findMatch(self, pattern, items):
r = re.compile(pattern)
for line in items:
matcher = r.search(line)
if matcher:
return matcher
return None
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -6,669,028,933,885,200,000 | 8,006,058,320,900,091,000 | 36.057851 | 80 | 0.656334 | false |
MCP1/android_kernel_motorola_msm8960dt-common
|
tools/perf/scripts/python/net_dropmonitor.py
|
1258
|
1562
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms[::-1]:
if loc >= i['loc']:
return (i['name'], loc - i['loc'])
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# Called from perf when it finds a corresponding event.
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
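# --- Usage sketch (not part of the original script) ---
# This script is driven by perf rather than executed directly; a typical
# session (exact option spelling depends on the perf build) looks like:
#   perf record -e skb:kfree_skb -a -- sleep 10
#   perf script -s net_dropmonitor.py
# perf's Python scripting engine then calls trace_begin(), skb__kfree_skb()
# for each recorded drop event, and trace_end() to print the report.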
|
gpl-2.0
| -9,028,273,216,370,404,000 | 2,125,014,063,766,842,000 | 20.694444 | 90 | 0.633163 | false |
SonyCSL/KadecotPepperSample
|
KadecotSamples/lib/requests/packages/urllib3/response.py
|
64
|
17149
|
from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# We certainly don't want to preload content when the response is chunked.
if not self.chunked and preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def _init_decoder(self):
"""
        Set up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
return data
@contextmanager
def _error_catcher(self):
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
try:
try:
yield
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
except Exception:
# The response may not be closed but we're not going to use it anymore
# so close it now to ensure that the connection is released back to the pool.
if self._original_response and not self._original_response.isclosed():
self._original_response.close()
raise
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
data = None
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if data:
self._fp_bytes_read += len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
        # FIXME: Rewrite this method and make it a class with better-structured logic.
if not self.chunked:
raise ResponseNotChunked("Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
with self._error_catcher():
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
yield self._decode(chunk, decode_content=decode_content,
flush_decoder=True)
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
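# --- Usage sketch (not part of the original module) ---
# A hedged example of the streaming path documented above. It assumes the
# standalone urllib3 package is importable; the helper name, URL handling and
# chunk size are illustrative choices, not part of this module.
def _example_stream_download(url, dest_path, chunk_size=2 ** 14):
    import urllib3
    http = urllib3.PoolManager()
    # preload_content=False leaves the body unread so stream() pulls it from
    # the socket in chunks instead of buffering everything in memory.
    resp = http.request('GET', url, preload_content=False)
    with open(dest_path, 'wb') as out:
        for chunk in resp.stream(chunk_size, decode_content=True):
            out.write(chunk)
    resp.release_conn()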
|
mit
| -2,060,233,968,995,775,700 | -2,684,453,789,138,211,300 | 34.358763 | 89 | 0.575194 | false |
nikolas/lettuce
|
tests/integration/lib/Django-1.3/tests/regressiontests/forms/models.py
|
89
|
2477
|
# -*- coding: utf-8 -*-
import datetime
import tempfile
from django.db import models
from django.core.files.storage import FileSystemStorage
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
class BoundaryModel(models.Model):
positive_integer = models.PositiveIntegerField(null=True, blank=True)
callable_default_value = 0
def callable_default():
global callable_default_value
callable_default_value = callable_default_value + 1
return callable_default_value
class Defaults(models.Model):
name = models.CharField(max_length=255, default='class default value')
def_date = models.DateField(default = datetime.date(1980, 1, 1))
value = models.IntegerField(default=42)
callable_default = models.IntegerField(default=callable_default)
class ChoiceModel(models.Model):
"""For ModelChoiceField and ModelMultipleChoiceField tests."""
name = models.CharField(max_length=10)
class ChoiceOptionModel(models.Model):
"""Destination for ChoiceFieldModel's ForeignKey.
Can't reuse ChoiceModel because error_message tests require that it have no instances."""
name = models.CharField(max_length=10)
class Meta:
ordering = ('name',)
def __unicode__(self):
return u'ChoiceOption %d' % self.pk
class ChoiceFieldModel(models.Model):
"""Model with ForeignKey to another model, for testing ModelForm
generation with ModelChoiceField."""
choice = models.ForeignKey(ChoiceOptionModel, blank=False,
default=lambda: ChoiceOptionModel.objects.get(name='default'))
choice_int = models.ForeignKey(ChoiceOptionModel, blank=False, related_name='choice_int',
default=lambda: 1)
multi_choice = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='multi_choice',
default=lambda: ChoiceOptionModel.objects.filter(name='default'))
multi_choice_int = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='multi_choice_int',
default=lambda: [1])
class FileModel(models.Model):
file = models.FileField(storage=temp_storage, upload_to='tests')
class Group(models.Model):
name = models.CharField(max_length=10)
def __unicode__(self):
return u'%s' % self.name
class Cheese(models.Model):
name = models.CharField(max_length=100)
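# --- Usage sketch (not part of the original test models) ---
# A hedged illustration of what these fixtures are exercised against: a
# ModelForm over ChoiceFieldModel generates ModelChoiceField /
# ModelMultipleChoiceField form fields for the relations declared above.
# The form class name is an assumption made for the example.
from django import forms

class ExampleChoiceFieldForm(forms.ModelForm):
    class Meta:
        model = ChoiceFieldModel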
|
gpl-3.0
| 3,011,430,865,817,720,000 | -4,756,592,234,976,457,000 | 32.472973 | 110 | 0.695196 | false |
gnieboer/tensorflow
|
tensorflow/contrib/learn/python/learn/preprocessing/__init__.py
|
138
|
1071
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing tools useful for building models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.preprocessing.categorical import *
from tensorflow.contrib.learn.python.learn.preprocessing.text import *
# pylint: enable=wildcard-import
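# --- Usage sketch (not part of the original module) ---
# A hedged example of the text tools re-exported above; VocabularyProcessor
# maps documents to fixed-length sequences of word ids. Exact behaviour
# depends on the contrib.learn version, so treat this as illustrative only.
def _example_vocabulary_processor(documents, max_document_length=10):
  processor = VocabularyProcessor(max_document_length)
  # fit_transform learns the vocabulary and yields padded id arrays.
  return list(processor.fit_transform(documents))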
|
apache-2.0
| 6,138,608,015,801,390,000 | 4,769,071,707,976,691,000 | 41.84 | 80 | 0.718954 | false |
cnbeining/ABPlayerHTML5-Py--nix
|
httpd.py
|
3
|
6069
|
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <[email protected]/[email protected]>
Filename: httpd.py
Type: httpd that supports resume.
Last modified: 2011-06-27 17:38
Description:
"""
import os
import socket
import sys
from SocketServer import ThreadingMixIn
from random import randint
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import urllib2
import StringIO
import binascii
import urllib
import atexit
import signal  # needed for the os.killpg(..., signal.SIGTERM) calls below
class NotracebackServer(ThreadingMixIn, HTTPServer):
"""
    Could make this a mixin, but we decided to keep it simple for a simple script.
"""
def handle_error(self, *args):
"""override default function to disable traceback."""
pass
class PartialContentHandler(SimpleHTTPRequestHandler):
def mycopy(self, f):
"""
        This does the actual file transfer. If the client terminated the
        transfer, we log it.
"""
try:
self.copyfile(f, self.wfile)
self.log_message('"%s" %s', self.requestline, "req finished.")
except socket.error:
self.log_message('"%s" %s', self.requestline, "req terminated.")
finally:
f.close()
return None
def do_GET(self):
"""Serve a GET request."""
#print('PATH:' + self.path)
if self.path.startswith('/__proxy__/'): #Reverse proxy this
#print()
url = self.path[11:]
print(url)
#request = urllib2.Request(url)
#response = urllib2.urlopen(request)
#print(response.code)
#self.send_response(response.code)
response = response_copy = urllib.urlopen(url)
response_code = response.code
self.send_response(response.code)
response_headers = response.headers
[self.send_header(i, response_headers[i]) for i in response_headers]
#print(self.headers['Origin'])
try:
self.send_header('Access-Control-Allow-Origin', self.headers['Origin'])
except:
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Credentials', 'true')
#print(dict(self.response_headers))
#print('HERE!')
self.end_headers()
self.copyfile(response, self.wfile)
return None
f = self.send_head()
if f:
self.mycopy(f)
def send_head(self):
"""
        Added support for partial content. The plain HTTP HEAD method may not
        be handled correctly.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
# oh, we do not support directory listing.
self.send_error(404, "File not found")
return None
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
if self.headers.get("Range"):
            # All partial content is handled here.
            # We do not support If-Range requests.
            # The range may only be of the form:
            #   Range: bytes=9855420-
start = self.headers.get("Range")
try:
pos = int(start[6:-1])
except ValueError:
self.send_error(400, "bad range specified.")
f.close()
return None
self.send_response(206)
self.send_header("Content-type", ctype)
self.send_header("Connection", "keep-alive")
fs = os.fstat(f.fileno())
full = fs.st_size
self.send_header("Content-Length", str(fs[6] - pos))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
start = start.replace("=", " ")
self.send_header("Content-Range", "%s%s/%s" % (start, full-1, full))
self.end_headers()
f.seek(pos)
self.mycopy(f)
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def main(port, server_class=NotracebackServer,
handler_class=PartialContentHandler):
os.setpgrp()
server_address = ('', port)
httpd = server_class(server_address, handler_class)
#httpd.serve_forever()
try: # Handle connections at the same time, so loading will not fail
while 1:
httpd.handle_request()
except Exception:
print "Finished"
os.killpg(os.getpid(), signal.SIGTERM)
os._exit(0)
os._exit(0)
#----------------------------------------------------------------------
def main2(port, folder):
""""""
sys.stdout.flush()
print(folder)
ip = '127.0.0.1'
print("serving on: http://%s:%s/" % (ip, port))
print("===== local files =====")
cwd = folder
for f in os.listdir(cwd):
if f == sys.argv[0] or f.startswith("."):
continue
fullpath = os.path.join(cwd, f)
if os.path.isfile(fullpath):
print("link: http://%s:%s/%s" % (ip, port, f))
print("===== start logging =====\n")
main(port=port)
os.killpg(os.getpid(), signal.SIGTERM)
if __name__ == "__main__":
port = 30000
#ip = socket.gethostbyname(socket.gethostname())
ip = '127.0.0.1'
print("serving on: http://%s:%s/" % (ip, port))
print("===== local files =====")
cwd = os.getcwd()
for f in os.listdir(cwd):
if f == sys.argv[0] or f.startswith("."):
continue
fullpath = os.path.join(cwd, f)
if os.path.isfile(fullpath):
print("link: http://%s:%s/%s" % (ip, port, f))
print("===== start logging =====\n")
main(port=port)
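# --- Usage sketch (not part of the original script) ---
# A hedged client-side illustration of the resume support in send_head():
# the server only understands "Range: bytes=<pos>-", so a client resuming at
# byte 1024 sends exactly that header and expects a 206 Partial Content
# reply. The helper name and default offset are assumptions.
def _example_resume_request(url, resume_from=1024):
    req = urllib2.Request(url)
    req.add_header('Range', 'bytes=%d-' % resume_from)
    resp = urllib2.urlopen(req)
    return resp.read()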
|
mit
| -1,747,569,454,897,956,600 | -5,996,103,871,896,991,000 | 31.459893 | 87 | 0.550503 | false |
AndresCidoncha/Python-Bot
|
telegram/user.py
|
5
|
2192
|
#!/usr/bin/env python
# pylint: disable=C0103,W0622
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram User"""
from telegram import TelegramObject
class User(TelegramObject):
"""This object represents a Telegram User.
Attributes:
id (int):
first_name (str):
last_name (str):
username (str):
type (str):
Args:
id (int):
first_name (str):
**kwargs: Arbitrary keyword arguments.
Keyword Args:
type (Optional[str]):
last_name (Optional[str]):
username (Optional[str]):
"""
def __init__(self,
id,
first_name,
**kwargs):
# Required
self.id = int(id)
self.first_name = first_name
# Optionals
self.type = kwargs.get('type', '')
self.last_name = kwargs.get('last_name', '')
self.username = kwargs.get('username', '')
@property
def name(self):
"""str: """
if self.username:
return '@%s' % self.username
if self.last_name:
return '%s %s' % (self.first_name, self.last_name)
return self.first_name
@staticmethod
def de_json(data):
"""
Args:
data (str):
Returns:
telegram.User:
"""
if not data:
return None
return User(**data)
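# --- Usage sketch (not part of the original module) ---
# A hedged example of building a User from the kind of dict the Bot API
# returns; the payload values are invented for illustration.
def _example_user_from_api():
    payload = {'id': 12345678, 'first_name': 'Ada', 'username': 'ada_l'}
    user = User.de_json(payload)
    return user.name  # '@ada_l' because a username is present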
|
gpl-3.0
| -5,325,854,105,142,287,000 | -8,461,867,288,734,953,000 | 26.746835 | 77 | 0.600365 | false |
googyanas/GoogyMax-G4
|
Documentation/target/tcm_mod_builder.py
|
2358
|
40707
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
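	# Pick the protocol-specific _base.h generator based on proto_ident.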
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
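	# Emit <fabric_mod_name>_configfs.c: the configfs glue (make/drop nodeacl, tpg
	# and wwn, the target_core_fabric_ops table, and module init/exit).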
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explicit\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
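	# Collect the function-pointer member lines of struct target_core_fabric_ops
	# from target_core_fabric.h into the global fabric_ops list.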
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
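	# Emit <fabric_mod_name>_fabric.c and _fabric.h: stub implementations and
	# prototypes for each op collected by tcm_mod_scan_fabric_ops().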
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
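	# Emit the new module's Makefile: the -objs list and its obj-$(CONFIG_...) rule.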
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
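	# Emit the new module's Kconfig entry (tristate, depends on TARGET_CORE && CONFIGFS_FS).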
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
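	# Append the obj-$(CONFIG_...) line for the new module to drivers/target/Makefile.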
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
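	# Drive the whole generation: create the module directory, emit the base header,
	# scan the fabric ops API, emit the fabric/configfs sources plus Makefile/Kconfig,
	# then optionally wire the new module into drivers/target.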
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
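# Illustrative usage (script and module names here are examples only):
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI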
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
gpl-2.0
| -7,979,800,131,980,223,000 | 5,414,665,887,435,889,000 | 37.842557 | 162 | 0.572678 | false |
nacl-webkit/chrome_deps
|
third_party/mesa/MesaLib/src/gallium/docs/source/exts/formatting.py
|
52
|
1120
|
# formatting.py
# Sphinx extension providing formatting for Gallium-specific data
# (c) Corbin Simpson 2010
# Public domain to the extent permitted; contact author for special licensing
import docutils.nodes
import sphinx.addnodes
def parse_envvar(env, sig, signode):
envvar, t, default = sig.split(" ", 2)
envvar = envvar.strip().upper()
t = " Type: %s" % t.strip(" <>").lower()
default = " Default: %s" % default.strip(" ()")
signode += sphinx.addnodes.desc_name(envvar, envvar)
signode += sphinx.addnodes.desc_type(t, t)
signode += sphinx.addnodes.desc_annotation(default, default)
return envvar
def parse_opcode(env, sig, signode):
opcode, desc = sig.split("-", 1)
opcode = opcode.strip().upper()
desc = " (%s)" % desc.strip()
signode += sphinx.addnodes.desc_name(opcode, opcode)
signode += sphinx.addnodes.desc_annotation(desc, desc)
return opcode
def setup(app):
app.add_description_unit("envvar", "envvar", "%s (environment variable)",
parse_envvar)
app.add_description_unit("opcode", "opcode", "%s (TGSI opcode)",
parse_opcode)
|
bsd-3-clause
| 3,811,477,348,597,113,300 | 1,613,079,933,998,514,400 | 35.129032 | 77 | 0.666071 | false |
40223117cda/w16cdaa
|
static/Brython3.1.0-20150301-090019/Lib/ui/widget.py
|
706
|
1774
|
import random
from browser import doc, win  # 'win' (the window alias) is used by getMousePosition() below
def getMousePosition(e):
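    # Return the page coordinates of mouse event e; fall back to window.event
    # plus the document scroll offsets for older browsers.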
if e is None:
e=win.event
if e.pageX or e.pageY:
return {'x': e.pageX, 'y': e.pageY}
if e.clientX or e.clientY:
_posx=e.clientX + doc.body.scrollLeft + doc.documentElement.scrollLeft;
_posy=e.clientY + doc.body.scrollTop + doc.documentElement.scrollTop;
return {'x': _posx, 'y': _posy}
return {'x': 0, 'y': 0}
class Widget:
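    # Thin wrapper around a DOM element: assigns it a (given or random) id and
    # provides attach/show/hide helpers.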
def __init__(self, element, type, id=None):
self._element=element
if id is None:
self._element.id='%s_%s' % (type, int(100000*random.random()))
else:
self._element.id=id
def get_id(self):
return self._element.id
def attach(self, element_id):
""" append this DOM component to DOM element element_id"""
#document[element_id] <= self._element #this doesn't work :(
#doc is actually the global 'doc' not the one we imported from browser :(
doc[element_id] <= self._element
def show(self):
        self._element.style.display='block'
def hide(self):
        self._element.style.display='none'
class DraggableWidget(Widget):
def __init__(self, element, type, id=None):
Widget.__init__(self, element, type, id)
def drag(e):
self._element.style.top='%spx' % (e.clientY - self._deltaY)
self._element.style.left='%spx' % (e.clientX - self._deltaX)
def mouseDown(e):
self._element.style.position='absolute'
self._deltaX=e.clientX - self._element.offsetLeft
self._deltaY=e.clientY - self._element.offsetTop
doc.bind('mousemove', drag)
def mouseUp(e):
doc.unbind('mousemove')
self._element.bind('mousedown', mouseDown)
self._element.bind('mouseup', mouseUp)
|
gpl-3.0
| -1,738,982,172,972,774,000 | -5,254,803,437,733,621,000 | 28.566667 | 79 | 0.606539 | false |
knifenomad/django
|
django/contrib/postgres/forms/array.py
|
258
|
6743
|
import copy
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import string_concat, ugettext_lazy as _
class SimpleArrayField(forms.CharField):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
self.delimiter = delimiter
super(SimpleArrayField, self).__init__(*args, **kwargs)
if min_length is not None:
self.min_length = min_length
self.validators.append(ArrayMinLengthValidator(int(min_length)))
if max_length is not None:
self.max_length = max_length
self.validators.append(ArrayMaxLengthValidator(int(max_length)))
def prepare_value(self, value):
if isinstance(value, list):
return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if value:
items = value.split(self.delimiter)
else:
items = []
errors = []
values = []
for i, item in enumerate(items):
try:
values.append(self.base_field.to_python(item))
except ValidationError as e:
for error in e.error_list:
errors.append(ValidationError(
string_concat(self.error_messages['item_invalid'], error.message),
code='item_invalid',
params={'nth': i},
))
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super(SimpleArrayField, self).validate(value)
errors = []
for i, item in enumerate(value):
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
errors.append(ValidationError(
string_concat(self.error_messages['item_invalid'], error.message),
code='item_invalid',
params={'nth': i},
))
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super(SimpleArrayField, self).run_validators(value)
errors = []
for i, item in enumerate(value):
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
errors.append(ValidationError(
string_concat(self.error_messages['item_invalid'], error.message),
code='item_invalid',
params={'nth': i},
))
if errors:
raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
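    # Renders `size` copies of the wrapped widget, named <name>_0 .. <name>_<size-1>,
    # one sub-widget per array item.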
def __init__(self, widget, size, **kwargs):
self.widget = widget() if isinstance(widget, type) else widget
self.size = size
super(SplitArrayWidget, self).__init__(**kwargs)
@property
def is_hidden(self):
return self.widget.is_hidden
def value_from_datadict(self, data, files, name):
return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
for index in range(self.size)]
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def render(self, name, value, attrs=None):
if self.is_localized:
self.widget.is_localized = self.is_localized
value = value or []
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i in range(max(len(value), self.size)):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(self.widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return ''.join(rendered_widgets)
@property
def media(self):
return self.widget.media
def __deepcopy__(self, memo):
obj = super(SplitArrayWidget, self).__deepcopy__(memo)
obj.widget = copy.deepcopy(self.widget)
return obj
@property
def needs_multipart_form(self):
return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
self.base_field = base_field
self.size = size
self.remove_trailing_nulls = remove_trailing_nulls
widget = SplitArrayWidget(widget=base_field.widget, size=size)
kwargs.setdefault('widget', widget)
super(SplitArrayField, self).__init__(**kwargs)
def clean(self, value):
cleaned_data = []
errors = []
if not any(value) and self.required:
raise ValidationError(self.error_messages['required'])
max_size = max(self.size, len(value))
for i in range(max_size):
item = value[i]
try:
cleaned_data.append(self.base_field.clean(item))
errors.append(None)
except ValidationError as error:
errors.append(ValidationError(
string_concat(self.error_messages['item_invalid'], error.message),
code='item_invalid',
params={'nth': i},
))
cleaned_data.append(None)
if self.remove_trailing_nulls:
null_index = None
for i, value in reversed(list(enumerate(cleaned_data))):
if value in self.base_field.empty_values:
null_index = i
else:
break
if null_index:
cleaned_data = cleaned_data[:null_index]
errors = errors[:null_index]
errors = list(filter(None, errors))
if errors:
raise ValidationError(errors)
return cleaned_data
|
bsd-3-clause
| 7,221,129,882,879,944,000 | -7,790,846,024,166,999,000 | 35.058824 | 102 | 0.561026 | false |
disigma/depot_tools
|
third_party/oauth2client/tools.py
|
171
|
8344
|
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = '[email protected] (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import BaseHTTPServer
import argparse
import httplib2
import logging
import os
import socket
import sys
import webbrowser
from oauth2client import client
from oauth2client import file
from oauth2client import util
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
# argparser is an ArgumentParser that contains the command-line options expected
# by tools.run_flow(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
argparser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
argparser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
argparser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR',
'CRITICAL'],
help='Set the logging level of detail.')
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(s):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
query = s.path.split('?', 1)[-1]
query = dict(parse_qsl(query))
s.server.query_params = query
s.wfile.write("<html><head><title>Authentication Status</title></head>")
s.wfile.write("<body><p>The authentication flow has completed.</p>")
s.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
  The run_flow() function is called from your application and runs through all
  the steps to obtain credentials. It takes a Flow argument and attempts to open
  an authorization server page in the user's default web browser. The server asks
  the user to grant your application access to the user's data. If the user
  grants access, run_flow() returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
redirects during OAuth authorization.;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
  The tools module defines an ArgumentParser named argparser that already
  contains the flag definitions that run_flow() requires. You can pass it in
  as part of the 'parents' argument to your own ArgumentParser constructor:
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
      print 'or port 8090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
def message_if_missing(filename):
"""Helpful message to display if the CLIENT_SECRETS file is missing."""
return _CLIENT_SECRETS_MESSAGE % filename
try:
from old_run import run
from old_run import FLAGS
except ImportError:
def run(*args, **kwargs):
raise NotImplementedError(
'The gflags library must be installed to use tools.run(). '
        'Please install gflags or preferably switch to using '
'tools.run_flow().')
|
bsd-3-clause
| -7,267,708,399,925,519,000 | 8,428,066,793,993,217,000 | 33.337449 | 80 | 0.698226 | false |
russelmahmud/mess-account
|
django/contrib/localflavor/uy/forms.py
|
310
|
2083
|
# -*- coding: utf-8 -*-
"""
UY-specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms.fields import Select, RegexField
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.uy.util import get_validation_digit
class UYDepartamentSelect(Select):
"""
A Select widget that uses a list of Uruguayan departaments as its choices.
"""
def __init__(self, attrs=None):
from uy_departaments import DEPARTAMENT_CHOICES
super(UYDepartamentSelect, self).__init__(attrs, choices=DEPARTAMENT_CHOICES)
class UYCIField(RegexField):
"""
A field that validates Uruguayan 'Cedula de identidad' (CI) numbers.
"""
default_error_messages = {
'invalid': _("Enter a valid CI number in X.XXX.XXX-X,"
"XXXXXXX-X or XXXXXXXX format."),
'invalid_validation_digit': _("Enter a valid CI number."),
}
def __init__(self, *args, **kwargs):
super(UYCIField, self).__init__(r'(?P<num>(\d{6,7}|(\d\.)?\d{3}\.\d{3}))-?(?P<val>\d)',
*args, **kwargs)
def clean(self, value):
"""
Validates format and validation digit.
        The official format is [X.]XXX.XXX-X but usually dots and/or the dash are
omitted so, when validating, those characters are ignored if found in
the correct place. The three typically used formats are supported:
[X]XXXXXXX, [X]XXXXXX-X and [X.]XXX.XXX-X.
"""
value = super(UYCIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = self.regex.match(value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number = int(match.group('num').replace('.', ''))
validation_digit = int(match.group('val'))
if not validation_digit == get_validation_digit(number):
raise ValidationError(self.error_messages['invalid_validation_digit'])
return value
|
bsd-3-clause
| 8,106,152,350,979,049,000 | -3,655,424,540,144,631,300 | 33.716667 | 95 | 0.62554 | false |
hiteshagrawal/python
|
battleship3.py
|
1
|
1558
|
#!/usr/bin/python
import random
board = []
size = 0
def board_print():
global board
for row in board:
print " ".join(row)
def new_game():
global board,size
board = []
size = input("Enter board size:") ## Input will automatically convert it to integer
for row in range(size):
board.append(["0"] * size )
board_print()
play(3)
def play(turn):
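	# One game round: hide the ship at a random cell and give the player `turn`
	# guesses; misses are marked "-", a hit is marked "X" and a new game starts.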
global board
my_ship_row = random.randrange(size)
my_ship_col = random.randrange(size)
print "My ship position is row: %d , column: %d" %(my_ship_row,my_ship_col)
while turn > 0:
print "Remaining number of chances: %d"%(turn)
guess_row = input("Please enter a row number:")
guess_col = input("Please enter a col number:")
if my_ship_row == guess_row and my_ship_col == guess_col:
print "You busted my ship"
board[guess_row][guess_col] = "X"
board_print()
print "You have WON, starting a new game"
new_game()
elif (guess_row + 1 ) > size or (guess_col + 1) > size:
print "This is not even in the ocean, try again"
continue
elif board[guess_row][guess_col] != "-" :
print "My ship is not at this location"
board[guess_row][guess_col] = "-"
board_print()
turn -= 1
elif board[guess_row][guess_col] == "-" :
print "You already guess this location, try again"
board_print()
continue
else:
print "Remaining number of chances: %d"%(turn)
board[my_ship_row][my_ship_col] = "X"
print "My Ship was at location \"X\""
board_print()
print "You have lost, starting a new game"
new_game()
#board_print()
new_game()
|
gpl-2.0
| 6,307,790,749,506,179,000 | -4,522,251,163,902,227,000 | 23.34375 | 84 | 0.639281 | false |
windyuuy/opera
|
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_unittest.py
|
124
|
7300
|
# Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user import User
class UserTest(unittest.TestCase):
example_user_response = "example user response"
def test_prompt_repeat(self):
self.repeatsRemaining = 2
def mock_raw_input(message):
self.repeatsRemaining -= 1
if not self.repeatsRemaining:
return UserTest.example_user_response
return None
self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response)
def test_prompt_when_exceeded_repeats(self):
self.repeatsRemaining = 2
def mock_raw_input(message):
self.repeatsRemaining -= 1
return None
self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None)
def test_prompt_with_multiple_lists(self):
def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
def mock_raw_input(message):
return inputs.pop(0)
output_capture = OutputCapture()
actual_result = output_capture.assert_outputs(
self,
User.prompt_with_multiple_lists,
args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
self.assertEqual(actual_result, expected_result)
self.assertEqual(len(inputs), 0)
run_prompt_test(["1"], "foo")
run_prompt_test(["badinput", "2"], "bar")
run_prompt_test(["3"], "foobar")
run_prompt_test(["4"], "barbaz")
run_prompt_test(["5"], "foobaz")
run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
def test_prompt_with_list(self):
def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
def mock_raw_input(message):
return inputs.pop(0)
output_capture = OutputCapture()
actual_result = output_capture.assert_outputs(
self,
User.prompt_with_list,
args=["title", ["foo", "bar"]],
kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
expected_stdout="title\n 1. foo\n 2. bar\n")
self.assertEqual(actual_result, expected_result)
self.assertEqual(len(inputs), 0)
run_prompt_test(["1"], "foo")
run_prompt_test(["badinput", "2"], "bar")
run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)
def test_confirm(self):
test_cases = (
(("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
(("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
(("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
(("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
(("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
)
for test_case in test_cases:
expected, inputs = test_case
def mock_raw_input(message):
self.assertEqual(expected[0], message)
return inputs[1]
result = User().confirm(default=inputs[0],
raw_input=mock_raw_input)
self.assertEqual(expected[1], result)
def test_warn_if_application_is_xcode(self):
output = OutputCapture()
user = User()
output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"])
output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"])
output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"]) # case sensitive matching
xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n"
output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning)
output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning)
|
bsd-3-clause
| -6,382,373,016,804,748,000 | -8,102,057,743,590,662,000 | 51.517986 | 141 | 0.623151 | false |
commshare/testLiveSRS
|
trunk/objs/CherryPy-3.2.4/cherrypy/test/test_auth_basic.py
|
54
|
2853
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
import cherrypy
from cherrypy._cpcompat import md5, ntob
from cherrypy.lib import auth_basic
from cherrypy.test import helper
class BasicAuthTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "This is public."
index.exposed = True
class BasicProtected:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
class BasicProtected2:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
userpassdict = {'xuser' : 'xpassword'}
userhashdict = {'xuser' : md5(ntob('xpassword')).hexdigest()}
def checkpasshash(realm, user, password):
p = userhashdict.get(user)
return p and p == md5(ntob(password)).hexdigest() or False
conf = {'/basic': {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(userpassdict)},
'/basic2': {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': checkpasshash},
}
root = Root()
root.basic = BasicProtected()
root.basic2 = BasicProtected2()
cherrypy.tree.mount(root, config=conf)
setup_server = staticmethod(setup_server)
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testBasic(self):
self.getPage("/basic/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
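        # (Added note) 'eHVzZXI6eHBhc3N3b3Jk' is base64 for 'xuser:xpassword';
        # the first request below flips the final character so the decoded
        # password is wrong and the handler must answer 401.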
self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
def testBasic2(self):
self.getPage("/basic2/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
|
mit
| -8,631,221,163,864,515,000 | 3,464,600,789,004,194,000 | 35.113924 | 106 | 0.596916 | false |
chromium/chromium
|
third_party/blink/tools/blinkpy/web_tests/port/driver.py
|
1
|
30937
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import logging
import re
import shlex
import six
import time
from blinkpy.common.system import path
from blinkpy.common.system.profiler import ProfilerFactory
_log = logging.getLogger(__name__)
DRIVER_START_TIMEOUT_SECS = 30
def coalesce_repeated_switches(cmd):
"""Combines known repeated command line switches.
Repetition of a switch notably happens when both per-test switches and the
additional driver flags specify different --enable-features. For instance:
--enable-features=X --enable-features=Y
    Conceptually, this indicates to enable features X and Y. However,
Chrome's command line parsing only applies the last seen switch, resulting
in only feature Y being enabled.
To solve this, transform it to:
--enable-features=X,Y
"""
def parse_csv_switch(prefix, switch, values_set):
"""If |switch| starts with |prefix|, parses it as a comma-separated
list of values and adds them all to |values_set|. Returns False if the
switch was not a match for |prefix|."""
if not switch.startswith(prefix):
return False
values = switch[len(prefix):].split(',')
for value in values:
values_set.add(value)
return True
def add_csv_switch(prefix, values_set, result):
if len(values_set) == 0:
return
sorted_values = sorted(list(values_set))
result.append('%s%s' % (prefix, ','.join(sorted_values)))
result = []
ENABLE_FEATURES_FLAG = '--enable-features='
DISABLE_FEATURES_FLAG = '--disable-features='
enabled_set = set()
disabled_set = set()
for switch in cmd:
if parse_csv_switch(ENABLE_FEATURES_FLAG, switch, enabled_set):
continue
if parse_csv_switch(DISABLE_FEATURES_FLAG, switch, disabled_set):
continue
result.append(switch)
# Append any coalesced (comma separated) flags to the end.
add_csv_switch(ENABLE_FEATURES_FLAG, enabled_set, result)
add_csv_switch(DISABLE_FEATURES_FLAG, disabled_set, result)
return result
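# Illustrative sketch (added; not part of the original module). With the helper
# above, repeated --enable-features switches collapse into one comma-separated
# switch while unrelated flags keep their order. The command values are made up.
assert coalesce_repeated_switches(
    ['./content_shell', '--enable-features=X', '--foo', '--enable-features=Y']
) == ['./content_shell', '--foo', '--enable-features=X,Y']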
class DriverInput(object):
def __init__(self, test_name, timeout, image_hash, args):
self.test_name = test_name
self.timeout = timeout # in ms
self.image_hash = image_hash
self.args = args
class DriverOutput(object):
"""Groups information about a output from driver for easy passing
and post-processing of data.
"""
def __init__(self,
text,
image,
image_hash,
audio,
crash=False,
test_time=0,
measurements=None,
timeout=False,
error='',
crashed_process_name='??',
crashed_pid=None,
crash_log=None,
crash_site=None,
leak=False,
leak_log=None,
pid=None,
command=None):
# FIXME: Args could be renamed to better clarify what they do.
self.text = text
self.image = image # May be empty-string if the test crashes.
self.image_hash = image_hash
self.image_diff = None # image_diff gets filled in after construction.
self.audio = audio # Binary format is port-dependent.
self.crash = crash
self.crashed_process_name = crashed_process_name
self.crashed_pid = crashed_pid
self.crash_log = crash_log
self.crash_site = crash_site
self.leak = leak
self.leak_log = leak_log
self.test_time = test_time
self.measurements = measurements
self.timeout = timeout
self.error = error # stderr output
self.pid = pid
self.command = command
def has_stderr(self):
return bool(self.error)
class DeviceFailure(Exception):
pass
class Driver(object):
"""object for running test(s) using content_shell or other driver."""
def __init__(self, port, worker_number, no_timeout=False):
"""Initialize a Driver to subsequently run tests.
Typically this routine will spawn content_shell in a config
ready for subsequent input.
port - reference back to the port object.
worker_number - identifier for a particular worker/driver instance
"""
self.WPT_DIRS = port.WPT_DIRS
self._port = port
self._worker_number = worker_number
self._no_timeout = no_timeout
self._driver_tempdir = None
# content_shell can report back subprocess crashes by printing
# "#CRASHED - PROCESSNAME". Since those can happen at any time
# and ServerProcess won't be aware of them (since the actual tool
# didn't crash, just a subprocess) we record the crashed subprocess name here.
self._crashed_process_name = None
self._crashed_pid = None
# content_shell can report back subprocesses that became unresponsive
# This could mean they crashed.
self._subprocess_was_unresponsive = False
# content_shell can report back subprocess DOM-object leaks by printing
# "#LEAK". This leak detection is enabled only when the flag
# --enable-leak-detection is passed to content_shell.
self._leaked = False
self._leak_log = None
# stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
# stderr output, as well as if we've seen #EOF on this driver instance.
# FIXME: We should probably remove _read_first_block and _read_optional_image_block and
# instead scope these locally in run_test.
self.error_from_test = bytearray()
self.err_seen_eof = False
self._server_process = None
self._current_cmd_line = None
self._measurements = {}
if self._port.get_option('profile'):
profiler_name = self._port.get_option('profiler')
self._profiler = ProfilerFactory.create_profiler(
self._port.host,
self._port._path_to_driver(), # pylint: disable=protected-access
self._port.artifacts_directory(),
profiler_name)
else:
self._profiler = None
def __del__(self):
self.stop()
def run_test(self, driver_input):
"""Run a single test and return the results.
Note that it is okay if a test times out or crashes. content_shell
will be stopped when the test ends, and then restarted for the next
test when this function is invoked again. As part of the restart, the
state of Driver will be reset.
Returns a DriverOutput object.
"""
start_time = time.time()
stdin_deadline = start_time + int(driver_input.timeout) / 2000.0
self.start(driver_input.args, stdin_deadline)
test_begin_time = time.time()
self.error_from_test = bytearray()
self.err_seen_eof = False
test_command = self._command_from_driver_input(driver_input)
server_process_command = self._server_process.cmd()
deadline = test_begin_time + int(driver_input.timeout) / 1000.0
self._server_process.write(test_command.encode('utf8', 'replace'))
# First block is either text or audio
text, audio = self._read_first_block(deadline)
# The second (optional) block is image data.
image, actual_image_hash = self._read_optional_image_block(deadline)
crashed = self.has_crashed()
timed_out = self._server_process.timed_out
pid = self._server_process.pid()
leaked = self._leaked
if not crashed:
sanitizer = self._port.output_contains_sanitizer_messages(
self.error_from_test)
if sanitizer:
self.error_from_test = 'OUTPUT CONTAINS "sanitizer",' + \
' so we are treating this test as if it crashed, even though it did not.\n\n' + self.error_from_test
crashed = True
self._crashed_process_name = 'unknown process name'
self._crashed_pid = 0
if crashed or timed_out or leaked:
# We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
# In the timeout case, we kill the hung process as well.
# Add a delay to allow process to finish post-run hooks, such as dumping code coverage data.
out, err = self._server_process.stop(
self._port.get_option('driver_kill_timeout_secs'))
if out:
text += out
if err:
self.error_from_test += err
self._server_process = None
crash_log = None
crash_site = None
if crashed:
self.error_from_test, crash_log, crash_site = self._get_crash_log(
text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
if not crash_log:
pid_str = str(
self._crashed_pid) if self._crashed_pid else 'unknown pid'
crash_log = 'No crash log found for %s:%s.\n' % (
self._crashed_process_name, pid_str)
# If we were unresponsive append a message informing there may not have been a crash.
if self._subprocess_was_unresponsive:
crash_log += 'Process failed to become responsive before timing out.\n'
# Print stdout and stderr to the placeholder crash log; we want as much context as possible.
if self.error_from_test:
crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (
text, self.error_from_test)
command = ("%s %s" %
(" ".join(server_process_command), test_command)).encode(
'ascii', 'replace')
if actual_image_hash:
actual_image_hash = actual_image_hash.decode('utf8', 'replace')
return DriverOutput(text,
image,
actual_image_hash,
audio,
crash=crashed,
test_time=time.time() - test_begin_time,
measurements=self._measurements,
timeout=timed_out,
error=self.error_from_test,
crashed_process_name=self._crashed_process_name,
crashed_pid=self._crashed_pid,
crash_log=crash_log,
crash_site=crash_site,
leak=leaked,
leak_log=self._leak_log,
pid=pid,
command=command)
def _get_crash_log(self, stdout, stderr, newer_than):
# pylint: disable=protected-access
return self._port._get_crash_log(self._crashed_process_name,
self._crashed_pid, stdout, stderr,
newer_than)
# FIXME: Seems this could just be inlined into callers.
@classmethod
def _command_wrapper(cls, wrapper_option):
# Hook for injecting valgrind or other runtime instrumentation,
# used by e.g. tools/valgrind/valgrind_tests.py.
return shlex.split(wrapper_option) if wrapper_option else []
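    # Hedged example (added): a '--wrapper' option such as
    # "valgrind --leak-check=full" is shlex-split into
    # ['valgrind', '--leak-check=full'] and prepended to the driver command
    # line by cmd_line() below.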
# The *_HOST_AND_PORTS tuples are (hostname, insecure_port, secure_port),
# i.e. the information needed to create HTTP and HTTPS URLs.
# TODO(burnik): Read from config or args.
HTTP_DIR = 'http/tests/'
HTTP_LOCAL_DIR = 'http/tests/local/'
HTTP_HOST_AND_PORTS = ('127.0.0.1', 8000, 8443)
WPT_HOST_AND_PORTS = ('web-platform.test', 8001, 8444)
WPT_H2_PORT = 9000
def is_http_test(self, test_name):
return (test_name.startswith(self.HTTP_DIR)
and not test_name.startswith(self.HTTP_LOCAL_DIR))
def test_to_uri(self, test_name):
"""Convert a test name to a URI.
Tests which have an 'https' directory in their paths or '.https.' or
'.serviceworker.' in their name will be loaded over HTTPS; all other
tests over HTTP. Example paths loaded over HTTPS:
http/tests/security/mixedContent/https/test1.html
http/tests/security/mixedContent/test1.https.html
external/wpt/encoding/idlharness.any.serviceworker.html
"""
using_wptserve = self._port.should_use_wptserve(test_name)
if not self.is_http_test(test_name) and not using_wptserve:
return path.abspath_to_uri(self._port.host.platform,
self._port.abspath_for_test(test_name))
if using_wptserve:
for wpt_path, url_prefix in self.WPT_DIRS.items():
# The keys of WPT_DIRS do not have trailing slashes.
wpt_path += '/'
if test_name.startswith(wpt_path):
test_dir_prefix = wpt_path
test_url_prefix = url_prefix
break
else:
# We really shouldn't reach here, but in case we do, fail gracefully.
_log.error('Unrecognized WPT test name: %s', test_name)
test_dir_prefix = 'external/wpt/'
test_url_prefix = '/'
hostname, insecure_port, secure_port = self.WPT_HOST_AND_PORTS
if '.www.' in test_name:
hostname = "www.%s" % hostname
if '.h2.' in test_name:
secure_port = self.WPT_H2_PORT
else:
test_dir_prefix = self.HTTP_DIR
test_url_prefix = '/'
hostname, insecure_port, secure_port = self.HTTP_HOST_AND_PORTS
relative_path = test_name[len(test_dir_prefix):]
if ('/https/' in test_name or '.https.' in test_name
or '.h2.' in test_name or '.serviceworker.' in test_name
or '.serviceworker-module.' in test_name):
return 'https://%s:%d%s%s' % (hostname, secure_port,
test_url_prefix, relative_path)
return 'http://%s:%d%s%s' % (hostname, insecure_port, test_url_prefix,
relative_path)
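    # Hedged summary (added) of the scheme choice above: a test is served over
    # HTTPS when its path contains '/https/' or its name contains '.https.',
    # '.h2.', '.serviceworker.' or '.serviceworker-module.'; '.www.' only
    # changes the wptserve hostname, and '.h2.' also selects the HTTP/2 port.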
def _get_uri_prefixes(self, hostname, insecure_port, secure_port):
"""Returns the HTTP and HTTPS URI prefix for a hostname."""
return [
'http://%s:%d/' % (hostname, insecure_port),
'https://%s:%d/' % (hostname, secure_port)
]
def uri_to_test(self, uri):
"""Return the base web test name for a given URI.
This returns the test name for a given URI, e.g., if you passed in
"file:///src/web_tests/fast/html/keygen.html" it would return
"fast/html/keygen.html".
"""
if uri.startswith('file:///'):
prefix = path.abspath_to_uri(self._port.host.platform,
self._port.web_tests_dir())
if not prefix.endswith('/'):
prefix += '/'
return uri[len(prefix):]
for prefix in self._get_uri_prefixes(*self.HTTP_HOST_AND_PORTS):
if uri.startswith(prefix):
return self.HTTP_DIR + uri[len(prefix):]
for prefix in self._get_uri_prefixes(*self.WPT_HOST_AND_PORTS):
if uri.startswith(prefix):
url_path = '/' + uri[len(prefix):]
for wpt_path, url_prefix in self.WPT_DIRS.items():
if url_path.startswith(url_prefix):
return wpt_path + '/' + url_path[len(url_prefix):]
raise NotImplementedError('unknown url type: %s' % uri)
def has_crashed(self):
if self._server_process is None:
return False
if self._crashed_process_name:
return True
if self._server_process.has_crashed():
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
return True
return False
def start(self, per_test_args, deadline):
new_cmd_line = self.cmd_line(per_test_args)
if not self._server_process or new_cmd_line != self._current_cmd_line:
self._start(per_test_args)
self._run_post_start_tasks()
def _setup_environ_for_driver(self, environment):
if self._profiler:
environment = self._profiler.adjusted_environment(environment)
return environment
def _initialize_server_process(self, server_name, cmd_line, environment):
self._server_process = self._port.server_process_constructor(
self._port,
server_name,
cmd_line,
environment,
more_logging=self._port.get_option('driver_logging'))
def _start(self, per_test_args, wait_for_ready=True):
self.stop()
self._driver_tempdir = self._port.host.filesystem.mkdtemp(
prefix='%s-' % self._port.driver_name())
server_name = self._port.driver_name()
environment = self._port.setup_environ_for_server()
environment = self._setup_environ_for_driver(environment)
self._crashed_process_name = None
self._crashed_pid = None
self._leaked = False
cmd_line = self.cmd_line(per_test_args)
self._initialize_server_process(server_name, cmd_line, environment)
self._server_process.start()
self._current_cmd_line = cmd_line
if wait_for_ready:
deadline = time.time() + DRIVER_START_TIMEOUT_SECS
if not self._wait_for_server_process_output(
self._server_process, deadline, b'#READY'):
_log.error('%s took too long to startup.' % server_name)
def _wait_for_server_process_output(self, server_process, deadline, text):
output = b''
line = server_process.read_stdout_line(deadline)
output += server_process.pop_all_buffered_stderr()
while (not server_process.timed_out
and not server_process.has_crashed()
and not text in line.rstrip()):
output += line
line = server_process.read_stdout_line(deadline)
output += server_process.pop_all_buffered_stderr()
if server_process.timed_out:
_log.error('Timed out while waiting for the %s process: \n"%s"',
server_process.name(), output)
return False
if server_process.has_crashed():
_log.error('The %s process crashed while starting: \n"%s"',
server_process.name(), output)
return False
return True
def _run_post_start_tasks(self):
# Remote drivers may override this to delay post-start tasks until the server has ack'd.
if self._profiler:
self._profiler.attach_to_pid(self._pid_on_target())
def _pid_on_target(self):
# Remote drivers will override this method to return the pid on the device.
return self._server_process.pid()
def stop(self, timeout_secs=None):
if timeout_secs is None:
# Add a delay to allow process to finish post-run hooks, such as dumping code coverage data.
timeout_secs = self._port.get_option('driver_kill_timeout_secs')
if self._server_process:
self._server_process.stop(timeout_secs)
self._server_process = None
if self._profiler:
self._profiler.profile_after_exit()
if self._driver_tempdir:
self._port.host.filesystem.rmtree(str(self._driver_tempdir))
self._driver_tempdir = None
self._current_cmd_line = None
def _base_cmd_line(self):
return [self._port._path_to_driver()] # pylint: disable=protected-access
def cmd_line(self, per_test_args):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd += self._base_cmd_line()
if self._no_timeout:
cmd.append('--no-timeout')
cmd.extend(self._port.additional_driver_flags())
if self._port.get_option('enable_leak_detection'):
cmd.append('--enable-leak-detection')
cmd.extend(per_test_args)
cmd = coalesce_repeated_switches(cmd)
cmd.append('-')
return cmd
def _check_for_driver_crash(self, error_line):
if error_line == '#CRASHED\n':
# This is used on Windows to report that the process has crashed
# See http://trac.webkit.org/changeset/65537.
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
elif (error_line.startswith('#CRASHED - ')
or error_line.startswith('#PROCESS UNRESPONSIVE - ')):
# WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
match = re.match(r'#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)',
error_line)
self._crashed_process_name = (match.group(1)
if match else 'WebProcess')
match = re.search(r'pid (\d+)', error_line)
pid = int(match.group(1)) if match else None
self._crashed_pid = pid
# FIXME: delete this after we're sure this code is working :)
_log.debug('%s crash, pid = %s, error_line = %s',
self._crashed_process_name, str(pid), error_line)
if error_line.startswith('#PROCESS UNRESPONSIVE - '):
self._subprocess_was_unresponsive = True
self._port.sample_process(self._crashed_process_name,
self._crashed_pid)
            # We want to show this since it's not a regular crash and we probably don't have a crash log.
self.error_from_test += error_line
return True
return self.has_crashed()
def _check_for_leak(self, error_line):
if error_line.startswith('#LEAK - '):
self._leaked = True
match = re.match(r'#LEAK - (\S+) pid (\d+) (.+)\n', error_line)
self._leak_log = match.group(3)
return self._leaked
def _command_from_driver_input(self, driver_input):
# FIXME: performance tests pass in full URLs instead of test names.
if (driver_input.test_name.startswith('http://')
or driver_input.test_name.startswith('https://')
or driver_input.test_name == ('about:blank')):
command = driver_input.test_name
elif (self.is_http_test(driver_input.test_name)
or self._port.should_use_wptserve(driver_input.test_name)):
command = self.test_to_uri(driver_input.test_name)
else:
command = self._port.abspath_for_test(driver_input.test_name)
# ' is the separator between arguments.
if self._port.supports_per_test_timeout():
command += "'--timeout'%s" % driver_input.timeout
if driver_input.image_hash:
command += "'" + driver_input.image_hash
return command + '\n'
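    # Hedged example (added) of a command written to the driver, with made-up
    # values:
    #   /src/web_tests/fast/html/keygen.html'--timeout'6000'0123abcd\n
    # i.e. the path or URI, then the optional --timeout and expected image
    # hash, joined by the single-quote separator described above.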
def _read_first_block(self, deadline):
# returns (text_content, audio_content)
block = self._read_block(deadline)
if block.malloc:
self._measurements['Malloc'] = float(block.malloc)
if block.js_heap:
self._measurements['JSHeap'] = float(block.js_heap)
if block.content_type == b'audio/wav':
return (None, block.decoded_content)
return (block.decoded_content, None)
def _read_optional_image_block(self, deadline):
# returns (image, actual_image_hash)
block = self._read_block(deadline, wait_for_stderr_eof=True)
if block.content and block.content_type == b'image/png':
return (block.decoded_content, block.content_hash)
return (None, block.content_hash)
def _read_header(self,
block,
line,
header_text,
header_attr,
header_filter=None):
if (line.startswith(header_text)
and getattr(block, header_attr) is None):
value = line.split()[1]
if header_filter:
value = header_filter(value)
setattr(block, header_attr, value)
return True
return False
def _process_stdout_line(self, block, line):
if (self._read_header(block, line, b'Content-Type: ', 'content_type')
or self._read_header(
block, line, b'Content-Transfer-Encoding: ', 'encoding')
or self._read_header(block, line, b'Content-Length: ',
'_content_length', int) or
self._read_header(block, line, b'ActualHash: ', 'content_hash')
or self._read_header(block, line, b'DumpMalloc: ', 'malloc')
or self._read_header(block, line, b'DumpJSHeap: ', 'js_heap')
or self._read_header(block, line, b'StdinPath', 'stdin_path')):
return
# Note, we're not reading ExpectedHash: here, but we could.
# If the line wasn't a header, we just append it to the content.
block.content += line
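    # Hedged sketch (added) of the header lines recognised above, with made-up
    # values:
    #   Content-Type: text/plain
    #   Content-Length: 12
    #   ActualHash: 0123456789abcdef
    # Any line that is not one of these headers is appended to block.content.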
def _strip_eof(self, line):
if line and line.endswith(b'#EOF\n'):
return line[:-5], True
if line and line.endswith(b'#EOF\r\n'):
_log.error('Got a CRLF-terminated #EOF - this is a driver bug.')
return line[:-6], True
return line, False
def _read_block(self, deadline, wait_for_stderr_eof=False):
block = ContentBlock()
out_seen_eof = False
while not self.has_crashed():
if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
break
if self.err_seen_eof:
out_line = self._server_process.read_stdout_line(deadline)
err_line = None
elif out_seen_eof:
out_line = None
err_line = self._server_process.read_stderr_line(deadline)
else:
out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(
deadline)
if self._server_process.timed_out or self.has_crashed():
break
if out_line:
assert not out_seen_eof
out_line, out_seen_eof = self._strip_eof(out_line)
if err_line:
assert not self.err_seen_eof
err_line, self.err_seen_eof = self._strip_eof(err_line)
if out_line:
if not out_line.endswith(b'\n'):
_log.error(
'Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug.'
)
# pylint: disable=protected-access
content_length_before_header_check = block._content_length
self._process_stdout_line(block, out_line)
# FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
# Don't wait until we're done with headers, just read the binary blob right now.
if content_length_before_header_check != block._content_length:
if block._content_length > 0:
block.content = self._server_process.read_stdout(
deadline, block._content_length)
else:
_log.error(
'Received content of type %s with Content-Length of 0! This indicates a bug in %s.',
block.content_type, self._server_process.name())
if err_line:
if self._check_for_driver_crash(
err_line.decode('utf8', 'replace')):
break
if self._check_for_leak(err_line.decode('utf8', 'replace')):
break
self.error_from_test += err_line
block.decode_content()
return block
class ContentBlock(object):
def __init__(self):
self.content_type = None
self.encoding = None
self.content_hash = None
self._content_length = None
# Content is treated as binary data even though the text output is usually UTF-8.
# FIXME: Should be bytearray() once we require Python 2.6.
# TODO(crbug/1197331): Keeping PY2 as str() for now, as diffing modules
# need to be looked into for PY3 unified_diff.py and html_diff.py
if six.PY2:
self.content = str()
else:
self.content = bytearray()
self.decoded_content = None
self.malloc = None
self.js_heap = None
self.stdin_path = None
def decode_content(self):
if self.encoding == 'base64' and self.content is not None:
self.decoded_content = base64.b64decode(self.content)
else:
self.decoded_content = self.content
|
bsd-3-clause
| -7,480,429,755,094,713,000 | 1,300,366,199,984,418,800 | 41.437586 | 127 | 0.580729 | false |
Barrog/C4-Datapack
|
data/jscript/quests/343_UnderTheShadowOfTheIvoryTower/__init__.py
|
1
|
7613
|
# Made by mtrix - v0.2 by DrLecter
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
ORB = 4364
ECTOPLASM = 4365
ADENA = 57
CHANCE = 65
RANDOM_REWARDS=[[951,1], #Enchant Weapon C
[955,1], #Enchant Weapon D
[2511,550],#SpiritShot: Grade C
[736,1], #SoE
]
#Roshambo
OPTIONS={0:"Scissors",1:"Rock",2:"Paper"}
OUTCOME={0:1,1:2,2:0}
#Coin Toss
TOSS={0:"Heads",1:"Tails"}
ORBS=[10,30,70,150,310,0]
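# (Hedged editor's note) ORBS appears to be the cumulative orb payout for 1..5
# consecutive correct guesses in the coin-toss mini-game below; the trailing 0
# looks like an unused end-of-ladder sentinel.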
#Messages
start_msg=["One~ Two~ Three~","Go! One~ Two~ Three~","Ready? Go! One~ Two~ Three~","Here we go! One~ Two~ Three~"]
tie_msg=["Ah ha! A tie! Take back the orbs that you bet. Well, shall we play again?",\
"Ha! A tie! Take back the orbs that you bet. Shall we try again?"]
win_msg=["Well, you certainly got lucky that time! Take all the orbs we put up as a bet. Come on! Let's play another round!",\
"Oh no! I lose! Go ahead. Take all the orbs we put up as a bet. Come on! Let's play again!",\
"Oh no! I lose! Go ahead. Take all the orbs we put up as a bet. Humph... Come on! Let's play again!"]
lose_msg=["Oh, too bad. You lose! Shall we play another round?",\
"Oh...! You lose! Oh well, the orbs are mine. Shall we play another round?",\
"Oh, too bad, you lose! I'll take those orbs now... Hey now, shall we play another round?"]
again_msg=["Play the game.","Play the rock paper scissors game."]
toss_msg=[["You're right!","You win!"],\
["Hu wah! Right again!","You won twice in a row!"],\
["Hu wah! Right again!","You won three times in a row!"],\
["Ho ho! Right again!","You won four times in a row!"]]
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
marsha = st.getRandom(3)
random2 = st.getRandom(2)
orbs = st.getQuestItemsCount(ORB)
if event == "7834-02.htm" :
st.setState(STARTED)
st.set("cond","1")
st.playSound("ItemSound.quest_accept")
elif event == "7834-05.htm" :
if orbs :
st.giveItems(ADENA,orbs*125)
st.takeItems(ORB,-1)
else :
htmltext = "7834-09.htm"
elif event == "7835-02.htm":
if st.getQuestItemsCount(ECTOPLASM) :
st.takeItems(ECTOPLASM,1)
item=RANDOM_REWARDS[st.getRandom(len(RANDOM_REWARDS))]
st.giveItems(item[0],item[1])
htmltext="7835-02a.htm"
elif event == "7934-02.htm" :
if orbs < 10 :
htmltext = "7934-03a.htm"
else:
st.set("rps_1sttime","1")
elif event == "7934-03.htm" :
if orbs>=10 :
st.takeItems(ORB,10)
st.set("playing","1")
htmltext = st.showHtmlFile("7934-03.htm").replace("<msg>", start_msg[st.getRandom(len(start_msg))])
else :
htmltext = "7934-03a.htm"
elif event in [ "1","2","3" ]:
if st.getInt("playing"):
player=int(event)-1
if OUTCOME[player] == marsha:
msg=lose_msg
elif OUTCOME[marsha] == player:
st.giveItems(ORB,20)
msg=win_msg
else:
st.giveItems(ORB,10)
msg=tie_msg
st.unset("playing")
htmltext = st.showHtmlFile("7934-04.htm").replace("%player%", OPTIONS[player]).\
replace("%marsha%", OPTIONS[marsha]).replace("%msg%", msg[st.getRandom(len(msg))]).\
replace("%again%", again_msg[st.getRandom(len(again_msg))])
else:
htmltext="Player is cheating"
st.takeItems(ORB,-1)
elif event == "7935-02.htm" :
if orbs < 10 :
htmltext = "7935-02a.htm"
else:
st.set("ct_1sttime","1")
elif event == "7935-03.htm" :
if orbs>=10 :
st.set("toss","1")
else :
st.unset("row")
htmltext = "7935-02a.htm"
elif event in ["4","5"] :
if st.getInt("toss"):
if orbs>=10:
if random2==int(event)-4 :
row = st.getInt("row")
if row<4 :
row += 1
template="7935-06d.htm"
else:
st.giveItems(ORB,310)
row=0
template="7935-06c.htm"
else :
row = 0
st.takeItems(ORB,10)
template="7935-06b.htm"
st.set("row",str(row))
htmltext = st.showHtmlFile(template).replace("%toss%",TOSS[random2]).\
replace("%msg1%",toss_msg[row-1][0]).replace("%msg2%",toss_msg[row-1][1]).\
replace("%orbs%",str(ORBS[row-1])).replace("%next%",str(ORBS[row]))
else:
st.unset("row")
htmltext = "7935-02a.htm"
st.unset("toss")
else:
st.takeItems(ORB,-1)
htmltext="Player is cheating"
elif event == "quit":
if st.getInt("row"):
qty=st.getInt("row")-1
st.giveItems(ORB,ORBS[qty])
st.unset("row")
htmltext = st.showHtmlFile("7935-06a.htm").replace("%nebulites%",str(ORBS[qty]))
else:
st.takeItems(ORB,-1)
htmltext="Player is cheating"
elif event in ["7834-06.htm","7834-02b.htm"] :
st.playSound("ItemSound.quest_finish")
st.exitQuest(1)
return htmltext
def onTalk (Self,npc,st):
npcId = npc.getNpcId()
htmltext = "<html><head><body>I have nothing to say you</body></html>"
id = st.getState()
level = st.getPlayer().getLevel()
cond = st.getInt("cond")
if npcId==7834 :
if id == CREATED :
if st.getPlayer().getClassId().getId() in [ 0x11,0xc,0xd,0xe,0x10,0x1a,0x1b,0x1c,0x1e,0x28,0x29,0x2b,0x5e,0x5f,0x60,0x61,0x62,0x67,0x68,0x69,0x6e,0x6f,0x70]:
if level >= 40:
htmltext = "7834-01.htm"
else:
htmltext = "7834-01a.htm"
st.exitQuest(1)
else:
htmltext = "7834-01b.htm"
st.exitQuest(1)
elif cond==1 :
if st.getQuestItemsCount(ORB) :
htmltext = "7834-04.htm"
else :
htmltext = "7834-03.htm"
elif npcId==7835 :
htmltext = "7835-01.htm"
elif npcId==7934 :
if st.getInt("rps_1sttime") :
htmltext = "7934-01a.htm"
else :
htmltext = "7934-01.htm"
elif npcId==7935 :
st.unset("row")
if st.getInt("ct_1sttime") :
htmltext = "7935-01a.htm"
else :
htmltext = "7935-01.htm"
return htmltext
def onKill (self,npc,st):
npcId = npc.getNpcId()
if st.getRandom(100) < CHANCE :
st.giveItems(ORB,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(343,"343_UnderTheShadowOfTheIvoryTower","Under The Shadow Of The Ivory Tower")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(7834)
CREATED.addTalkId(7834)
CREATED.addTalkId(7835)
STARTED.addTalkId(7834)
STARTED.addTalkId(7835)
STARTED.addTalkId(7934)
STARTED.addTalkId(7935)
for i in range(563,567) :
STARTED.addKillId(i)
STARTED.addQuestDrop(7834,ORB,1)
print "importing quests: 343: Under The Shadow Of The Ivory Tower"
|
gpl-2.0
| 7,095,988,597,978,099,000 | -381,933,638,727,430,700 | 35.600962 | 170 | 0.543019 | false |
jumping/Diamond
|
src/collectors/exim/exim.py
|
60
|
1658
|
# coding=utf-8
"""
Shells out to get the exim queue length
#### Dependencies
* /usr/sbin/exim
"""
import diamond.collector
import subprocess
import os
from diamond.collector import str_to_bool
class EximCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(EximCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the exim binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
'sudo_user': 'User to sudo as',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EximCollector, self).get_default_config()
config.update({
'path': 'exim',
'bin': '/usr/sbin/exim',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'sudo_user': 'root',
})
return config
def collect(self):
if not os.access(self.config['bin'], os.X_OK):
return
command = [self.config['bin'], '-bpc']
if str_to_bool(self.config['use_sudo']):
            # list.extend() returns None, so concatenate instead to keep the
            # sudo prefix followed by the original exim command.
            command = [
                self.config['sudo_cmd'],
                '-u',
                self.config['sudo_user']
            ] + command
queuesize = subprocess.Popen(
command, stdout=subprocess.PIPE).communicate()[0].split()
if not len(queuesize):
return
queuesize = queuesize[-1]
self.publish('queuesize', queuesize)
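        # Hedged note (added): `exim -bpc` normally prints just the queue
        # length (e.g. "17"), so split()[-1] still yields the count even if
        # sudo or exim emit extra leading output.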
|
mit
| 1,330,970,929,039,931,600 | 5,542,338,503,156,182,000 | 25.31746 | 74 | 0.5193 | false |
xodus7/tensorflow
|
tensorflow/contrib/solvers/python/kernel_tests/util_test.py
|
25
|
4551
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UtilTest(test.TestCase):
def _testCreateOperator(self, use_static_shape_):
for dtype in np.float32, np.float64:
a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
x_np = np.array([[2.], [-3.]], dtype=dtype)
y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np, dtype=dtype)
x = constant_op.constant(x_np, dtype=dtype)
y = constant_op.constant(y_np, dtype=dtype)
else:
a = array_ops.placeholder(dtype)
x = array_ops.placeholder(dtype)
y = array_ops.placeholder(dtype)
op = util.create_operator(a)
ax = op.apply(x)
aty = op.apply_adjoint(y)
op_shape = ops.convert_to_tensor(op.shape)
if use_static_shape_:
op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
else:
op_shape_val, ax_val, aty_val = sess.run(
[op_shape, ax, aty], feed_dict={a: a_np,
x: x_np,
y: y_np})
self.assertAllEqual(op_shape_val, [3, 2])
self.assertAllClose(ax_val, np.dot(a_np, x_np))
self.assertAllClose(aty_val, np.dot(a_np.T, y_np))
def testCreateOperator(self):
self._testCreateOperator(True)
def testCreateOperatorUnknownShape(self):
self._testCreateOperator(False)
def _testIdentityOperator(self, use_static_shape_):
for dtype in np.float32, np.float64:
a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype)
x_np = np.array([[2.], [-3.]], dtype=dtype)
y_np = np.array([[2], [-3.], [5.]], dtype=dtype)
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np, dtype=dtype)
x = constant_op.constant(x_np, dtype=dtype)
y = constant_op.constant(y_np, dtype=dtype)
else:
a = array_ops.placeholder(dtype)
x = array_ops.placeholder(dtype)
y = array_ops.placeholder(dtype)
id_op = util.identity_operator(a)
ax = id_op.apply(x)
aty = id_op.apply_adjoint(y)
op_shape = ops.convert_to_tensor(id_op.shape)
if use_static_shape_:
op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty])
else:
op_shape_val, ax_val, aty_val = sess.run(
[op_shape, ax, aty], feed_dict={
a: a_np,
x: x_np,
y: y_np
})
self.assertAllEqual(op_shape_val, [3, 2])
self.assertAllClose(ax_val, x_np)
self.assertAllClose(aty_val, y_np)
def testIdentityOperator(self):
self._testIdentityOperator(True)
def testIdentityOperatorUnknownShape(self):
self._testIdentityOperator(False)
def testL2Norm(self):
with self.cached_session():
x_np = np.array([[2], [-3.], [5.]])
x_norm_np = np.linalg.norm(x_np)
x_normalized_np = x_np / x_norm_np
x = constant_op.constant(x_np)
l2norm = util.l2norm(x)
l2norm_squared = util.l2norm_squared(x)
x_normalized, x_norm = util.l2normalize(x)
self.assertAllClose(l2norm.eval(), x_norm_np)
self.assertAllClose(l2norm_squared.eval(), np.square(x_norm_np))
self.assertAllClose(x_norm.eval(), x_norm_np)
self.assertAllClose(x_normalized.eval(), x_normalized_np)
if __name__ == '__main__':
test.main()
|
apache-2.0
| -2,628,751,934,174,480,400 | 6,091,684,583,291,529,000 | 37.243697 | 80 | 0.59811 | false |
chadnickbok/npm
|
node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
1355
|
44604
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
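# Hedged illustration (added): on a POSIX filesystem a relative path is rebased
# onto the CMakeLists.txt location, e.g.
#   NormjoinPathForceCMakeSource('out/Debug', '../../foo.c')
#     -> '${CMAKE_CURRENT_LIST_DIR}/foo.c'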
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
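# Hedged illustration (added): only backslash, ';' and '"' are escaped, e.g.
# CMakeStringEscape('-DFOO="a;b"') yields -DFOO=\"a\;b\" (shown unquoted).
assert CMakeStringEscape('-DFOO="a;b"') == '-DFOO=\\"a\\;b\\"'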
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(string.maketrans(' /():."', '_______'))
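# Hedged example (added): every character CMake or make would reject becomes
# '_', e.g. StringToCMakeTargetName('chrome/chrome.gyp:chrome (run)') returns
# 'chrome_chrome_gyp_chrome__run_'. (string.maketrans is the Python 2 form.)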
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' list for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' list for this target.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print ('Target %s has unknown target type %s, skipping.' %
( target_name, target_type ) )
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handling (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
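# Illustrative sketch of the emitted block for library_dirs=['mylibs']
# (hypothetical value; the actual prefix depends on the gyp file location):
#   link_directories( <path-to-gyp-dir>/mylibs
#   )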
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
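# For example (hypothetical values), a C++-only target with cflags=['-g'] and
# cflags_cc=['-std=c++11'] is expected to end up with the target property
# COMPILE_FLAGS set to roughly "-g -std=c++11".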
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
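# Sketch of the expected output for a linkable target with two static
# dependencies (hypothetical names) when 'circular' is enabled:
#   target_link_libraries(app
#   -Wl,--start-group
#     base
#     net
#   -Wl,--end-group
#   )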
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
# CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
artistic-2.0
| -3,859,796,823,103,160,300 | 681,470,284,210,657,000 | 35.530713 | 80 | 0.662721 | false |
ShineFan/odoo
|
addons/l10n_be_hr_payroll/__openerp__.py
|
312
|
1872
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['hr_payroll'],
'version': '1.0',
'description': """
Belgian Payroll Rules.
======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
* Salary Maj, ONSS, Withholding Tax, Child Allowance, ...
""",
'auto_install': False,
'demo': ['l10n_be_hr_payroll_demo.xml'],
'website': 'https://www.odoo.com/page/accounting',
'data':[
'l10n_be_hr_payroll_view.xml',
'l10n_be_hr_payroll_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -1,446,653,447,152,014,300 | 4,152,477,186,177,984,500 | 34.320755 | 78 | 0.60203 | false |
ofermend/medicare-demo
|
socialite/jython/Lib/distutils/tests/test_install.py
|
155
|
1861
|
"""Tests for distutils.command.install."""
import os
import unittest
from distutils.command.install import install
from distutils.core import Distribution
from distutils.tests import support
class InstallTestCase(support.TempdirManager, unittest.TestCase):
def test_home_installation_scheme(self):
# This ensures two things:
# - that --home generates the desired set of directory names
# - that --home is supported on all platforms
builddir = self.mkdtemp()
destination = os.path.join(builddir, "installation")
dist = Distribution({"name": "foopkg"})
# script_name need not exist, it just needs to be initialized
dist.script_name = os.path.join(builddir, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
build_base=builddir,
build_lib=os.path.join(builddir, "lib"),
)
cmd = install(dist)
cmd.home = destination
cmd.ensure_finalized()
self.assertEqual(cmd.install_base, destination)
self.assertEqual(cmd.install_platbase, destination)
def check_path(got, expected):
got = os.path.normpath(got)
expected = os.path.normpath(expected)
self.assertEqual(got, expected)
libdir = os.path.join(destination, "lib", "python")
check_path(cmd.install_lib, libdir)
check_path(cmd.install_platlib, libdir)
check_path(cmd.install_purelib, libdir)
check_path(cmd.install_headers,
os.path.join(destination, "include", "python", "foopkg"))
check_path(cmd.install_scripts, os.path.join(destination, "bin"))
check_path(cmd.install_data, destination)
def test_suite():
return unittest.makeSuite(InstallTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
|
apache-2.0
| 281,873,379,661,590,340 | -4,714,338,333,426,981,000 | 32.836364 | 76 | 0.648039 | false |
lightning-round/salud-api
|
app/mod_profiles/models/Permission.py
|
3
|
1397
|
# -*- coding: utf-8 -*-
from app.mod_shared.models.db import db
class Permission(db.Model):
# Attributes
id = db.Column(db.Integer, primary_key=True)
# Foreign keys
analysis_id = db.Column(db.Integer, db.ForeignKey('analysis.id'))
permission_type_id = db.Column(db.Integer, db.ForeignKey('permission_type.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# Relationships
analysis = db.relationship('Analysis',
backref=db.backref('permissions',
lazy='dynamic',
cascade='all, delete-orphan',
)
)
permission_type = db.relationship('PermissionType',
backref=db.backref('permissions', lazy='dynamic'))
user = db.relationship('User',
backref=db.backref('permissions', lazy='dynamic'))
def __init__(self, analysis_id, permission_type_id, user_id):
self.analysis_id = analysis_id
self.permission_type_id = permission_type_id
self.user_id = user_id
def __repr__(self):
return '<Permission: %r>' % self.id
|
gpl-2.0
| -8,649,825,916,421,496,000 | 5,716,382,398,882,050,000 | 44.064516 | 88 | 0.478883 | false |
XXMrHyde/android_external_chromium_org
|
tools/telemetry/telemetry/core/chrome/inspector_page_unittest.py
|
29
|
2064
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.unittest import tab_test_case
unittest_data_dir = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'unittest_data')
class InspectorPageTest(tab_test_case.TabTestCase):
def __init__(self, *args):
super(InspectorPageTest, self).__init__(*args)
def setUp(self):
super(InspectorPageTest, self).setUp()
self._browser.SetHTTPServerDirectories(unittest_data_dir)
def testPageNavigateToNormalUrl(self):
self._tab.Navigate(self._browser.http_server.UrlOf('blank.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
def testCustomActionToNavigate(self):
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
custom_action_called = [False]
def CustomAction():
custom_action_called[0] = True
self._tab.ExecuteJavaScript('document.getElementById("clickme").click();')
self._tab.PerformActionAndWaitForNavigate(CustomAction)
self.assertTrue(custom_action_called[0])
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testGetCookieByName(self):
self._tab.Navigate(
self._browser.http_server.UrlOf('blank.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self._tab.ExecuteJavaScript('document.cookie="foo=bar"')
self.assertEquals(self._tab.GetCookieByName('foo'), 'bar')
def testScriptToEvaluateOnCommit(self):
self._tab.Navigate(
self._browser.http_server.UrlOf('blank.html'),
script_to_evaluate_on_commit='var foo = "bar";')
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(self._tab.EvaluateJavaScript('foo'), 'bar')
|
bsd-3-clause
| -9,153,386,449,268,826,000 | -2,265,795,486,315,031,000 | 36.527273 | 80 | 0.70155 | false |
rven/odoo
|
addons/auth_signup/models/res_config_settings.py
|
4
|
1186
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ast import literal_eval
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
auth_signup_reset_password = fields.Boolean(string='Enable password reset from Login page', config_parameter='auth_signup.reset_password')
auth_signup_uninvited = fields.Selection([
('b2b', 'On invitation'),
('b2c', 'Free sign up'),
], string='Customer Account', default='b2b', config_parameter='auth_signup.invitation_scope')
auth_signup_template_user_id = fields.Many2one('res.users', string='Template user for new users created through signup',
config_parameter='base.template_portal_user_id')
def open_template_user(self):
action = self.env["ir.actions.actions"]._for_xml_id("base.action_res_users")
action['res_id'] = literal_eval(self.env['ir.config_parameter'].sudo().get_param('base.template_portal_user_id', 'False'))
action['views'] = [[self.env.ref('base.view_users_form').id, 'form']]
return action
|
agpl-3.0
| 4,411,582,940,558,315,000 | 8,604,047,233,272,267,000 | 48.416667 | 142 | 0.664418 | false |
kvar/ansible
|
test/units/modules/network/fortios/test_fortios_switch_controller_custom_command.py
|
21
|
8565
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_switch_controller_custom_command
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_switch_controller_custom_command.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_custom_command_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_custom_command': {
'command': 'test_value_3',
'command_name': 'test_value_4',
'description': 'test_value_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'command': 'test_value_3',
'command-name': 'test_value_4',
'description': 'test_value_5'
}
set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_custom_command_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_custom_command': {
'command': 'test_value_3',
'command_name': 'test_value_4',
'description': 'test_value_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'command': 'test_value_3',
'command-name': 'test_value_4',
'description': 'test_value_5'
}
set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_custom_command_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_custom_command': {
'command': 'test_value_3',
'command_name': 'test_value_4',
'description': 'test_value_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller', 'custom-command', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_custom_command_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_custom_command': {
'command': 'test_value_3',
'command_name': 'test_value_4',
'description': 'test_value_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller', 'custom-command', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_custom_command_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_custom_command': {
'command': 'test_value_3',
'command_name': 'test_value_4',
'description': 'test_value_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'command': 'test_value_3',
'command-name': 'test_value_4',
'description': 'test_value_5'
}
set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_switch_controller_custom_command_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_custom_command': {
'random_attribute_not_valid': 'tag',
'command': 'test_value_3',
'command_name': 'test_value_4',
'description': 'test_value_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'command': 'test_value_3',
'command-name': 'test_value_4',
'description': 'test_value_5'
}
set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
gpl-3.0
| -3,270,803,837,554,397,700 | -4,377,385,826,960,763,400 | 38.109589 | 142 | 0.672971 | false |
flavour/helios
|
modules/s3/s3validators.py
|
3
|
69315
|
# -*- coding: utf-8 -*-
""" Custom Validators
@requires: U{B{I{gluon}} <http://web2py.com>}
@author: Fran Boon <fran[at]aidiq.com>
@author: Dominic König <dominic[at]aidiq.com>
@author: Michael Howden <michael[at]aidiq.com>
@author: sunneach
@copyright: (c) 2010-2011 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["single_phone_number_pattern",
"multi_phone_number_pattern",
"IS_LAT",
"IS_LON",
"IS_INT_AMOUNT",
"IS_FLOAT_AMOUNT",
"IS_HTML_COLOUR",
"THIS_NOT_IN_DB",
"IS_UTC_OFFSET",
"IS_UTC_DATETIME",
"IS_UTC_DATETIME_IN_RANGE",
"IS_ONE_OF",
"IS_ONE_OF_EMPTY",
"IS_ONE_OF_EMPTY_SELECT",
"IS_NOT_ONE_OF",
"IS_LOCATION",
"IS_LOCATION_SELECTOR",
"IS_SITE_SELECTOR",
"IS_ADD_PERSON_WIDGET",
"IS_ACL",
"QUANTITY_INV_ITEM",
"IS_IN_SET_LAZY"]
import time
import re
from datetime import datetime, timedelta
from gluon import current, Field, IS_MATCH, IS_NOT_IN_DB, IS_IN_SET, IS_INT_IN_RANGE, IS_FLOAT_IN_RANGE
from gluon.validators import Validator
from gluon.storage import Storage
def options_sorter(x, y):
return (str(x[1]).upper() > str(y[1]).upper() and 1) or -1
# -----------------------------------------------------------------------------
# Phone number requires
# Multiple phone numbers can be separated by comma, slash, semi-colon.
# (Semi-colon appears in Brazil OSM data.)
# @ToDo: Need to beware of separators used inside phone numbers
# (e.g. 555-1212, ext 9), so may need fancier validation if we see that.
# @ToDo: Add tooltip giving list syntax, and warning against above.
# (Current use is in importing OSM files, so isn't interactive.)
# @ToDo: Code that should only have a single # should use
# s3_single_phone_requires. Check what messaging assumes.
phone_number_pattern = "\+?\s*[\s\-\.\(\)\d]+(?:(?: x| ext)\s?\d{1,5})?"
single_phone_number_pattern = "%s$" % phone_number_pattern
multi_phone_number_pattern = "%s(\s*(,|/|;)\s*%s)*$" % (phone_number_pattern,
phone_number_pattern)
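# For instance, a value such as "+1 (555) 123-4567 ext 12, 555-7654"
# (illustrative numbers) is expected to satisfy multi_phone_number_pattern,
# while single_phone_number_pattern accepts only the first of the two.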
# -----------------------------------------------------------------------------
class IS_LAT(object):
"""
example:
INPUT(_type="text", _name="name", requires=IS_LAT())
latitude has to be in degrees between -90 & 90
"""
def __init__(self,
error_message = "Latitude/Northing should be between -90 & 90!"
):
self.minimum = -90
self.maximum = 90
self.error_message = error_message
def __call__(self, value):
try:
value = float(value)
if self.minimum <= value <= self.maximum:
return (value, None)
except ValueError:
pass
return (value, self.error_message)
class IS_LON(object):
"""
example:
INPUT(_type="text", _name="name", requires=IS_LON())
longitude has to be in degrees between -180 & 180
"""
def __init__(self,
error_message = "Longitude/Easting should be between -180 & 180!"
):
self.minimum = -180
self.maximum = 180
self.error_message = error_message
def __call__(self, value):
try:
value = float(value)
if self.minimum <= value <= self.maximum:
return (value, None)
except ValueError:
pass
return (value, self.error_message)
# -----------------------------------------------------------------------------
class IS_INT_AMOUNT(IS_INT_IN_RANGE):
"""
Validation, widget and representation of
integer-values with thousands-separators
"""
def __init__(self,
minimum=None,
maximum=None,
error_message=None):
IS_INT_IN_RANGE.__init__(self,
minimum=minimum,
maximum=maximum,
error_message=error_message)
def __call__(self, value):
thousands_sep = ","
value = str(value).replace(thousands_sep, "")
return IS_INT_IN_RANGE.__call__(self, value)
@staticmethod
def represent(value):
if value is None:
return ""
ts = current.deployment_settings.get_L10n_thousands_separator()
if not ts:
thousands_separator = ""
else:
thousands_separator = ","
return format(int(value), "%sd" % thousands_separator)
@staticmethod
def widget(f, v, **attributes):
from gluon.sqlhtml import StringWidget
attr = Storage(attributes)
classes = attr.get("_class", "").split(" ")
classes = " ".join([c for c in classes if c != "integer"])
_class = "%s int_amount" % classes
attr.update(_class=_class)
return StringWidget.widget(f, v, **attr)
# -----------------------------------------------------------------------------
class IS_FLOAT_AMOUNT(IS_FLOAT_IN_RANGE):
"""
Validation, widget and representation of
float-values with thousands-separators
"""
def __init__(self,
minimum=None,
maximum=None,
error_message=None,
dot='.'):
IS_FLOAT_IN_RANGE.__init__(self,
minimum=minimum,
maximum=maximum,
error_message=error_message,
dot=dot)
def __call__(self, value):
thousands_sep = ","
value = str(value).replace(thousands_sep, "")
return IS_FLOAT_IN_RANGE.__call__(self, value)
@staticmethod
def represent(value, precision=None):
if value is None:
return ""
ts = current.deployment_settings.get_L10n_thousands_separator()
if not ts:
thousands_separator = ""
else:
thousands_separator = ","
if precision is not None:
fl = format(float(value), "%s.%df" % (thousands_separator, precision))
else:
fl = format(float(value), "%sf" % thousands_separator).rstrip("0")
if fl[-1] == ".":
fl += "0"
return fl
@staticmethod
def widget(f, v, **attributes):
from gluon.sqlhtml import StringWidget
attr = Storage(attributes)
classes = attr.get("_class", "").split(" ")
classes = " ".join([c for c in classes if c != "double"])
_class = "%s float_amount" % classes
attr.update(_class=_class)
return StringWidget.widget(f, v, **attr)
# -----------------------------------------------------------------------------
class IS_HTML_COLOUR(IS_MATCH):
"""
example::
INPUT(_type="text", _name="name", requires=IS_HTML_COLOUR())
"""
def __init__(self,
error_message="must be a 6 digit hex code!"
):
IS_MATCH.__init__(self, "^[0-9a-fA-F]{6}$", error_message)
# -----------------------------------------------------------------------------
class THIS_NOT_IN_DB(object):
"""
Unused currently since doesn't quite work.
See: http://groups.google.com/group/web2py/browse_thread/thread/27b14433976c0540
"""
def __init__(self,
dbset,
field,
this,
error_message = "value already in database!"
):
if hasattr(dbset, "define_table"):
current.dbset = dbset()
else:
current.dbset = dbset
self.field = field
self.value = this
self.error_message = error_message
self.record_id = 0
def set_self_id(self, id):
self.record_id = id
def __call__(self, value):
tablename, fieldname = str(self.field).split(".")
field = current.dbset._db[tablename][fieldname]
rows = current.dbset(field == self.value).select(limitby=(0, 1))
if len(rows)>0 and str(rows[0].id) != str(self.record_id):
return (self.value, self.error_message)
return (value, None)
# IS_ONE_OF_EMPTY -------------------------------------------------------------------
# by sunneach 2010-02-03
# copy of nursix's IS_ONE_OF with removed 'options' method
regex1 = re.compile("[\w_]+\.[\w_]+")
regex2 = re.compile("%\((?P<name>[^\)]+)\)s")
class IS_ONE_OF_EMPTY(Validator):
"""
Filtered version of IS_IN_DB():
validates a given value as key of another table, filtered by the
'filterby' field for one of the 'filter_opts' options
(=a selective IS_IN_DB())
NB Filtering isn't active in GQL.
For the dropdown representation:
'label' can be a string template for the record, or a set of field
names of the fields to be used as option labels, or a function or
lambda to create an option label from the respective record (which
has to return a string, of course). The function will take the
record as an argument.
No 'options' method as designed to be called next to an
Autocomplete field so don't download a large dropdown
unnecessarily.
"""
def __init__(self,
dbset,
field,
label=None,
filterby=None,
filter_opts=None,
not_filterby=None,
not_filter_opts=None,
error_message="invalid value!",
orderby=None,
groupby=None,
left=None,
multiple=False,
zero="",
sort=False,
_and=None,
):
if hasattr(dbset, "define_table"):
self.dbset = dbset()
else:
self.dbset = dbset
self.field = field
(ktable, kfield) = str(self.field).split(".")
if not label:
label = "%%(%s)s" % kfield
if isinstance(label, str):
if regex1.match(str(label)):
label = "%%(%s)s" % str(label).split(".")[-1]
ks = regex2.findall(label)
if not kfield in ks:
ks += [kfield]
fields = ["%s.%s" % (ktable, k) for k in ks]
else:
ks = [kfield]
fields =[str(f) for f in self.dbset._db[ktable]]
self.fields = fields
self.label = label
self.ktable = ktable
if not kfield or not len(kfield):
self.kfield = "id"
else:
self.kfield = kfield
self.ks = ks
self.error_message = error_message
self.theset = None
self.orderby = orderby
self.groupby = groupby
self.left = left
self.multiple = multiple
self.zero = zero
self.sort = sort
self._and = _and
self.filterby = filterby
self.filter_opts = filter_opts
self.not_filterby = not_filterby
self.not_filter_opts = not_filter_opts
def set_self_id(self, id):
if self._and:
self._and.record_id = id
def set_filter(self,
filterby = None,
filter_opts = None,
not_filterby = None,
not_filter_opts = None):
"""
This can be called from prep to apply a filter based on
data in the record or the primary resource id.
"""
if filterby:
self.filterby = filterby
if filter_opts:
self.filter_opts = filter_opts
if not_filterby:
self.not_filterby = not_filterby
if not_filter_opts:
self.not_filter_opts = not_filter_opts
def build_set(self):
dbset = self.dbset
db = dbset._db
if self.ktable in db:
table = db[self.ktable]
auth = current.auth
if self.fields == "all":
fields = [f for f in table if isinstance(f, Field)]
else:
fieldnames = [f.split(".")[1] if "." in f else f for f in self.fields]
fields = [table[k] for k in fieldnames if k in table.fields]
if db._dbname not in ("gql", "gae"):
orderby = self.orderby or reduce(lambda a, b: a|b, fields)
groupby = self.groupby
# Caching breaks Colorbox dropdown refreshes
#dd = dict(orderby=orderby, groupby=groupby, cache=(current.cache.ram, 60))
dd = dict(orderby=orderby, groupby=groupby)
query = auth.s3_accessible_query("read", table)
if "deleted" in table:
query = ((table["deleted"] == False) & query)
if self.filterby and self.filterby in table:
if self.filter_opts:
query = query & (table[self.filterby].belongs(self.filter_opts))
if not self.orderby:
dd.update(orderby=table[self.filterby])
if self.not_filterby and self.not_filterby in table and self.not_filter_opts:
query = query & (~(table[self.not_filterby].belongs(self.not_filter_opts)))
if not self.orderby:
dd.update(orderby=table[self.filterby])
if self.left is not None:
dd.update(left=self.left)
records = dbset(query).select(*fields, **dd)
else:
# Note this does not support filtering.
orderby = self.orderby or \
reduce(lambda a, b: a|b, (f for f in fields
if not f.name == "id"))
#dd = dict(orderby=orderby, cache=(current.cache.ram, 60))
dd = dict(orderby=orderby)
records = dbset.select(db[self.ktable].ALL, **dd)
self.theset = [str(r[self.kfield]) for r in records]
#labels = []
label = self.label
try:
labels = map(label, records)
except TypeError:
if isinstance(label, str):
labels = map(lambda r: label % dict(r), records)
elif isinstance(label, (list, tuple)):
labels = map(lambda r: \
" ".join([r[l] for l in label if l in r]),
records)
elif callable(label):
# Is a function
labels = map(label, records)
elif "name" in table:
labels = map(lambda r: r.name, records)
else:
labels = map(lambda r: r[self.kfield], records)
self.labels = labels
else:
self.theset = None
self.labels = None
#Removed as we don't want any options downloaded unnecessarily
#def options(self):
def __call__(self, value):
try:
_table = self.dbset._db[self.ktable]
deleted_q = ("deleted" in _table) and (_table["deleted"] == False) or False
filter_opts_q = False
if self.filterby and self.filterby in _table:
if self.filter_opts:
filter_opts_q = _table[self.filterby].belongs(self.filter_opts)
# For a list field, Web2py now packs elements in "|x|y|" by itself,
# so that is no longer done here. The unpacking is left in for now,
# in case someone enters a list by hand that way, or in case we get
# here from the JSON widget over in s3widgets...
# Note that on the way in, nothing checks that the values supplied
# for a list:reference actually exist in the target table -- that
# is only "assured" by sending out an option list containing only
# valid ids. But what if someone constructs a request by hand?
# Or what if some of the ids get deleted between the time the form
# goes out and when it gets submitted? I just left a form sitting
# for about 10 hours (longer than session expiration, even), and it
# was accepted. So it's actually not that wildly impossible that
# an id in the list would be gone.
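# For example, a submitted value of "|3|7|" (illustrative ids) is unpacked
# below into ["3", "7"] before validation.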
if self.multiple:
if isinstance(value, list):
values = value
elif isinstance(value, basestring) and \
value[0] == "|" and value[-1] == "|":
values = value[1:-1].split("|")
elif value:
values = [value]
else:
values = []
if self.theset:
if not [x for x in values if not x in self.theset]:
return (values, None)
else:
return (value, self.error_message)
else:
query = None
for v in values:
q = (_table[self.kfield] == v)
query = query is not None and query | q or q
if filter_opts_q != False:
query = query is not None and \
(filter_opts_q & (query)) or filter_opts_q
if deleted_q != False:
query = query is not None and \
(deleted_q & (query)) or deleted_q
if self.dbset(query).count() < 1:
return (value, self.error_message)
return (values, None)
elif self.theset:
if value in self.theset:
if self._and:
return self._and(value)
else:
return (value, None)
else:
values = [value]
query = None
for v in values:
q = (_table[self.kfield] == v)
query = query is not None and query | q or q
if filter_opts_q != False:
query = query is not None and \
(filter_opts_q & (query)) or filter_opts_q
if deleted_q != False:
query = query is not None and \
(deleted_q & (query)) or deleted_q
if self.dbset(query).count():
if self._and:
return self._and(value)
else:
return (value, None)
except:
pass
return (value, self.error_message)
# IS_ONE_OF -------------------------------------------------------------------
# added 2009-08-23 by nursix
# converted to subclass 2010-02-03 by sunneach: NO CHANGES in the method bodies
class IS_ONE_OF(IS_ONE_OF_EMPTY):
"""
Extends IS_ONE_OF_EMPTY by restoring the 'options' method.
"""
def options(self):
self.build_set()
items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(options_sorter)
if self.zero != None and not self.multiple:
items.insert(0, ("", self.zero))
return items
# -----------------------------------------------------------------------------
class IS_ONE_OF_EMPTY_SELECT(IS_ONE_OF_EMPTY):
"""
Extends IS_ONE_OF_EMPTY by displaying an empty SELECT (instead of INPUT)
"""
def options(self):
return [("", "")]
# -----------------------------------------------------------------------------
class IS_NOT_ONE_OF(IS_NOT_IN_DB):
"""
Filtered version of IS_NOT_IN_DB()
- understands the 'deleted' field.
- makes the field unique (amongst non-deleted field)
Example:
- INPUT(_type="text", _name="name", requires=IS_NOT_ONE_OF(db, db.table))
"""
def __call__(self, value):
db = current.db
translate = lambda m: m # workaround
value = str(value)
if not value.strip():
return (value, translate(self.error_message))
if value in self.allowed_override:
return (value, None)
(tablename, fieldname) = str(self.field).split(".")
_table = db[tablename]
field = _table[fieldname]
query = (field == value)
if "deleted" in _table:
query = (_table["deleted"] == False) & query
rows = db(query).select(limitby=(0, 1))
if len(rows) > 0:
if isinstance(self.record_id, dict):
for f in self.record_id:
if str(getattr(rows[0], f)) != str(self.record_id[f]):
return (value, translate(self.error_message))
elif str(rows[0].id) != str(self.record_id):
return (value, translate(self.error_message))
return (value, None)
# -----------------------------------------------------------------------------
class IS_LOCATION(Validator):
"""
Allow all locations, or locations by level.
Optimized for use within the S3LocationSelectorWidget's L0 Dropdown.
"""
def __init__(self,
level = None,
error_message = None
):
T = current.T
self.level = level # can be a List or a single element
self.error_message = error_message or T("Invalid Location!")
def __call__(self, value):
db = current.db
gis = current.gis
table = db.gis_location
level = self.level
if level and level == "L0":
# Use cached countries. This returns name if id is for a country.
have_location = gis.get_country(value)
else:
query = (table.id == value) & (table.deleted == False)
if level:
if isinstance(level, list):
query = query & (table.level.belongs(level))
else:
query = query & (table.level == level)
have_location = db(query).select(table.id,
limitby=(0, 1)).first()
if have_location:
return (value, None)
else:
return (value, self.error_message)
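# Usage sketch (illustrative only): restrict a location_id field to countries
# and first-level subdivisions:
#     table.location_id.requires = IS_LOCATION(level=["L0", "L1"])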
# -----------------------------------------------------------------------------
class IS_LOCATION_SELECTOR(Validator):
"""
Designed for use within the S3LocationSelectorWidget.
For Create forms, this will create a new location from the additional fields
For Update forms, this will normally just check that we have a valid location_id FK
- although there is the option to create a new location there too, in which case it acts as above.
@ToDo: Audit
"""
def __init__(self,
error_message = None,
):
T = current.T
self.error_message = error_message or T("Invalid Location!")
self.no_parent = T("Need to have all levels filled out in mode strict!")
auth = current.auth
self.no_permission = auth.messages.access_denied
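# Usage sketch (illustrative only): attach to a location_id field together with
# the S3LocationSelectorWidget, so that Create forms can insert a new specific
# location from the widget's extra fields:
#     table.location_id.requires = IS_LOCATION_SELECTOR()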
def __call__(self, value):
db = current.db
auth = current.auth
gis = current.gis
table = db.gis_location
try:
# Is this an ID?
value = int(value)
# Yes: This must be an Update form
if not auth.s3_has_permission("update", table, record_id=value):
return (value, self.no_permission)
# Check that this is a valid location_id
query = (table.id == value) & \
(table.deleted == False) & \
(table.level == None) # NB Specific Locations only
location = db(query).select(table.id,
limitby=(0, 1)).first()
if location:
# Update the record, in case changes have been made
location = self._process_values()
# onvalidation
form = Storage()
form.vars = location
gis.wkt_centroid(form)
db(table.id == value).update(name = location.name,
lat = location.lat,
lon = location.lon,
addr_street = location.street,
addr_postcode = location.postcode,
parent = location.parent)
# onaccept
gis.update_location_tree(value, location.parent)
return (value, None)
except:
# Create form
if not auth.s3_has_permission("create", table):
return (None, self.no_permission)
location = self._process_values()
strict = gis.get_strict_hierarchy()
if strict and not location.parent:
return (value, self.no_parent)
if location.name or location.lat or location.lon or \
location.street or location.postcode or location.parent:
# onvalidation
form = Storage()
form.vars = location
gis.wkt_centroid(form)
value = table.insert(name = location.name,
lat = location.lat,
lon = location.lon,
addr_street = location.street,
addr_postcode = location.postcode,
parent = location.parent,
wkt = form.vars.wkt,
lon_min = form.vars.lon_min,
lon_max = form.vars.lon_max,
lat_min = form.vars.lat_min,
lat_max = form.vars.lat_max
)
# onaccept
gis.update_location_tree(value, location.parent)
return (value, None)
else:
return (None, None)
return (value, self.error_message)
def _process_values(self):
"""
Read the request.vars & prepare for a record insert/update
Note: This is also used by IS_SITE_SELECTOR()
"""
db = current.db
auth = current.auth
gis = current.gis
request = current.request
response = current.response
session = current.session
table = db.gis_location
L0 = request.vars.get("gis_location_L0", None)
# Are we allowed to create Locations?
if not auth.s3_has_permission("create", table):
return (None, self.no_permission)
# What level of hierarchy are we allowed to edit?
if auth.s3_has_role(session.s3.system_roles.MAP_ADMIN):
# 'MapAdmin' always has permission to edit hierarchy locations
L1_allowed = True
L2_allowed = True
L3_allowed = True
L4_allowed = True
L5_allowed = True
else:
if L0:
ctable = db.gis_config
query = (ctable.region_location_id == L0)
config = db(query).select(ctable.edit_L1,
ctable.edit_L2,
ctable.edit_L3,
ctable.edit_L4,
ctable.edit_L5,
limitby=(0, 1)).first()
if L0 and config:
# Lookup each level individually
L1_allowed = config.edit_L1
L2_allowed = config.edit_L2
L3_allowed = config.edit_L3
L4_allowed = config.edit_L4
L5_allowed = config.edit_L5
else:
# default is deployment_setting
L1_allowed = response.s3.gis.edit_Lx
L2_allowed = L1_allowed
L3_allowed = L1_allowed
L4_allowed = L1_allowed
L5_allowed = L1_allowed
# We don't need to do onvalidation of the Location Hierarchy records
# separately as we don't have anything extra to validate than we have
# done already
# We don't use the full onaccept as we don't need to
onaccept = gis.update_location_tree
L1 = request.vars.get("gis_location_L1", None)
L2 = request.vars.get("gis_location_L2", None)
L3 = request.vars.get("gis_location_L3", None)
L4 = request.vars.get("gis_location_L4", None)
L5 = request.vars.get("gis_location_L5", None)
# Check if we have parents to create
# L1
if L1:
try:
# Is this an ID?
int(L1)
# Do we need to update its parent?
if L0:
parent = L0
query = (table.id == L1)
location = db(query).select(table.parent,
limitby=(0, 1)).first()
if location and (location.parent != parent):
db(query).update(parent=parent)
onaccept(L1, parent)
except:
# Name
# Test for duplicates
query = (table.name == L1) & (table.level == "L1")
if L0:
query = query & (table.parent == L0)
location = db(query).select(table.id,
limitby=(0, 1)).first()
if location:
# Use Existing record
L1 = location.id
elif L0 and L1_allowed:
parent = L0
L1 = table.insert(name=L1, level="L1", parent=parent)
onaccept(L1, parent)
elif L1_allowed:
L1 = table.insert(name=L1, level="L1")
onaccept(L1)
else:
L1 = None
# L2
if L2:
try:
# Is this an ID?
int(L2)
# Do we need to update its parent?
if L1:
parent = L1
query = (table.id == L2)
location = db(query).select(table.parent,
limitby=(0, 1)).first()
if location and (location.parent != parent):
db(query).update(parent=parent)
onaccept(L2, parent)
except:
# Name
# Test for duplicates
# @ToDo: Also check for L2 parenting direct to L0
query = (table.name == L2) & (table.level == "L2")
if L1:
query = query & (table.parent == L1)
location = db(query).select(table.id,
limitby=(0, 1)).first()
if location:
# Use Existing record
L2 = location.id
elif L1 and L2_allowed:
parent = L1
L2 = table.insert(name=L2, level="L2", parent=parent)
onaccept(L2, parent)
elif L0 and L2_allowed:
parent = L0
L2 = table.insert(name=L2, level="L2", parent=parent)
onaccept(L2, parent)
elif L2_allowed:
L2 = table.insert(name=L2, level="L2")
onaccept(L2)
else:
L2 = None
# L3
if L3:
try:
# Is this an ID?
int(L3)
# Do we need to update its parent?
if L2:
parent = L2
query = (table.id == L3)
location = db(query).select(table.parent,
limitby=(0, 1)).first()
if location and (location.parent != parent):
db(query).update(parent=parent)
onaccept(L3, parent)
except:
# Name
# Test for duplicates
# @ToDo: Also check for L3 parenting direct to L0/1
query = (table.name == L3) & (table.level == "L3")
if L2:
query = query & (table.parent == L2)
location = db(query).select(table.id,
limitby=(0, 1)).first()
if location:
# Use Existing record
L3 = location.id
elif L2 and L3_allowed:
parent = L2
L3 = table.insert(name=L3, level="L3", parent=parent)
onaccept(L3, parent)
elif L1 and L3_allowed:
parent = L1
L3 = table.insert(name=L3, level="L3", parent=parent)
onaccept(L3, parent)
elif L0 and L3_allowed:
parent = L0
L3 = table.insert(name=L3, level="L3", parent=parent)
onaccept(L3, parent)
elif L3_allowed:
L3 = table.insert(name=L3, level="L3")
onaccept(L3)
else:
L3 = None
# L4
if L4:
try:
# Is this an ID?
int(L4)
# Do we need to update its parent?
if L3:
parent = L3
query = (table.id == L4)
location = db(query).select(table.parent,
limitby=(0, 1)).first()
if location and (location.parent != parent):
db(query).update(parent=parent)
onaccept(L4, parent)
except:
# Name
# Test for duplicates
# @ToDo: Also check for L4 parenting direct to L0/1/2
query = (table.name == L4) & (table.level == "L4")
if L3:
query = query & (table.parent == L3)
location = db(query).select(table.id,
limitby=(0, 1)).first()
if location:
# Use Existing record
L4 = location.id
elif L3 and L4_allowed:
parent = L3
L4 = table.insert(name=L4, level="L4", parent=parent)
onaccept(L4, parent)
elif L2 and L4_allowed:
parent = L2
L4 = table.insert(name=L4, level="L4", parent=parent)
onaccept(L4, parent)
elif L1 and L4_allowed:
parent = L1
L4 = table.insert(name=L4, level="L4", parent=parent)
onaccept(L4, parent)
elif L0 and L4_allowed:
parent = L0
L4 = table.insert(name=L4, level="L4", parent=parent)
onaccept(L4, parent)
elif L4_allowed:
L4 = table.insert(name=L4, level="L4")
onaccept(L4)
else:
L4 = None
# L5
if L5:
try:
# Is this an ID?
int(L5)
# Do we need to update its parent?
if L4:
parent = L4
query = (table.id == L5)
location = db(query).select(table.parent,
limitby=(0, 1)).first()
if location and (location.parent != parent):
db(query).update(parent=parent)
onaccept(L5, parent)
except:
# Name
# Test for duplicates
# @ToDo: Also check for L5 parenting direct to L0/1/2/3
query = (table.name == L5) & (table.level == "L5")
if L4:
query = query & (table.parent == L4)
location = db(query).select(table.id,
limitby=(0, 1)).first()
if location:
# Use Existing record
L5 = location.id
elif L4 and L5_allowed:
parent = L4
L5 = table.insert(name=L5, level="L5", parent=parent)
onaccept(L5, parent)
elif L3 and L5_allowed:
parent = L3
L5 = table.insert(name=L5, level="L5", parent=parent)
onaccept(L5, parent)
elif L2 and L5_allowed:
parent = L2
L5 = table.insert(name=L5, level="L5", parent=parent)
onaccept(L5, parent)
elif L1 and L5_allowed:
parent = L1
L5 = table.insert(name=L5, level="L5", parent=parent)
onaccept(L5, parent)
elif L0 and L5_allowed:
parent = L0
L5 = table.insert(name=L5, level="L5", parent=parent)
onaccept(L5, parent)
elif L5_allowed:
L5 = table.insert(name=L5, level="L5")
onaccept(L5)
else:
L5 = None
# Check if we have a specific location to create
name = request.vars.get("gis_location_name", None)
lat = request.vars.get("gis_location_lat", None)
lon = request.vars.get("gis_location_lon", None)
street = request.vars.get("gis_location_street", None)
postcode = request.vars.get("gis_location_postcode", None)
parent = L5 or L4 or L3 or L2 or L1 or L0 or None
form = Storage()
form.vars = Storage()
form.vars.lat = lat
form.vars.lon = lon
# onvalidation
gis.wkt_centroid(form)
return Storage(
name=name,
lat=lat, lon=lon,
street=street,
postcode=postcode,
parent=parent,
wkt = form.vars.wkt,
lon_min = form.vars.lon_min,
lon_max = form.vars.lon_max,
lat_min = form.vars.lat_min,
lat_max = form.vars.lat_max
)
# -----------------------------------------------------------------------------
class IS_SITE_SELECTOR(IS_LOCATION_SELECTOR):
"""
Extends the IS_LOCATION_SELECTOR() validator to transparently support
Sites of the specified type.
Note that these cannot include any mandatory fields other than Name & location_id
Designed for use within the ???S3LocationSelectorWidget.
For Create forms, this will create a new site & location from the additional fields
For Update forms, this will normally just check that we have a valid site_id FK
- although there is the option to create a new location there too, in which case it acts as above.
@ToDo: Audit
"""
def __init__(self,
site_type = "project_site",
error_message = None,
):
T = current.T
self.error_message = error_message or T("Invalid Site!")
self.no_parent = T("Need to have all levels filled out in mode strict!")
auth = current.auth
self.no_permission = auth.messages.access_denied
self.site_type = site_type
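# Usage sketch (illustrative only; "org_office" is a hypothetical site table):
#     table.site_id.requires = IS_SITE_SELECTOR(site_type="org_office")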
def __call__(self, value):
db = current.db
auth = current.auth
gis = current.gis
table = db.gis_location
stable = db[self.site_type]
try:
# Is this an ID?
value = int(value)
# Yes: This must be an Update form
if not auth.s3_has_permission("update", stable, record_id=value):
return (value, self.no_permission)
# Check that this is a valid site_id
query = (stable.id == value) & \
(stable.deleted == False)
site = db(query).select(stable.id,
stable.name,
stable.location_id,
limitby=(0, 1)).first()
if site and site.location_id:
# Update the location, in case changes have been made
location = self._process_values()
# Location onvalidation
form = Storage()
form.vars = location
gis.wkt_centroid(form)
# Location update
lquery = (table.id == site.location_id)
db(lquery).update(name = location.name,
lat = location.lat,
lon = location.lon,
addr_street = location.street,
addr_postcode = location.postcode,
parent = location.parent)
# Location onaccept
gis.update_location_tree(site.location_id, location.parent)
if site.name != location.name:
# Site Name has changed
db(query).update(name = location.name)
return (value, None)
except:
# Create form
if not auth.s3_has_permission("create", stable):
return (None, self.no_permission)
location = self._process_values()
strict = gis.get_strict_hierarchy()
if strict and not location.parent:
return (value, self.no_parent)
if location.name or location.lat or location.lon or \
location.street or location.postcode or location.parent:
# Location onvalidation
form = Storage()
form.vars = location
gis.wkt_centroid(form)
# Location creation
location_id = table.insert(name = location.name,
lat = location.lat,
lon = location.lon,
addr_street = location.street,
addr_postcode = location.postcode,
parent = location.parent,
wkt = form.vars.wkt,
lon_min = form.vars.lon_min,
lon_max = form.vars.lon_max,
lat_min = form.vars.lat_min,
lat_max = form.vars.lat_max
)
# Location onaccept
gis.update_location_tree(location_id, location.parent)
# Site creation
value = stable.insert(name = location.name,
location_id = location_id)
return (value, None)
else:
return (None, None)
return (value, self.error_message)
# -----------------------------------------------------------------------------
class IS_ADD_PERSON_WIDGET(Validator):
def __init__(self,
error_message=None,
mark_required=True):
self.error_message = error_message or \
current.T("Could not add person record")
self.mark_required = mark_required
def __call__(self, value):
db = current.db
manager = current.manager
request = current.request
T = current.T
try:
person_id = int(value)
except:
person_id = None
ptable = db.pr_person
table = db.pr_contact
def email_validate(value, person_id):
error_message = T("This email-address is already registered.")
if not value:
return (value, None)
value = value.strip()
query = (table.deleted != True) & \
(table.contact_method == "EMAIL") & \
(table.value == value)
if person_id:
query = query & \
(table.pe_id == ptable.pe_id) & \
(ptable.id != person_id)
email = db(query).select(table.id, limitby=(0, 1)).first()
if email:
return value, error_message
return value, None
if request.env.request_method == "POST":
_vars = request.post_vars
mobile = _vars["mobile_phone"]
# Validate the phone number
if _vars.mobile_phone:
regex = re.compile(single_phone_number_pattern)
if not regex.match(_vars.mobile_phone):
error = T("Invalid phone number")
return (person_id, error)
if person_id:
# update the person record
# Values are hard coded, but it seems to work ;)
data = Storage()
fields = ["first_name",
"middle_name",
"last_name",
"date_of_birth",
"gender",
"occupation"]
for f in fields:
if f in _vars and _vars[f]:
data[f] = _vars[f]
if data:
db(ptable.id == person_id).update(**data)
# Now check the contact information
record = ptable(person_id)
pe_id = record.pe_id
if pe_id:
# Check to see if the contact details have been set up
# First Email
record = table(pe_id=pe_id,
contact_method="EMAIL",
)
email = _vars["email"]
if record and email: # update
if email != record.value:
db(table.id == record.id).update(value=email)
else: # insert
table.insert(pe_id=pe_id,
contact_method="EMAIL",
value=email
)
# Now mobile phone
record = table(pe_id=pe_id,
contact_method="SMS",
)
if record: # update
if mobile != record.value:
db(table.id == record.id).update(value=mobile)
else: # insert
if mobile: # Don't insert an empty number
table.insert(pe_id=pe_id,
contact_method="SMS",
value=mobile
)
pass
else:
# Filter out location_id (location selector form values
# being processed only after this widget has been validated)
_vars = Storage([(k, _vars[k])
for k in _vars
if k != "location_id"])
# Validate the email
email, error = email_validate(_vars.email, None)
if error:
return (person_id, error)
# Validate and add the person record
table = db.pr_person
for f in table._filter_fields(_vars):
value, error = manager.validate(table, None, f, _vars[f])
if error:
return (None, None)
person_id = table.insert(**table._filter_fields(_vars))
# Need to update post_vars here,
# for some reason this doesn't happen through validation alone
request.post_vars.update(person_id=str(person_id))
if person_id:
# Update the super-entity
manager.model.update_super(table, dict(id=person_id))
person = table[person_id]
# Add contact information as provided
table = db.pr_contact
table.insert(pe_id=person.pe_id,
contact_method="EMAIL",
value=_vars.email)
if _vars.mobile_phone:
table.insert(pe_id=person.pe_id,
contact_method="SMS",
value=_vars.mobile_phone)
else:
return (person_id, self.error_message)
return (person_id, None)
# -----------------------------------------------------------------------------
class IS_UTC_OFFSET(Validator):
"""
Validates a given string value as UTC offset in the format +/-HHMM
@author: nursix
@param error_message: the error message to be returned
@note:
all leading parts of the string (before the trailing offset specification)
will be ignored and replaced by 'UTC ' in the return value, if the string
passes through.
"""
def __init__(self,
error_message="invalid UTC offset!"
):
self.error_message = error_message
@staticmethod
def get_offset_value(offset_str):
if offset_str and len(offset_str) >= 5 and \
(offset_str[-5] == "+" or offset_str[-5] == "-") and \
offset_str[-4:].isdigit():
offset_hrs = int(offset_str[-5] + offset_str[-4:-2])
offset_min = int(offset_str[-5] + offset_str[-2:])
offset = 3600*offset_hrs + 60*offset_min
return offset
else:
return None
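# Worked examples (sketch) of get_offset_value():
#     "UTC +0530" -> 5*3600 + 30*60 = 19800 seconds
#     "-0800"     -> -8*3600        = -28800 seconds
#     "0800"      -> None (no leading sign, so not a valid offset string)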
def __call__(self, value):
if value and isinstance(value, str):
_offset_str = value.strip()
offset = self.get_offset_value(_offset_str)
if offset is not None and offset > -86340 and offset < 86340:
# Add a leading 'UTC ',
# otherwise leading '+' and '0' will be stripped away by web2py
return ("UTC " + _offset_str[-5:], None)
return (value, self.error_message)
# -----------------------------------------------------------------------------
#
class IS_UTC_DATETIME(Validator):
"""
Validates a given value as datetime string and returns the
corresponding UTC datetime.
Example:
- INPUT(_type="text", _name="name", requires=IS_UTC_DATETIME())
@author: nursix
@param format: strptime/strftime format template string, for
directives refer to your strptime implementation
@param error_message: dict of error messages to be returned
@param utc_offset: offset to UTC in seconds, if not specified, the
value is considered to be UTC
@param allow_future: whether future date/times are allowed or not,
if set to False, all date/times beyond
now+max_future will fail
@type allow_future: boolean
@param max_future: the maximum acceptable future time interval in
seconds from now for unsynchronized local clocks
@note:
datetime has to be in the ISO 8601 format YYYY-MM-DD hh:mm:ss,
with an optional trailing UTC offset specified as +/-HHMM
(+ for eastern, - for western timezones)
"""
def __init__(self,
format=None,
error_message=None,
utc_offset=None,
allow_future=True,
max_future=900
):
if format is None:
self.format = current.deployment_settings.get_L10n_datetime_format()
else:
self.format = format
self.error_message = dict(
format = "Required format: %s!" % self.format,
offset = "Invalid UTC offset!",
future = "Future times not allowed!")
if error_message and isinstance(error_message, dict):
self.error_message["format"] = error_message.get("format", None) or self.error_message["format"]
self.error_message["offset"] = error_message.get("offset", None) or self.error_message["offset"]
self.error_message["future"] = error_message.get("future", None) or self.error_message["future"]
elif error_message:
self.error_message["format"] = error_message
if utc_offset is None:
utc_offset = current.session.s3.utc_offset
validate = IS_UTC_OFFSET()
offset, error = validate(utc_offset)
if error:
self.utc_offset = "UTC +0000" # fallback to UTC
else:
self.utc_offset = offset
self.allow_future = allow_future
self.max_future = max_future
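# Worked example (sketch, assuming format "%Y-%m-%d %H:%M:%S" and a value in
# the past):
#     IS_UTC_DATETIME(allow_future=False)("2011-03-01 13:00:00 +0100")
#     -> (datetime(2011, 3, 1, 12, 0, 0), None)   # converted to UTC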
def __call__(self, value):
_dtstr = value.strip()
if len(_dtstr) > 6 and \
(_dtstr[-6:-4] == " +" or _dtstr[-6:-4] == " -") and \
_dtstr[-4:].isdigit():
# UTC offset specified in dtstr
dtstr = _dtstr[0:-6]
_offset_str = _dtstr[-5:]
else:
# use default UTC offset
dtstr = _dtstr
_offset_str = self.utc_offset
offset_hrs = int(_offset_str[-5] + _offset_str[-4:-2])
offset_min = int(_offset_str[-5] + _offset_str[-2:])
offset = 3600 * offset_hrs + 60 * offset_min
# Offset must be in range -1439 to +1439 minutes
if offset < -86340 or offset > 86340:
return (value, self.error_message["offset"])
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(dtstr, str(self.format))
dt = datetime(y, m, d, hh, mm, ss)
except:
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(dtstr+":00", str(self.format))
dt = datetime(y, m, d, hh, mm, ss)
except:
return(value, self.error_message["format"])
if self.allow_future:
return (dt, None)
else:
latest = datetime.utcnow() + timedelta(seconds=self.max_future)
dt_utc = dt - timedelta(seconds=offset)
if dt_utc > latest:
return (dt_utc, self.error_message["future"])
else:
return (dt_utc, None)
def formatter(self, value):
format = self.format
offset = IS_UTC_OFFSET.get_offset_value(self.utc_offset)
if not value:
return "-"
elif offset:
dt = value + timedelta(seconds=offset)
return dt.strftime(str(format))
else:
dt = value
return dt.strftime(str(format)) + " +0000"
# -----------------------------------------------------------------------------
class IS_UTC_DATETIME_IN_RANGE(Validator):
def __init__(self,
format=None,
error_message=None,
utc_offset=None,
minimum=None,
maximum=None):
if format is None:
self.format = current.deployment_settings.get_L10n_datetime_format()
else:
self.format = format
self.utc_offset = utc_offset
self.minimum = minimum
self.maximum = maximum
delta = timedelta(seconds=self.delta())
min_local = minimum and minimum + delta or None
max_local = maximum and maximum + delta or None
if error_message is None:
if minimum is None and maximum is None:
error_message = "enter date and time"
elif minimum is None:
error_message = "enter date and time on or before %(max)s"
elif maximum is None:
error_message = "enter date and time on or after %(min)s"
else:
error_message = "enter date and time in range %(min)s %(max)s"
d = dict(min = min_local, max = max_local)
self.error_message = error_message % d
def delta(self, utc_offset=None):
if utc_offset is not None:
self.utc_offset = utc_offset
if self.utc_offset is None:
self.utc_offset = current.session.s3.utc_offset
validate = IS_UTC_OFFSET()
offset, error = validate(self.utc_offset)
if error:
self.utc_offset = "UTC +0000" # fallback to UTC
else:
self.utc_offset = offset
delta = IS_UTC_OFFSET.get_offset_value(self.utc_offset)
return delta
def __call__(self, value):
val = value.strip()
# Get UTC offset
if len(val) > 5 and val[-5] in ("+", "-") and val[-4:].isdigit():
# UTC offset specified in dtstr
dtstr = val[0:-5].strip()
utc_offset = "UTC %s" % val[-5:]
else:
# use default UTC offset
dtstr = val
utc_offset = self.utc_offset
# Offset must be in range -1439 to +1439 minutes (i.e. -2359 to +2359 as +/-HHMM)
offset = self.delta(utc_offset=utc_offset)
if offset < -86340 or offset > 86340:
return (val, self.error_message)
# Convert into datetime object
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = \
time.strptime(dtstr, str(self.format))
dt = datetime(y, m, d, hh, mm, ss)
except:
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = \
time.strptime(dtstr+":00", str(self.format))
dt = datetime(y, m, d, hh, mm, ss)
except:
return(value, self.error_message)
# Validate
dt_utc = dt - timedelta(seconds=offset)
if self.minimum and dt_utc < self.minimum or \
self.maximum and dt_utc > self.maximum:
return (dt_utc, self.error_message)
else:
return (dt_utc, None)
def formatter(self, value):
format = self.format
offset = self.delta()
if not value:
return "-"
elif offset:
dt = value + timedelta(seconds=offset)
return dt.strftime(str(format))
else:
dt = value
return dt.strftime(str(format)) + "+0000"
# -----------------------------------------------------------------------------
class IS_ACL(IS_IN_SET):
"""
Validator for ACLs
@attention: Incomplete! Does not validate yet, but just convert.
@author: Dominic König <[email protected]>
"""
def __call__(self, value):
"""
Validation
@param value: the value to validate
"""
if not isinstance(value, (list, tuple)):
value = [value]
acl = 0x0000
for v in value:
try:
flag = int(v)
except (ValueError, TypeError):
flag = 0x0000
else:
acl |= flag
return (acl, None)
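# Worked example (sketch; the IS_IN_SET constructor arguments are illustrative):
#     IS_ACL((1, 2, 4))(["1", "4"]) -> (5, None)   # flags OR'ed together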
# -----------------------------------------------------------------------------
class QUANTITY_INV_ITEM(object):
"""
For Inv module
by Michael Howden
"""
def __init__(self,
db,
inv_item_id,
item_pack_id
):
self.inv_item_id = inv_item_id
self.item_pack_id = item_pack_id
current.db = db
def __call__(self, value):
db = current.db
error = "Invalid Quantity" # @todo: better error catching
query = (db.inv_inv_item.id == self.inv_item_id) & \
(db.inv_inv_item.item_pack_id == db.supply_item_pack.id)
inv_item_record = db(query).select(db.inv_inv_item.quantity,
db.supply_item_pack.quantity,
db.supply_item_pack.name,
limitby = (0, 1)).first() # @todo: this should be a virtual field
if inv_item_record and value:
query = (db.supply_item_pack.id == self.item_pack_id)
send_quantity = float(value) * db(query).select(db.supply_item_pack.quantity,
limitby=(0, 1)).first().quantity
inv_quantity = inv_item_record.inv_inv_item.quantity * \
inv_item_record.supply_item_pack.quantity
if send_quantity > inv_quantity:
return (value,
"Only %s %s (%s) in the Inventory." %
(inv_quantity,
inv_item_record.supply_item_pack.name,
inv_item_record.supply_item_pack.quantity)
)
else:
return (value, None)
else:
return (value, error)
def formatter(self, value):
return value
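# Worked example (sketch): if the matching inv_inv_item holds 10 packs of 5
# units each (inv_quantity = 50) and the requested item_pack holds 20 units,
# then value = 3 gives send_quantity = 60 > 50 and the validator returns the
# "Only 50 ..." error message; value = 2 (send_quantity = 40) passes.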
# -----------------------------------------------------------------------------
class IS_IN_SET_LAZY(Validator):
"""
Like IS_IN_SET but with options obtained from a supplied function.
Options are instantiated when the validator or its options() method is
called, so don't need to be generated until it's used. Useful if the
field is not needed on every request, and does significant processing
to construct its options, or generates a large collection. If the
options are just from a database query, one can use IS_ONE_OF instead.
Raises an exception if an options collection is passed rather than a
callable as this is a programming error, e.g. accidentally *calling*
the options function in the constructor instead of passing the
function. That would not get lazy options instantiation.
The options collection (theset) and labels collection parameters to
IS_IN_SET are replaced by:
@param theset_fn: Function of no arguments that returns a collection
of options and (optionally) labels. Both options and labels can be
supplied via a dict or OrderedDict (options are keys, values are
labels), list (or tuple) of two-element lists (or tuples) (element 0 in
each pair is an option, element 1 is its label). Otherwise, labels
are obtained either by calling the supplied represent function on each
item produced by theset_fn, or (if no represent is supplied), the items
themselves are used as labels.
@param represent: Function of one argument that returns the label for
a given option.
If there is a function call that returns the collection, just put
"lambda:" in front of the call. E.g.:
Field("nationality",
requires = IS_NULL_OR(IS_IN_SET_LAZY(
lambda: gis.get_countries(key_type="code"))),
label = T("Nationality"),
represent = lambda code: gis.get_country(code, key_type="code") or UNKNOWN_OPT)
Keyword parameters are same as for IS_IN_SET, except for labels, which
is not replaced by a function that parallels theset_fn, since ordering
is problematic if theset_fn returns a dict.
"""
def __init__(
self,
theset_fn,
represent=None,
error_message="value not allowed",
multiple=False,
zero="",
sort=False,
):
self.multiple = multiple
self.represent = represent
if not callable(theset_fn):
raise TypeError("Argument must be a callable.")
self.theset_fn = theset_fn
self.theset = None
self.labels = None
self.error_message = error_message
self.zero = zero
self.sort = sort
def _make_theset(self):
theset = self.theset_fn()
if theset:
if isinstance(theset, dict):
self.theset = [str(item) for item in theset]
self.labels = theset.values()
elif isinstance(theset, (tuple,list)): # @ToDo: Can this be a Rows?
if isinstance(theset[0], (tuple,list)) and len(theset[0])==2:
self.theset = [str(item) for item,label in theset]
self.labels = [str(label) for item,label in theset]
else:
self.theset = [str(item) for item in theset]
if self.represent:
self.labels = [self.represent(item) for item in theset]
else:
self.theset = theset
def options(self):
if not self.theset:
self._make_theset()
if not self.labels:
items = [(k, k) for (i, k) in enumerate(self.theset)]
else:
items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(options_sorter)
if self.zero != None and not self.multiple:
items.insert(0, ("", self.zero))
return items
def __call__(self, value):
if not self.theset:
self._make_theset()
if self.multiple:
### if below was values = re.compile("[\w\-:]+").findall(str(value))
if isinstance(value, (str,unicode)):
values = [value]
elif isinstance(value, (tuple, list)):
values = value
elif not value:
values = []
else:
values = [value]
failures = [x for x in values if not x in self.theset]
if failures and self.theset:
if self.multiple and (value == None or value == ""):
return ([], None)
return (value, self.error_message)
if self.multiple:
if isinstance(self.multiple,(tuple,list)) and \
not self.multiple[0]<=len(values)<self.multiple[1]:
return (values, self.error_message)
return (values, None)
return (value, None)
# -----------------------------------------------------------------------------
class IS_TIME_INTERVAL_WIDGET(Validator):
"""
Simple validator for the S3TimeIntervalWidget, returns
the selected time interval in seconds
"""
def __init__(self, field):
self.field = field
def __call__(self, value):
try:
val = int(value)
except ValueError:
return (0, None)
request = current.request
_vars = request.post_vars
try:
mul = int(_vars[("%s_multiplier" % self.field).replace(".", "_")])
except ValueError:
return (0, None)
seconds = val * mul
return (seconds, None)
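# Worked example (sketch): for a field db.mytable.repeat and post_vars
#     {"repeat": "5", "mytable_repeat_multiplier": "3600"}
# the widget value "5" validates to (18000, None), i.e. 5 hours in seconds.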
# END -------------------------------------------------------------------------
|
mit
| 496,373,035,491,269,760 | 4,679,601,395,689,489,000 | 37.76566 | 108 | 0.477068 | false |
mmiklavc/incubator-metron
|
metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/management_ui_master.py
|
4
|
3498
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.core.source import Template
from resource_management.libraries.functions.format import format
from resource_management.libraries.script import Script
from resource_management.core.resources.system import Execute
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from management_ui_commands import ManagementUICommands
class ManagementUIMaster(Script):
def install(self, env):
from params import params
env.set_params(params)
self.install_packages(env)
def configure(self, env, upgrade_type=None, config_dir=None):
from params import params
env.set_params(params)
File(format("/etc/default/metron"),
content=Template("metron.j2")
)
File(format("{metron_config_path}/management_ui.yml"),
mode=0755,
content=Template("management_ui.yml.j2"),
owner=params.metron_user,
group=params.metron_group
)
File(format("{metron_management_ui_path}/assets/app-config.json"),
content=Template("management-ui-app-config.json.j2"),
owner=params.metron_user,
group=params.metron_group
)
Directory('/var/run/metron',
create_parents=False,
mode=0755,
owner=params.metron_user,
group=params.metron_group
)
if params.metron_knox_enabled and not params.metron_ldap_enabled:
raise Fail("Enabling Metron with Knox requires LDAP authentication. Please set 'LDAP Enabled' to true in the Metron Security tab.")
def start(self, env, upgrade_type=None):
from params import params
env.set_params(params)
self.configure(env)
commands = ManagementUICommands(params)
commands.start_management_ui()
def stop(self, env, upgrade_type=None):
from params import params
env.set_params(params)
commands = ManagementUICommands(params)
commands.stop_management_ui()
def status(self, env):
from params import status_params
env.set_params(status_params)
commands = ManagementUICommands(status_params)
commands.status_management_ui(env)
def restart(self, env):
from params import params
env.set_params(params)
self.configure(env)
commands = ManagementUICommands(params)
commands.restart_management_ui(env)
if __name__ == "__main__":
ManagementUIMaster().execute()
|
apache-2.0
| -2,156,261,124,413,639,000 | 1,192,421,622,288,167,400 | 35.061856 | 144 | 0.684677 | false |
40223102/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/xml/dom/minidom.py
|
727
|
66854
|
"""Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __bool__(self):
return True
def toxml(self, encoding=None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding=None):
if encoding is None:
writer = io.StringIO()
else:
writer = io.TextIOWrapper(io.BytesIO(),
encoding=encoding,
errors="xmlcharrefreplace",
newline='\n')
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
return writer.getvalue()
else:
return writer.detach().getvalue()
def hasChildNodes(self):
return bool(self.childNodes)
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
# empty text node; discard
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in list(self._user_data.items()):
if handler is not None:
handler.handle(operation, key, data, src, dst)
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
# A Node is its own context manager, to ensure that an unlink() call occurs.
# This is similar to how a file object works.
def __enter__(self):
return self
def __exit__(self, et, ev, tb):
self.unlink()
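# Example (sketch): a Node can be used as a context manager so that unlink()
# is always called, e.g.
#     with xml.dom.minidom.parseString("<a><b/></a>") as doc:
#         print(doc.documentElement.tagName)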
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&amp;").replace("<", "&lt;"). \
replace("\"", "&quot;").replace(">", "&gt;")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
__slots__=('_name', '_value', 'namespaceURI',
'_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
nodeType = Node.ATTRIBUTE_NODE
attributes = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
self.ownerElement = None
self._name = qName
self.namespaceURI = namespaceURI
self._prefix = prefix
self.childNodes = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.nodeName.split(":", 1)[-1]
def _get_name(self):
return self.name
def _get_specified(self):
return self.specified
def _get_name(self):
return self._name
def _set_name(self, value):
self._name = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeName = name = property(_get_name, _set_name)
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
self.childNodes[0].data = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
self.childNodes[0].data = value
nodeValue = value = property(_get_value, _set_value)
def _get_prefix(self):
return self._prefix
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
self._prefix = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.name = newName
prefix = property(_get_prefix, _set_prefix)
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[list(self._attrs.keys())[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def __contains__(self, key):
if isinstance(key, str):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
def _cmp(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return (id(self) > id(other)) - (id(self) < id(other))
def __eq__(self, other):
return self._cmp(other) == 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __le__(self, other):
return self._cmp(other) <= 0
def __lt__(self, other):
return self._cmp(other) < 0
def __ne__(self, other):
return self._cmp(other) != 0
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, str):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError("value must be a string or Attr object")
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
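# Example (sketch): element.attributes returns a NamedNodeMap
#     attrs = element.attributes
#     attrs["class"] = "header"        # creates or updates an Attr node
#     print(attrs.length, list(attrs.keys()))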
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
__slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
'nextSibling', 'previousSibling')
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.parentNode = None
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self.nextSibling = self.previousSibling = None
# Attribute dictionaries are lazily created
# attributes are double-indexed:
# tagName -> Attribute
# URI,localName -> Attribute
# in the future: consider lazy generation
# of attribute objects this is too tricky
# for now because of headaches with
# namespaces.
self._attrs = None
self._attrsNS = None
def _ensure_attributes(self):
if self._attrs is None:
self._attrs = {}
self._attrsNS = {}
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
if self._attrs is not None:
for attr in list(self._attrs.values()):
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
if self._attrs is None:
return ""
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return ""
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
attr.value = value # also sets nodeValue
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
attr.value = value
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
else:
if value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
attr.prefix = prefix
attr.nodeName = qualifiedName
def getAttributeNode(self, attrname):
if self._attrs is None:
return None
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return None
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
self._ensure_attributes()
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
if self._attrs is None:
return False
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return False
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent+addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
self._ensure_attributes()
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr._is_id = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._ensure_attributes()
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.ownerElement = element
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
__slots__ = ()
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def normalize(self):
# For childless nodes, normalize() has nothing to do.
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
__slots__ = ('target', 'data')
def __init__(self, target, data):
self.target = target
self.data = data
# nodeValue is an alias for data
def _get_nodeValue(self):
return self.data
    def _set_nodeValue(self, value):
        self.data = value
nodeValue = property(_get_nodeValue, _set_nodeValue)
# nodeName is an alias for target
def _get_nodeName(self):
return self.target
def _set_nodeName(self, value):
self.target = value
nodeName = property(_get_nodeName, _set_nodeName)
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
__slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')
def __init__(self):
self.ownerDocument = self.parentNode = None
self.previousSibling = self.nextSibling = None
self._data = ''
Node.__init__(self)
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
data = nodeValue = property(_get_data, _set_data)
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
__slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
self.data = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
CharacterData.__init__(self)
self._data = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
__slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError(name_or_tuple)
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
__slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
# it's ok
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
e._call_user_data_handler(operation, n, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
# DOM Level III clears this up when talking about the return value
# of this function. If namespaceURI, qName and DocType are
# Null the document is returned without a document element
# Otherwise if doctype or namespaceURI are not None
# Then we go back to the above problem
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
using information available to it.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
__slots__ = ('_elem_info', 'doctype',
'_id_search_stack', 'childNodes', '_id_cache')
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
implementation = DOMImplementation()
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
parentNode = None
previousSibling = nextSibling = None
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.doctype = None
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
n.prefix = prefix
n._localName = localName
n.namespaceURI = namespaceURI
n.nodeName = name
if n.nodeType == Node.ELEMENT_NODE:
n.tagName = name
else:
# attribute node
n.name = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
e._call_user_data_handler(operation, n, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably
    # be used with other DOM implementations (one of the FourThought
# DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
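# Illustrative sketch (added for clarity, not part of the original module):
# Document.importNode() above hands off to _clone_node() to copy a node into
# another document.  The XML snippet and names below are invented for the
# example; toxml()/appendChild() are Node helpers defined earlier in this
# module.  The function is only defined here, never called.
def _example_import_node():
    src = parseString('<catalog><book id="b1">DOM</book></catalog>')
    dst = getDOMImplementation().createDocument(None, "library", None)
    # Deep-import the <book> element (and its text child) into `dst`.
    book = dst.importNode(src.documentElement.firstChild, True)
    dst.documentElement.appendChild(book)
    # -> '<library><book id="b1">DOM</book></library>'
    return dst.documentElement.toxml()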
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
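# Illustrative sketch (added for clarity, not part of the original module):
# a typical round trip through the parseString() entry point defined above.
# The XML snippet is invented for the example; the function is only defined
# here, never called.
def _example_parse_string():
    import io
    doc = parseString('<note to="a"><body>hello</body></note>')
    text = doc.getElementsByTagName('body')[0].firstChild.data  # 'hello'
    out = io.StringIO()
    doc.writexml(out, addindent='  ', newl='\n')
    return text, out.getvalue()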
def getDOMImplementation(features=None):
if features:
if isinstance(features, str):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
|
gpl-3.0
| -2,905,665,871,432,209,400 | 4,964,603,458,690,740,000 | 32.662638 | 87 | 0.582164 | false |
meteorfox/PerfKitBenchmarker
|
perfkitbenchmarker/linux_packages/ycsb.py
|
1
|
28681
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install, execute, and parse results from YCSB.
YCSB (the Yahoo! Cloud Serving Benchmark) is a common method of comparing NoSQL
database performance.
https://github.com/brianfrankcooper/YCSB
For PerfKitBenchmarker, we wrap YCSB to:
* Pre-load a database with a fixed number of records.
* Execute a collection of workloads under a staircase load.
* Parse the results into PerfKitBenchmarker samples.
The 'YCSBExecutor' class handles executing YCSB on a collection of client VMs.
Generally, clients just need this class. For example, to run against
HBase 1.0:
>>> executor = ycsb.YCSBExecutor('hbase-10')
>>> samples = executor.LoadAndRun(loader_vms)
By default, this runs YCSB workloads A and B against the database, 32 threads
per client VM, with an initial database size of 1GB (1k records).
Each workload runs for at most 30 minutes.
"""
import bisect
import collections
import copy
import csv
import io
import itertools
import math
import re
import logging
import operator
import os
import posixpath
import time
from perfkitbenchmarker import data
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR
FLAGS = flags.FLAGS
YCSB_VERSION = '0.9.0'
YCSB_TAR_URL = ('https://github.com/brianfrankcooper/YCSB/releases/'
'download/{0}/ycsb-{0}.tar.gz').format(YCSB_VERSION)
YCSB_DIR = posixpath.join(INSTALL_DIR, 'ycsb')
YCSB_EXE = posixpath.join(YCSB_DIR, 'bin', 'ycsb')
_DEFAULT_PERCENTILES = 50, 75, 90, 95, 99, 99.9
# Binary operators to aggregate reported statistics.
# Statistics with operator 'None' will be dropped.
AGGREGATE_OPERATORS = {
'Operations': operator.add,
'RunTime(ms)': max,
'Return=0': operator.add,
'Return=-1': operator.add,
'Return=-2': operator.add,
'Return=-3': operator.add,
'Return=OK': operator.add,
'Return=ERROR': operator.add,
'LatencyVariance(ms)': None,
'AverageLatency(ms)': None, # Requires both average and # of ops.
'Throughput(ops/sec)': operator.add,
'95thPercentileLatency(ms)': None, # Calculated across clients.
'99thPercentileLatency(ms)': None, # Calculated across clients.
'MinLatency(ms)': min,
'MaxLatency(ms)': max}
flags.DEFINE_boolean('ycsb_histogram', False, 'Include individual '
'histogram results from YCSB (will increase sample '
'count).')
flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples '
'from pre-populating database.')
flags.DEFINE_boolean('ycsb_include_individual_results', False,
'Include results from each client VM, rather than just '
'combined results.')
flags.DEFINE_boolean('ycsb_reload_database', True,
                     'Reload database, otherwise skip load stage. '
'Note, this flag is only used if the database '
'is already loaded.')
flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.',
lower_bound=1)
flags.DEFINE_list('ycsb_workload_files', ['workloada', 'workloadb'],
'Path to YCSB workload file to use during *run* '
'stage only. Comma-separated list')
flags.DEFINE_list('ycsb_load_parameters', [],
'Passed to YCSB during the load stage. Comma-separated list '
'of "key=value" pairs.')
flags.DEFINE_list('ycsb_run_parameters', [],
                  'Passed to YCSB during the run stage. Comma-separated list '
'of "key=value" pairs.')
flags.DEFINE_list('ycsb_threads_per_client', ['32'], 'Number of threads per '
'loader during the benchmark run. Specify a list to vary the '
'number of clients.')
flags.DEFINE_integer('ycsb_preload_threads', None, 'Number of threads per '
'loader during the initial data population stage. '
'Default value depends on the target DB.')
flags.DEFINE_integer('ycsb_record_count', 1000000, 'Pre-load with a total '
                     'dataset of this many records.')
flags.DEFINE_integer('ycsb_operation_count', 1000000, 'Number of operations '
'*per client VM*.')
flags.DEFINE_integer('ycsb_timelimit', 1800, 'Maximum amount of time to run '
'each workload / client count combination. Set to 0 for '
'unlimited time.')
# Default loading thread count for non-batching backends.
DEFAULT_PRELOAD_THREADS = 32
def _GetThreadsPerLoaderList():
"""Returns the list of client counts per VM to use in staircase load."""
return [int(thread_count) for thread_count in FLAGS.ycsb_threads_per_client]
def _GetWorkloadFileList():
"""Returns the list of workload files to run.
Returns:
In order of preference:
* The argument to --ycsb_workload_files.
* Bundled YCSB workloads A and B.
"""
return [data.ResourcePath(workload)
for workload in FLAGS.ycsb_workload_files]
def CheckPrerequisites():
for workload_file in _GetWorkloadFileList():
if not os.path.exists(workload_file):
raise IOError('Missing workload file: {0}'.format(workload_file))
def _Install(vm):
"""Installs the YCSB package on the VM."""
vm.Install('openjdk')
vm.Install('curl')
vm.RemoteCommand(('mkdir -p {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -').format(
YCSB_DIR, YCSB_TAR_URL))
def YumInstall(vm):
"""Installs the YCSB package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the YCSB package on the VM."""
_Install(vm)
def ParseResults(ycsb_result_string, data_type='histogram'):
"""Parse YCSB results.
Example input:
YCSB Client 0.1
Command line: -db com.yahoo.ycsb.db.HBaseClient -P /tmp/pkb/workloada
[OVERALL], RunTime(ms), 1800413.0
[OVERALL], Throughput(ops/sec), 2740.503428935472
[UPDATE], Operations, 2468054
[UPDATE], AverageLatency(us), 2218.8513395574005
[UPDATE], MinLatency(us), 554
[UPDATE], MaxLatency(us), 352634
[UPDATE], 95thPercentileLatency(ms), 4
[UPDATE], 99thPercentileLatency(ms), 7
[UPDATE], Return=0, 2468054
[UPDATE], 0, 398998
[UPDATE], 1, 1015682
[UPDATE], 2, 532078
...
Args:
ycsb_result_string: str. Text output from YCSB.
data_type: Either 'histogram' or 'timeseries'.
Returns:
A dictionary with keys:
client: containing YCSB version information.
command_line: Command line executed.
groups: list of operation group descriptions, each with schema:
group: group name (e.g., update, insert, overall)
statistics: dict mapping from statistic name to value
histogram: list of (ms_lower_bound, count) tuples, e.g.:
[(0, 530), (19, 1)]
indicates that 530 ops took between 0ms and 1ms, and 1 took between
19ms and 20ms. Empty bins are not reported.
"""
  # TODO: YCSB 0.9.0 outputs the client and command line strings to stderr,
  # so we need to support that in the future.
lines = []
client_string = 'YCSB'
command_line = 'unknown'
fp = io.BytesIO(ycsb_result_string)
result_string = next(fp).strip()
def IsHeadOfResults(line):
return line.startswith('YCSB Client 0.') or line.startswith('[OVERALL]')
while not IsHeadOfResults(result_string):
result_string = next(fp).strip()
if result_string.startswith('YCSB Client 0.'):
client_string = result_string
command_line = next(fp).strip()
if not command_line.startswith('Command line:'):
raise IOError('Unexpected second line: {0}'.format(command_line))
elif result_string.startswith('[OVERALL]'): # YCSB > 0.7.0.
lines.append(result_string)
else:
# Received unexpected header
raise IOError('Unexpected header: {0}'.format(client_string))
# Some databases print additional output to stdout.
# YCSB results start with [<OPERATION_NAME>];
# filter to just those lines.
def LineFilter(line):
return re.search(r'^\[[A-Z]+\]', line) is not None
lines = itertools.chain(lines, itertools.ifilter(LineFilter, fp))
r = csv.reader(lines)
by_operation = itertools.groupby(r, operator.itemgetter(0))
result = collections.OrderedDict([
('client', client_string),
('command_line', command_line),
('groups', collections.OrderedDict())])
for operation, lines in by_operation:
operation = operation[1:-1].lower()
if operation == 'cleanup':
continue
op_result = {
'group': operation,
data_type: [],
'statistics': {}
}
for _, name, val in lines:
name = name.strip()
val = val.strip()
# Drop ">" from ">1000"
if name.startswith('>'):
name = name[1:]
val = float(val) if '.' in val else int(val)
if name.isdigit():
if val:
op_result[data_type].append((int(name), val))
else:
if '(us)' in name:
name = name.replace('(us)', '(ms)')
val /= 1000.0
op_result['statistics'][name] = val
result['groups'][operation] = op_result
return result
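# Illustrative sketch (added for clarity, not part of the original module):
# feeding a minimal, made-up YCSB output string through ParseResults() above.
# The function is only defined here, never called.
def _example_parse_results():
  raw = '\n'.join([
      '[OVERALL], RunTime(ms), 1000.0',
      '[OVERALL], Throughput(ops/sec), 50.0',
      '[READ], Operations, 50',
      '[READ], AverageLatency(us), 300.0',
      '[READ], Return=0, 50',
      '[READ], 0, 49',
      '[READ], 1, 1'])
  parsed = ParseResults(raw)
  # parsed['groups']['read']['statistics'] contains Operations, Return=0 and
  # AverageLatency(ms) (converted from us); the histogram is [(0, 49), (1, 1)].
  return parsed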
def _CumulativeSum(xs):
total = 0
for x in xs:
total += x
yield total
def _WeightedQuantile(x, weights, p):
"""Weighted quantile measurement for an ordered list.
This method interpolates to the higher value when the quantile is not a direct
member of the list. This works well for YCSB, since latencies are floored.
Args:
x: List of values.
weights: List of numeric weights.
p: float. Desired quantile in the interval [0, 1].
Returns:
float.
Raises:
ValueError: When 'x' and 'weights' are not the same length, or 'p' is not in
the interval [0, 1].
"""
if len(x) != len(weights):
raise ValueError('Lengths do not match: {0} != {1}'.format(
len(x), len(weights)))
if p < 0 or p > 1:
raise ValueError('Invalid quantile: {0}'.format(p))
n = sum(weights)
target = n * float(p)
cumulative = list(_CumulativeSum(weights))
# Find the first cumulative weight >= target
i = bisect.bisect_left(cumulative, target)
if i == len(x):
return x[-1]
else:
return x[i]
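# Illustrative sketch (added for clarity, not part of the original module):
# a weighted median over made-up latency buckets using _WeightedQuantile().
def _example_weighted_quantile():
  latencies = [0, 1, 2, 19]    # bucket lower bounds, in ms
  counts = [530, 40, 10, 1]    # operations observed in each bucket
  # Half of the total weight is already reached in the first bucket, so the
  # weighted 50th percentile resolves to 0 ms.
  return _WeightedQuantile(latencies, counts, 0.5)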
def _PercentilesFromHistogram(ycsb_histogram, percentiles=_DEFAULT_PERCENTILES):
"""Calculate percentiles for from a YCSB histogram.
Args:
ycsb_histogram: List of (time_ms, frequency) tuples.
percentiles: iterable of floats, in the interval [0, 100].
Returns:
dict, mapping from percentile to value.
"""
result = collections.OrderedDict()
histogram = sorted(ycsb_histogram)
for percentile in percentiles:
if percentile < 0 or percentile > 100:
raise ValueError('Invalid percentile: {0}'.format(percentile))
if math.modf(percentile)[0] < 1e-7:
percentile = int(percentile)
label = 'p{0}'.format(percentile)
latencies, freqs = zip(*histogram)
time_ms = _WeightedQuantile(latencies, freqs, percentile * 0.01)
result[label] = time_ms
return result
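# Illustrative sketch (added for clarity, not part of the original module):
# percentile labels computed from a small, made-up histogram.
def _example_percentiles_from_histogram():
  histogram = [(0, 530), (19, 1)]  # 530 ops in [0, 1) ms, 1 op in [19, 20) ms
  # Returns an OrderedDict along the lines of {'p50': 0, ..., 'p99.9': 19}.
  return _PercentilesFromHistogram(histogram)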
def _CombineResults(result_list, combine_histograms=True):
"""Combine results from multiple YCSB clients.
Reduces a list of YCSB results (the output of ParseResults)
into a single result. Histogram bin counts, operation counts, and throughput
are summed; RunTime is replaced by the maximum runtime of any result.
Args:
result_list: List of ParseResults outputs.
combine_histograms: If true, histogram bins are summed across results. If
not, no histogram will be returned. Defaults to True.
Returns:
A dictionary, as returned by ParseResults.
"""
def DropUnaggregated(result):
"""Remove statistics which 'operators' specify should not be combined."""
drop_keys = {k for k, v in AGGREGATE_OPERATORS.iteritems() if v is None}
for group in result['groups'].itervalues():
for k in drop_keys:
group['statistics'].pop(k, None)
def CombineHistograms(hist1, hist2):
h1 = dict(hist1)
h2 = dict(hist2)
keys = sorted(frozenset(h1) | frozenset(h2))
result = []
for k in keys:
result.append((k, h1.get(k, 0) + h2.get(k, 0)))
return result
result = copy.deepcopy(result_list[0])
DropUnaggregated(result)
for indiv in result_list[1:]:
for group_name, group in indiv['groups'].iteritems():
if group_name not in result['groups']:
logging.warn('Found result group "%s" in individual YCSB result, '
'but not in accumulator.', group_name)
result['groups'][group_name] = copy.deepcopy(group)
continue
# Combine reported statistics.
# If no combining operator is defined, the statistic is skipped.
# Otherwise, the aggregated value is either:
# * The value in 'indiv', if the statistic is not present in 'result' or
# * AGGREGATE_OPERATORS[statistic](result_value, indiv_value)
for k, v in group['statistics'].iteritems():
if k not in AGGREGATE_OPERATORS:
logging.warn('No operator for "%s". Skipping aggregation.', k)
continue
elif AGGREGATE_OPERATORS[k] is None: # Drop
result['groups'][group_name]['statistics'].pop(k, None)
continue
elif k not in result['groups'][group_name]['statistics']:
logging.warn('Found statistic "%s.%s" in individual YCSB result, '
'but not in accumulator.', group_name, k)
result['groups'][group_name]['statistics'][k] = copy.deepcopy(v)
continue
op = AGGREGATE_OPERATORS[k]
result['groups'][group_name]['statistics'][k] = (
op(result['groups'][group_name]['statistics'][k], v))
if combine_histograms:
result['groups'][group_name]['histogram'] = CombineHistograms(
result['groups'][group_name]['histogram'],
group['histogram'])
else:
result['groups'][group_name].pop('histogram', None)
result['client'] = ' '.join((result['client'], indiv['client']))
result['command_line'] = ';'.join((result['command_line'],
indiv['command_line']))
if 'target' in result and 'target' in indiv:
result['target'] += indiv['target']
return result
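# Illustrative sketch (added for clarity, not part of the original module):
# combining two made-up single-client results with _CombineResults() above.
# Operation counts and histogram bins are summed; RunTime(ms) keeps the
# maximum across clients.
def _example_combine_results():
  client_a = {'client': 'YCSB', 'command_line': 'run-a', 'groups': {
      'read': {'group': 'read',
               'statistics': {'Operations': 100, 'RunTime(ms)': 900.0},
               'histogram': [(0, 99), (1, 1)]}}}
  client_b = {'client': 'YCSB', 'command_line': 'run-b', 'groups': {
      'read': {'group': 'read',
               'statistics': {'Operations': 120, 'RunTime(ms)': 950.0},
               'histogram': [(0, 120)]}}}
  # -> Operations: 220, RunTime(ms): 950.0, histogram: [(0, 219), (1, 1)]
  return _CombineResults([client_a, client_b])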
def _ParseWorkload(contents):
"""Parse a YCSB workload file.
YCSB workloads are Java .properties format.
http://en.wikipedia.org/wiki/.properties
This function does not support all .properties syntax, in particular escaped
newlines.
Args:
contents: str. Contents of the file.
Returns:
dict mapping from property key to property value for each property found in
'contents'.
"""
fp = io.BytesIO(contents)
result = {}
for line in fp:
if (line.strip() and not line.lstrip().startswith('#') and
not line.lstrip().startswith('!')):
k, v = re.split(r'\s*[:=]\s*', line, maxsplit=1)
result[k] = v.strip()
return result
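# Illustrative sketch (added for clarity, not part of the original module):
# parsing a tiny, made-up workload definition with _ParseWorkload() above.
def _example_parse_workload():
  contents = '\n'.join([
      '# comment lines and blank lines are ignored',
      'recordcount=1000',
      'readproportion = 0.95',
      'requestdistribution: zipfian'])
  # -> {'recordcount': '1000', 'readproportion': '0.95',
  #     'requestdistribution': 'zipfian'}
  return _ParseWorkload(contents)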
def _CreateSamples(ycsb_result, include_histogram=True, **kwargs):
"""Create PKB samples from a YCSB result.
Args:
ycsb_result: dict. Result of ParseResults.
include_histogram: bool. If True, include records for each histogram bin.
**kwargs: Base metadata for each sample.
Returns:
List of sample.Sample objects.
"""
stage = 'load' if ycsb_result['command_line'].endswith('-load') else 'run'
base_metadata = {'command_line': ycsb_result['command_line'],
'stage': stage}
base_metadata.update(kwargs)
for group_name, group in ycsb_result['groups'].iteritems():
meta = base_metadata.copy()
meta['operation'] = group_name
for statistic, value in group['statistics'].iteritems():
if value is None:
continue
unit = ''
m = re.match(r'^(.*) *\((us|ms|ops/sec)\)$', statistic)
if m:
statistic = m.group(1)
unit = m.group(2)
yield sample.Sample(' '.join([group_name, statistic]), value, unit, meta)
if group['histogram']:
percentiles = _PercentilesFromHistogram(group['histogram'])
for label, value in percentiles.iteritems():
yield sample.Sample(' '.join([group_name, label, 'latency']),
value, 'ms', meta)
if include_histogram:
for time_ms, count in group['histogram']:
yield sample.Sample(
'{0}_latency_histogram_{1}_ms'.format(group_name, time_ms),
count, 'count', meta)
class YCSBExecutor(object):
"""Load data and run benchmarks using YCSB.
See core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java for
attribute descriptions.
Attributes:
database: str.
loaded: boolean. If the database is already loaded.
parameters: dict. May contain the following, plus database-specific fields
(e.g., columnfamily for HBase).
threads: int.
target: int.
fieldcount: int.
fieldlengthdistribution: str.
readallfields: boolean.
writeallfields: boolean.
readproportion: float.
updateproportion: float.
scanproportion: float.
readmodifywriteproportion: float.
requestdistribution: str.
maxscanlength: int. Number of records to scan.
scanlengthdistribution: str.
insertorder: str.
hotspotdatafraction: float.
perclientparam: list.
    shardkeyspace: boolean. Defaults to False; indicates whether clients
      should have their own keyspace.
"""
FLAG_ATTRIBUTES = 'cp', 'jvm-args', 'target', 'threads'
def __init__(self, database, parameter_files=None, **kwargs):
self.database = database
self.loaded = False
self.parameter_files = parameter_files or []
self.parameters = kwargs.copy()
# Self-defined parameters, pop them out of self.parameters, so they
# are not passed to ycsb commands
self.perclientparam = self.parameters.pop('perclientparam', None)
self.shardkeyspace = self.parameters.pop('shardkeyspace', False)
def _BuildCommand(self, command_name, parameter_files=None, **kwargs):
command = [YCSB_EXE, command_name, self.database]
parameters = self.parameters.copy()
parameters.update(kwargs)
# These are passed as flags rather than properties, so they
# are handled differently.
for flag in self.FLAG_ATTRIBUTES:
value = parameters.pop(flag, None)
if value is not None:
command.extend(('-{0}'.format(flag), str(value)))
for param_file in list(self.parameter_files) + list(parameter_files or []):
command.extend(('-P', param_file))
for parameter, value in parameters.iteritems():
command.extend(('-p', '{0}={1}'.format(parameter, value)))
command.append('-p measurementtype=histogram')
return 'cd %s; %s' % (YCSB_DIR, ' '.join(command))
@property
def _default_preload_threads(self):
"""The default number of threads to use for pre-populating the DB."""
if FLAGS['ycsb_preload_threads'].present:
return FLAGS.ycsb_preload_threads
return DEFAULT_PRELOAD_THREADS
def _Load(self, vm, **kwargs):
"""Execute 'ycsb load' on 'vm'."""
kwargs.setdefault('threads', self._default_preload_threads)
kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)
for pv in FLAGS.ycsb_load_parameters:
param, value = pv.split('=', 1)
kwargs[param] = value
command = self._BuildCommand('load', **kwargs)
stdout, stderr = vm.RobustRemoteCommand(command)
return ParseResults(str(stderr + stdout))
def _LoadThreaded(self, vms, workload_file, **kwargs):
"""Runs "Load" in parallel for each VM in VMs.
Args:
vms: List of virtual machine instances. client nodes.
workload_file: YCSB Workload file to use.
**kwargs: Additional key-value parameters to pass to YCSB.
Returns:
List of sample.Sample objects.
"""
results = []
remote_path = posixpath.join(INSTALL_DIR,
os.path.basename(workload_file))
kwargs.setdefault('threads', self._default_preload_threads)
kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)
with open(workload_file) as fp:
workload_meta = _ParseWorkload(fp.read())
workload_meta.update(kwargs)
workload_meta.update(stage='load',
clients=len(vms) * kwargs['threads'],
threads_per_client_vm=kwargs['threads'],
workload_name=os.path.basename(workload_file))
self.workload_meta = workload_meta
record_count = int(workload_meta.get('recordcount', '1000'))
n_per_client = long(record_count) // len(vms)
loader_counts = [n_per_client +
(1 if i < (record_count % len(vms)) else 0)
for i in xrange(len(vms))]
def PushWorkload(vm):
vm.PushFile(workload_file, remote_path)
vm_util.RunThreaded(PushWorkload, vms)
kwargs['parameter_files'] = [remote_path]
def _Load(loader_index):
start = sum(loader_counts[:loader_index])
kw = copy.deepcopy(kwargs)
kw.update(insertstart=start,
insertcount=loader_counts[loader_index])
if self.perclientparam is not None:
kw.update(self.perclientparam[loader_index])
results.append(self._Load(vms[loader_index], **kw))
logging.info('VM %d (%s) finished', loader_index, vms[loader_index])
start = time.time()
vm_util.RunThreaded(_Load, range(len(vms)))
events.record_event.send(
type(self).__name__, event='load', start_timestamp=start,
end_timestamp=time.time(), metadata=copy.deepcopy(kwargs))
if len(results) != len(vms):
raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
len(results), len(vms), results))
samples = []
if FLAGS.ycsb_include_individual_results and len(results) > 1:
for i, result in enumerate(results):
samples.extend(_CreateSamples(
result, result_type='individual', result_index=i,
include_histogram=FLAGS.ycsb_histogram,
**workload_meta))
combined = _CombineResults(results)
samples.extend(_CreateSamples(
combined, result_type='combined',
include_histogram=FLAGS.ycsb_histogram,
**workload_meta))
return samples
def _Run(self, vm, **kwargs):
"""Run a single workload from a client vm."""
for pv in FLAGS.ycsb_run_parameters:
param, value = pv.split('=', 1)
kwargs[param] = value
command = self._BuildCommand('run', **kwargs)
    # YCSB versions greater than 0.7.0 output some of the info we need to
    # stderr, so we have to combine the two streams to get the expected
    # results.
stdout, stderr = vm.RobustRemoteCommand(command)
return ParseResults(str(stderr + stdout))
def _RunThreaded(self, vms, **kwargs):
"""Run a single workload using `vms`."""
target = kwargs.pop('target', None)
if target is not None:
target_per_client = target // len(vms)
targets = [target_per_client +
(1 if i < (target % len(vms)) else 0)
for i in xrange(len(vms))]
else:
targets = [target for _ in vms]
results = []
if self.shardkeyspace:
record_count = int(self.workload_meta.get('recordcount', '1000'))
n_per_client = long(record_count) // len(vms)
loader_counts = [n_per_client +
(1 if i < (record_count % len(vms)) else 0)
for i in xrange(len(vms))]
def _Run(loader_index):
vm = vms[loader_index]
params = copy.deepcopy(kwargs)
params['target'] = targets[loader_index]
if self.perclientparam is not None:
params.update(self.perclientparam[loader_index])
if self.shardkeyspace:
start = sum(loader_counts[:loader_index])
end = start + loader_counts[loader_index]
params.update(insertstart=start,
recordcount=end)
results.append(self._Run(vm, **params))
logging.info('VM %d (%s) finished', loader_index, vm)
vm_util.RunThreaded(_Run, range(len(vms)))
if len(results) != len(vms):
raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
len(results), len(vms), results))
return results
def RunStaircaseLoads(self, vms, workloads, **kwargs):
"""Run each workload in 'workloads' in succession.
A staircase load is applied for each workload file, for each entry in
ycsb_threads_per_client.
    Args:
      vms: List of VirtualMachine objects to generate load from.
      workloads: List of strings. Paths to the YCSB workload files to run.
      **kwargs: Additional parameters to pass to each run. See constructor for
        options.
Returns:
List of sample.Sample objects.
"""
all_results = []
for workload_index, workload_file in enumerate(workloads):
parameters = {'operationcount': FLAGS.ycsb_operation_count,
'recordcount': FLAGS.ycsb_record_count}
if FLAGS.ycsb_timelimit:
parameters['maxexecutiontime'] = FLAGS.ycsb_timelimit
parameters.update(kwargs)
remote_path = posixpath.join(INSTALL_DIR,
os.path.basename(workload_file))
with open(workload_file) as fp:
workload_meta = _ParseWorkload(fp.read())
workload_meta.update(kwargs)
workload_meta.update(workload_name=os.path.basename(workload_file),
workload_index=workload_index,
stage='run')
def PushWorkload(vm):
vm.PushFile(workload_file, remote_path)
vm_util.RunThreaded(PushWorkload, vms)
parameters['parameter_files'] = [remote_path]
for client_count in _GetThreadsPerLoaderList():
parameters['threads'] = client_count
start = time.time()
results = self._RunThreaded(vms, **parameters)
events.record_event.send(
type(self).__name__, event='run', start_timestamp=start,
end_timestamp=time.time(), metadata=copy.deepcopy(parameters))
client_meta = workload_meta.copy()
client_meta.update(clients=len(vms) * client_count,
threads_per_client_vm=client_count)
if FLAGS.ycsb_include_individual_results and len(results) > 1:
for i, result in enumerate(results):
all_results.extend(_CreateSamples(
result,
result_type='individual',
result_index=i,
include_histogram=FLAGS.ycsb_histogram,
**client_meta))
combined = _CombineResults(results)
all_results.extend(_CreateSamples(
combined, result_type='combined',
include_histogram=FLAGS.ycsb_histogram,
**client_meta))
return all_results
def LoadAndRun(self, vms, workloads=None, load_kwargs=None, run_kwargs=None):
"""Load data using YCSB, then run each workload/client count combination.
Loads data using the workload defined by 'workloads', then
executes YCSB for each workload file in 'workloads', for each
client count defined in FLAGS.ycsb_threads_per_client.
Generally database benchmarks using YCSB should only need to call this
method.
Args:
vms: List of virtual machines. VMs to use to generate load.
workloads: List of strings. Workload files to use. If unspecified,
_GetWorkloadFileList() is used.
load_kwargs: dict. Additional arguments to pass to the load stage.
run_kwargs: dict. Additional arguments to pass to the run stage.
Returns:
List of sample.Sample objects.
"""
workloads = workloads or _GetWorkloadFileList()
load_samples = []
assert workloads, 'no workloads'
if FLAGS.ycsb_reload_database or not self.loaded:
load_samples += list(self._LoadThreaded(
vms, workloads[0], **(load_kwargs or {})))
self.loaded = True
run_samples = list(self.RunStaircaseLoads(vms, workloads,
**(run_kwargs or {})))
if FLAGS.ycsb_load_samples:
return load_samples + run_samples
else:
return run_samples
|
apache-2.0
| -7,639,080,925,814,991,000 | -9,148,906,850,950,034,000 | 34.85125 | 80 | 0.644364 | false |
dfstrauss/textmagic-sms-api-python
|
textmagic/gsm0338.py
|
3
|
5571
|
"""
Check whether a string consists entirely of characters in the GSM 03.38
character set.
Pass your Unicode or ASCII string to is_gsm() to determine whether all
characters in the string are from the GSM 03.38 character set.
"""
#GSM 03.38 character set mapping to Unicode is specified here:
# http://unicode.org/Public/MAPPINGS/ETSI/GSM0338.TXT
#This code was translated from the C++ snippet at:
# http://stackoverflow.com/questions/27599/reliable-sms-unicode-gsm-encoding-in-php
#contributed by Magnus Westin:
# http://stackoverflow.com/users/2957/magnus-westin
UCS2_TO_GSM_LOOKUP_TABLE_SIZE = 0x100
NON_GSM = 0x80
UCS2_GCL_RANGE = 24
UCS2_GREEK_CAPITAL_LETTER_ALPHA = 0x0391
EXTEND = 0x001B
# note that the ` character is mapped to ' so that all characters that can be typed on
# a standard north american keyboard can be converted to the GSM default character set
ucs2_to_gsm = [
# +0x0 +0x1 +0x2 +0x3 +0x4 +0x5 +0x6 +0x7
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x00
NON_GSM, NON_GSM, 0x0a, NON_GSM, NON_GSM, 0x0d, NON_GSM, NON_GSM, # 0x08
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x10
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x18
0x20, 0x21, 0x22, 0x23, 0x02, 0x25, 0x26, 0x27, # 0x20
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, # 0x28
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, # 0x30
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, # 0x38
0x00, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, # 0x40
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, # 0x48
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, # 0x50
0x58, 0x59, 0x5a, EXTEND, EXTEND, EXTEND, EXTEND, 0x11, # 0x58
0x27, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, # 0x60
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, # 0x68
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, # 0x70
0x78, 0x79, 0x7a, EXTEND, EXTEND, EXTEND, EXTEND, NON_GSM, # 0x78
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x80
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x88
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x90
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x98
NON_GSM, 0x40, NON_GSM, 0x01, 0x24, 0x03, NON_GSM, 0x5f, # 0xA0
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0xA8
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0xB0
NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x60, # 0xB8
NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x5b, 0x0e, 0x1c, 0x09, # 0xC0
NON_GSM, 0x1f, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x60, # 0xC8
NON_GSM, 0x5d, NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x5c, NON_GSM, # 0xD0
0x0b, NON_GSM, NON_GSM, NON_GSM, 0x5e, NON_GSM, NON_GSM, 0x1e, # 0xD8
0x7f, NON_GSM, NON_GSM, NON_GSM, 0x7b, 0x0f, 0x1d, NON_GSM, # 0xE0
0x04, 0x05, NON_GSM, NON_GSM, 0x07, NON_GSM, NON_GSM, NON_GSM, # 0xE8
NON_GSM, 0x7d, 0x08, NON_GSM, NON_GSM, NON_GSM, 0x7c, NON_GSM, # 0xF0
0x0c, 0x06, NON_GSM, NON_GSM, 0x7e, NON_GSM, NON_GSM, NON_GSM # 0xF8
]
ucs2_gcl_to_gsm = [
0x41, # Alpha A
0x42, # Beta B
0x13, # Gamma
0x10, # Delta
0x45, # Epsilon E
0x5A, # Zeta Z
0x48, # Eta H
0x19, # Theta
0x49, # Iota I
0x4B, # Kappa K
0x14, # Lambda
0x4D, # Mu M
0x4E, # Nu N
0x1A, # Xi
0x4F, # Omicron O
0x16, # Pi
0x50, # Rho P
NON_GSM,
0x18, # Sigma
0x54, # Tau T
0x59, # Upsilon Y
0x12, # Phi
0x58, # Chi X
0x17, # Psi
0x15 # Omega
]
def not_gsm(char):
"""Is this character NOT in the GSM 03.38 character set?"""
result = True
ordinal = ord(char)
if(ordinal < UCS2_TO_GSM_LOOKUP_TABLE_SIZE):
result = (ucs2_to_gsm[ordinal] == NON_GSM)
elif((ordinal >= UCS2_GREEK_CAPITAL_LETTER_ALPHA) and
(ordinal <= (UCS2_GREEK_CAPITAL_LETTER_ALPHA + UCS2_GCL_RANGE))):
result = (ucs2_gcl_to_gsm[ordinal - UCS2_GREEK_CAPITAL_LETTER_ALPHA] == NON_GSM)
elif(ordinal == 0x20AC): # Euro sign
        result = False
    return result
def is_gsm(string):
"""Does the string consist entirely of GSM03.38 characters?"""
assert isinstance(string, basestring)
for ch in string:
if (not_gsm(ch)):
return False
return True
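# --- Hedged usage sketch (added for illustration; not part of the original
# module). The sample strings below are assumptions chosen to exercise the
# lookup tables above.
if __name__ == '__main__':
    # '@' and the Euro sign both map into GSM 03.38, so this should print True.
    print(is_gsm(u"Hello @ world \u20ac"))
    # U+2603 (snowman) is outside the lookup table and the Greek capital block,
    # so this should print False.
    print(is_gsm(u"snowman \u2603"))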
|
bsd-3-clause
| -6,720,417,502,999,998,000 | 3,980,044,815,063,401,000 | 51.065421 | 102 | 0.508347 | false |
jlmadurga/django-oscar
|
src/oscar/apps/offer/migrations/0001_initial.py
|
52
|
15207
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
from decimal import Decimal
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Benefit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(verbose_name='Type', max_length=128, blank=True, choices=[('Percentage', "Discount is a percentage off of the product's value"), ('Absolute', "Discount is a fixed amount off of the product's value"), ('Multibuy', 'Discount is to give the cheapest product for free'), ('Fixed price', 'Get the products that meet the condition for a fixed price'), ('Shipping absolute', 'Discount is a fixed amount of the shipping cost'), ('Shipping fixed price', 'Get shipping for a fixed price'), ('Shipping percentage', 'Discount is a percentage off of the shipping cost')])),
('value', oscar.models.fields.PositiveDecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Value', null=True)),
('max_affected_items', models.PositiveIntegerField(verbose_name='Max Affected Items', blank=True, help_text='Set this to prevent the discount consuming all items within the range that are in the basket.', null=True)),
('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)),
],
options={
'verbose_name_plural': 'Benefits',
'verbose_name': 'Benefit',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Condition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(verbose_name='Type', max_length=128, blank=True, choices=[('Count', 'Depends on number of items in basket that are in condition range'), ('Value', 'Depends on value of items in basket that are in condition range'), ('Coverage', 'Needs to contain a set number of DISTINCT items from the condition range')])),
('value', oscar.models.fields.PositiveDecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Value', null=True)),
('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)),
],
options={
'verbose_name_plural': 'Conditions',
'verbose_name': 'Condition',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ConditionalOffer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(verbose_name='Name', unique=True, max_length=128, help_text="This is displayed within the customer's basket")),
('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)),
('description', models.TextField(verbose_name='Description', help_text='This is displayed on the offer browsing page', blank=True)),
('offer_type', models.CharField(default='Site', max_length=128, verbose_name='Type', choices=[('Site', 'Site offer - available to all users'), ('Voucher', 'Voucher offer - only available after entering the appropriate voucher code'), ('User', 'User offer - available to certain types of user'), ('Session', 'Session offer - temporary offer, available for a user for the duration of their session')])),
('status', models.CharField(default='Open', max_length=64, verbose_name='Status')),
('priority', models.IntegerField(default=0, verbose_name='Priority', help_text='The highest priority offers are applied first')),
('start_datetime', models.DateTimeField(blank=True, verbose_name='Start date', null=True)),
('end_datetime', models.DateTimeField(verbose_name='End date', blank=True, help_text="Offers are active until the end of the 'end date'", null=True)),
('max_global_applications', models.PositiveIntegerField(verbose_name='Max global applications', blank=True, help_text='The number of times this offer can be used before it is unavailable', null=True)),
('max_user_applications', models.PositiveIntegerField(verbose_name='Max user applications', blank=True, help_text='The number of times a single user can use this offer', null=True)),
('max_basket_applications', models.PositiveIntegerField(verbose_name='Max basket applications', blank=True, help_text='The number of times this offer can be applied to a basket (and order)', null=True)),
('max_discount', models.DecimalField(verbose_name='Max discount', max_digits=12, decimal_places=2, null=True, help_text='When an offer has given more discount to orders than this threshold, then the offer becomes unavailable', blank=True)),
('total_discount', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total Discount')),
('num_applications', models.PositiveIntegerField(default=0, verbose_name='Number of applications')),
('num_orders', models.PositiveIntegerField(default=0, verbose_name='Number of Orders')),
('redirect_url', oscar.models.fields.ExtendedURLField(verbose_name='URL redirect (optional)', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('benefit', models.ForeignKey(verbose_name='Benefit', to='offer.Benefit')),
('condition', models.ForeignKey(verbose_name='Condition', to='offer.Condition')),
],
options={
'ordering': ['-priority'],
'verbose_name_plural': 'Conditional offers',
'verbose_name': 'Conditional offer',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Range',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(unique=True, max_length=128, verbose_name='Name')),
('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)),
('description', models.TextField(blank=True)),
('is_public', models.BooleanField(default=False, verbose_name='Is public?', help_text='Public ranges have a customer-facing page')),
('includes_all_products', models.BooleanField(default=False, verbose_name='Includes all products?')),
('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('classes', models.ManyToManyField(related_name='classes', verbose_name='Product Types', to='catalogue.ProductClass', blank=True)),
('excluded_products', models.ManyToManyField(related_name='excludes', verbose_name='Excluded Products', to='catalogue.Product', blank=True)),
('included_categories', models.ManyToManyField(related_name='includes', verbose_name='Included Categories', to='catalogue.Category', blank=True)),
],
options={
'verbose_name_plural': 'Ranges',
'verbose_name': 'Range',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RangeProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_order', models.IntegerField(default=0)),
('product', models.ForeignKey(to='catalogue.Product')),
('range', models.ForeignKey(to='offer.Range')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RangeProductFileUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filepath', models.CharField(max_length=255, verbose_name='File Path')),
('size', models.PositiveIntegerField(verbose_name='Size')),
('date_uploaded', models.DateTimeField(auto_now_add=True, verbose_name='Date Uploaded')),
('status', models.CharField(default='Pending', max_length=32, verbose_name='Status', choices=[('Pending', 'Pending'), ('Failed', 'Failed'), ('Processed', 'Processed')])),
('error_message', models.CharField(max_length=255, verbose_name='Error Message', blank=True)),
('date_processed', models.DateTimeField(verbose_name='Date Processed', null=True)),
('num_new_skus', models.PositiveIntegerField(verbose_name='Number of New SKUs', null=True)),
('num_unknown_skus', models.PositiveIntegerField(verbose_name='Number of Unknown SKUs', null=True)),
('num_duplicate_skus', models.PositiveIntegerField(verbose_name='Number of Duplicate SKUs', null=True)),
('range', models.ForeignKey(verbose_name='Range', related_name='file_uploads', to='offer.Range')),
('uploaded_by', models.ForeignKey(verbose_name='Uploaded By', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-date_uploaded',),
'verbose_name_plural': 'Range Product Uploaded Files',
'verbose_name': 'Range Product Uploaded File',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='rangeproduct',
unique_together=set([('range', 'product')]),
),
migrations.AddField(
model_name='range',
name='included_products',
field=models.ManyToManyField(related_name='includes', verbose_name='Included Products', to='catalogue.Product', through='offer.RangeProduct', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='condition',
name='range',
field=models.ForeignKey(null=True, verbose_name='Range', to='offer.Range', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='benefit',
name='range',
field=models.ForeignKey(null=True, verbose_name='Range', to='offer.Range', blank=True),
preserve_default=True,
),
migrations.CreateModel(
name='AbsoluteDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Absolute discount benefits',
'verbose_name': 'Absolute discount benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='CountCondition',
fields=[
],
options={
'verbose_name_plural': 'Count conditions',
'verbose_name': 'Count condition',
'proxy': True,
},
bases=('offer.condition',),
),
migrations.CreateModel(
name='CoverageCondition',
fields=[
],
options={
'verbose_name_plural': 'Coverage Conditions',
'verbose_name': 'Coverage Condition',
'proxy': True,
},
bases=('offer.condition',),
),
migrations.CreateModel(
name='FixedPriceBenefit',
fields=[
],
options={
'verbose_name_plural': 'Fixed price benefits',
'verbose_name': 'Fixed price benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='MultibuyDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Multibuy discount benefits',
'verbose_name': 'Multibuy discount benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='PercentageDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Percentage discount benefits',
'verbose_name': 'Percentage discount benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='ShippingBenefit',
fields=[
],
options={
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='ShippingAbsoluteDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Shipping absolute discount benefits',
'verbose_name': 'Shipping absolute discount benefit',
'proxy': True,
},
bases=('offer.shippingbenefit',),
),
migrations.CreateModel(
name='ShippingFixedPriceBenefit',
fields=[
],
options={
'verbose_name_plural': 'Fixed price shipping benefits',
'verbose_name': 'Fixed price shipping benefit',
'proxy': True,
},
bases=('offer.shippingbenefit',),
),
migrations.CreateModel(
name='ShippingPercentageDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Shipping percentage discount benefits',
'verbose_name': 'Shipping percentage discount benefit',
'proxy': True,
},
bases=('offer.shippingbenefit',),
),
migrations.CreateModel(
name='ValueCondition',
fields=[
],
options={
'verbose_name_plural': 'Value conditions',
'verbose_name': 'Value condition',
'proxy': True,
},
bases=('offer.condition',),
),
]
|
bsd-3-clause
| 906,098,645,290,046,300 | -1,325,494,864,203,455,700 | 54.097826 | 602 | 0.578944 | false |
jsoref/django
|
django/db/backends/postgresql/schema.py
|
202
|
4100
|
import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s USING %(column)s::%(type)s"
sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"
sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s"
sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s"
def quote_value(self, value):
return psycopg2.extensions.adapt(value)
def _model_indexes_sql(self, model):
output = super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return output
for field in model._meta.local_fields:
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if '[' in db_type:
continue
if db_type.startswith('varchar'):
output.append(self._create_index_sql(
model, [field], suffix='_like', sql=self.sql_create_varchar_index))
elif db_type.startswith('text'):
output.append(self._create_index_sql(
model, [field], suffix='_like', sql=self.sql_create_text_index))
return output
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Makes ALTER TYPE with SERIAL make sense.
"""
if new_type.lower() == "serial":
column = new_field.column
sequence_name = "%s_%s_seq" % (table, column)
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": "integer",
},
[],
),
[
(
self.sql_delete_sequence % {
"sequence": self.quote_name(sequence_name),
},
[],
),
(
self.sql_create_sequence % {
"sequence": self.quote_name(sequence_name),
},
[],
),
(
self.sql_alter_column % {
"table": self.quote_name(table),
"changes": self.sql_alter_column_default % {
"column": self.quote_name(column),
"default": "nextval('%s')" % self.quote_name(sequence_name),
}
},
[],
),
(
self.sql_set_sequence_max % {
"table": self.quote_name(table),
"column": self.quote_name(column),
"sequence": self.quote_name(sequence_name),
},
[],
),
],
)
else:
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(
table, old_field, new_field, new_type
)
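# Hedged illustration (comment only, not part of Django): for a column "id" on
# a table "app_item" being altered to SERIAL, the fragments returned above
# roughly translate into this statement sequence (table and column names are
# assumptions for the example):
#
#   ALTER TABLE "app_item" ALTER COLUMN "id" TYPE integer USING "id"::integer
#   DROP SEQUENCE IF EXISTS "app_item_id_seq" CASCADE
#   CREATE SEQUENCE "app_item_id_seq"
#   ALTER TABLE "app_item" ALTER COLUMN "id" SET DEFAULT nextval('"app_item_id_seq"')
#   SELECT setval('"app_item_id_seq"', MAX("id")) FROM "app_item"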
|
bsd-3-clause
| -6,207,032,959,530,667,000 | 3,934,518,269,333,977,000 | 41.708333 | 110 | 0.467561 | false |
erjohnso/ansible
|
lib/ansible/modules/notification/hipchat.py
|
49
|
6387
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hipchat
version_added: "1.2"
short_description: Send a message to Hipchat.
description:
- Send a message to a Hipchat room, with options to control the formatting.
options:
token:
description:
- API token.
required: true
room:
description:
- ID or name of the room.
required: true
from:
description:
- Name the message will appear to be sent from. Max length is 15
characters - above this it will be truncated.
required: false
default: Ansible
msg:
description:
- The message body.
required: true
default: null
color:
description:
- Background color for the message.
required: false
default: yellow
choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
msg_format:
description:
- Message format.
required: false
default: text
choices: [ "text", "html" ]
notify:
description:
- If true, a notification will be triggered for users in the room.
required: false
default: 'yes'
choices: [ "yes", "no" ]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
api:
description:
- API url if using a self-hosted hipchat server. For Hipchat API version
2 use the default URI with C(/v2) instead of C(/v1).
required: false
default: 'https://api.hipchat.com/v1'
version_added: 1.6.0
requirements: [ ]
author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)"
'''
EXAMPLES = '''
- hipchat:
room: notif
msg: Ansible task finished
# Use Hipchat API version 2
- hipchat:
api: https://api.hipchat.com/v2/
token: OAUTH2_TOKEN
room: notify
msg: Ansible task finished
'''
# ===========================================
# HipChat module specific support methods.
#
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six.moves.urllib.request import pathname2url
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
DEFAULT_URI = "https://api.hipchat.com/v1"
MSG_URI_V1 = "/rooms/message"
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
color='yellow', notify=False, api=MSG_URI_V1):
'''sending message to hipchat v1 server'''
params = {}
params['room_id'] = room
params['from'] = msg_from[:15] # max length is 15
params['message'] = msg
params['message_format'] = msg_format
params['color'] = color
params['api'] = api
params['notify'] = int(notify)
url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
data = urlencode(params)
if module.check_mode:
# In check mode, exit before actually sending the message
module.exit_json(changed=False)
response, info = fetch_url(module, url, data=data)
if info['status'] == 200:
return response.read()
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
color='yellow', notify=False, api=NOTIFY_URI_V2):
'''sending message to hipchat v2 server'''
headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
body = dict()
body['message'] = msg
body['color'] = color
body['message_format'] = msg_format
body['notify'] = notify
POST_URL = api + NOTIFY_URI_V2
url = POST_URL.replace('{id_or_name}', pathname2url(room))
data = json.dumps(body)
if module.check_mode:
# In check mode, exit before actually sending the message
module.exit_json(changed=False)
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
# https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
# 204 to be the expected result code.
if info['status'] in [200, 204]:
return response.read()
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
msg_from=dict(default="Ansible", aliases=['from']),
color=dict(default="yellow", choices=["yellow", "red", "green",
"purple", "gray", "random"]),
msg_format=dict(default="text", choices=["text", "html"]),
notify=dict(default=True, type='bool'),
validate_certs=dict(default='yes', type='bool'),
api=dict(default=DEFAULT_URI),
),
supports_check_mode=True
)
token = module.params["token"]
room = str(module.params["room"])
msg = module.params["msg"]
msg_from = module.params["msg_from"]
color = module.params["color"]
msg_format = module.params["msg_format"]
notify = module.params["notify"]
api = module.params["api"]
try:
if api.find('/v2') != -1:
send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
else:
send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
except Exception as e:
module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
changed = True
module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
if __name__ == '__main__':
main()
|
gpl-3.0
| 7,008,763,781,086,434,000 | 4,458,730,070,536,443,000 | 28.298165 | 103 | 0.616721 | false |
jcrist/pydy
|
pydy/viz/shapes.py
|
4
|
19419
|
#!/usr/bin/env python
__all__ = ['Cube',
'Cylinder',
'Cone',
'Sphere',
'Circle',
'Plane',
'Tetrahedron',
'Octahedron',
'Icosahedron',
'Torus',
'TorusKnot',
'Tube']
import numpy as np
# This is a list of ColorKeywords from THREE.js
THREE_COLORKEYWORDS = ['aliceblue', 'antiquewhite', 'aqua',
'aquamarine', 'azure', 'beige', 'bisque',
'black', 'blanchedalmond', 'blue', 'blueviolet',
'brown', 'burlywood', 'cadetblue', 'chartreuse',
'chocolate', 'coral', 'cornflowerblue',
'cornsilk', 'crimson', 'cyan', 'darkblue',
'darkcyan', 'darkgoldenrod', 'darkgray',
'darkgreen', 'darkgrey', 'darkkhaki',
'darkmagenta', 'darkolivegreen', 'darkorange',
'darkorchid', 'darkred', 'darksalmon',
'darkseagreen', 'darkslateblue', 'darkslategray',
'darkslategrey', 'darkturquoise', 'darkviolet',
'deeppink', 'deepskyblue', 'dimgray', 'dimgrey',
'dodgerblue', 'firebrick', 'floralwhite',
'forestgreen', 'fuchsia', 'gainsboro',
'ghostwhite', 'gold', 'goldenrod', 'gray',
'green', 'greenyellow', 'grey', 'honeydew',
'hotpink', 'indianred', 'indigo', 'ivory',
'khaki', 'lavender', 'lavenderblush',
'lawngreen', 'lemonchiffon', 'lightblue',
'lightcoral', 'lightcyan',
'lightgoldenrodyellow', 'lightgray',
'lightgreen', 'lightgrey', 'lightpink',
'lightsalmon', 'lightseagreen', 'lightskyblue',
'lightslategray', 'lightslategrey',
'lightsteelblue', 'lightyellow', 'lime',
'limegreen', 'linen', 'magenta', 'maroon',
'mediumaquamarine', 'mediumblue',
'mediumorchid', 'mediumpurple', 'mediumseagreen',
'mediumslateblue', 'mediumspringgreen',
'mediumturquoise', 'mediumvioletred',
'midnightblue', 'mintcream', 'mistyrose',
'moccasin', 'navajowhite', 'navy', 'oldlace',
'olive', 'olivedrab', 'orange', 'orangered',
'orchid', 'palegoldenrod', 'palegreen',
'paleturquoise', 'palevioletred', 'papayawhip',
'peachpuff', 'peru', 'pink', 'plum',
'powderblue', 'purple', 'red', 'rosybrown',
'royalblue', 'saddlebrown', 'salmon',
'sandybrown', 'seagreen', 'seashell', 'sienna',
'silver', 'skyblue', 'slateblue', 'slategray',
'slategrey', 'snow', 'springgreen', 'steelblue',
'tan', 'teal', 'thistle', 'tomato', 'turquoise',
'violet', 'wheat', 'white', 'whitesmoke',
'yellow', 'yellowgreen']
MATERIALS = ["default", "checkerboard", "metal", "dirt", "foil", "water",
"grass"]
class Shape(object):
"""Instantiates a shape. This is primarily used as a superclass for more
specific shapes like Cube, Cylinder, Sphere etc.
Shapes must be associated with a reference frame and a point using the
VisualizationFrame class.
Parameters
==========
name : str, optional
A name assigned to the shape.
color : str, optional
A color string from list of colors in THREE_COLORKEYWORDS
Examples
========
>>> from pydy.viz.shapes import Shape
>>> s = Shape()
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> a = Shape(name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
"""
def __init__(self, name='unnamed', color='grey', material="default"):
self.name = name
self.color = color
self.material = material
self.geometry_attrs = []
def __str__(self):
attributes = ([self.__class__.__name__,
self.name,
'color:' + self.color,
'material:' + self.material] +
sorted([attr + ':{}'.format(getattr(self, attr)) for
attr in self.geometry_attrs]))
return ' '.join(['{}'] * len(attributes)).format(*attributes)
def __repr__(self):
return self.__class__.__name__
@property
def name(self):
"""Returns the name attribute of the shape."""
return self._name
@name.setter
def name(self, new_name):
"""Sets the name attribute of the shape."""
if not isinstance(new_name, str):
raise TypeError("'name' should be a valid str object.")
else:
self._name = new_name
@property
def color(self):
"""Returns the color attribute of the shape."""
return self._color
@color.setter
def color(self, new_color):
"""Sets the color attributes of the shape. This should be a valid
three.js color keyword string."""
if new_color not in THREE_COLORKEYWORDS:
msg = "'color' should be a valid Three.js colors string:\n{}"
raise ValueError(msg.format('\n'.join(THREE_COLORKEYWORDS)))
else:
self._color = new_color
@property
def material(self):
"""Returns the material attribute of the shape."""
return self._material
@material.setter
def material(self, new_material):
"""Sets the material attribute of the shape, i.e. its shine,
brightness, opacity etc.. The material should be a valid material
from the listed MATERIALS. If a shape is attributed as "red" color,
and "water" material, ideally it should have opacity and brightness
properties like that of a red fluid.
"""
if new_material.lower() not in MATERIALS:
msg = "'material' is not valid. Choose from:\n{}"
raise ValueError(msg.format('\n'.join(MATERIALS)))
else:
self._material = new_material
def generate_dict(self, constant_map={}):
"""Returns a dictionary containing all the data associated with the
Shape.
Parameters
==========
constant_map : dictionary
If any of the shape's geometry are defined as SymPy expressions,
then this dictionary should map all SymPy Symbol's found in the
expressions to floats.
"""
data_dict = {}
data_dict['name'] = self.name
data_dict['color'] = self.color
data_dict['material'] = self.material
data_dict['type'] = self.__repr__()
for geom in self.geometry_attrs:
atr = getattr(self, geom)
try:
data_dict[geom] = float(atr.subs(constant_map))
except AttributeError:
# not a SymPy expression
data_dict[geom] = atr
except TypeError:
# can't convert expression to float
raise TypeError('{} is an expression, you '.format(atr) +
'must provide a mapping to numerical values.')
return data_dict
class Cube(Shape):
"""Instantiates a cube of a given size.
Parameters
==========
length : float or SymPy expression
The length of the cube.
Examples
========
>>> from pydy.viz.shapes import Cube
>>> s = Cube(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
    >>> s.length
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> a = Cube('my-shape2', 'red', length=10)
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
"""
def __init__(self, length, **kwargs):
super(Cube, self).__init__(**kwargs)
self.geometry_attrs.append('length')
self.length = length
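    # Hedged doctest-style sketch (illustrative; the SymPy symbol below is an
    # assumption): geometry given as an expression is resolved through the
    # ``constant_map`` argument of ``generate_dict`` when serialising a shape.
    #
    # >>> from sympy import symbols
    # >>> l = symbols('l')
    # >>> Cube(l, name='box').generate_dict(constant_map={l: 2.0})['length']
    # 2.0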
class Cylinder(Shape):
"""Instantiates a cylinder with given length and radius.
Parameters
==========
length : float or SymPy expression
The length of the cylinder.
radius : float or SymPy expression
The radius of the cylinder.
Examples
========
>>> from pydy.viz.shapes import Cylinder
>>> s = Cylinder(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> s.radius = 6.0
>>> s.radius
6.0
>>> a = Cylinder(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
>>> a.radius
5.0
"""
def __init__(self, length, radius, **kwargs):
super(Cylinder, self).__init__(**kwargs)
self.geometry_attrs += ['length', 'radius']
self.length = length
self.radius = radius
class Cone(Shape):
"""Instantiates a cone with given length and base radius.
Parameters
==========
length : float or SymPy expression
The length of the cone.
radius : float or SymPy expression
The base radius of the cone.
Examples
========
>>> from pydy.viz.shapes import Cone
>>> s = Cone(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> s.radius = 6.0
>>> s.radius
6.0
>>> a = Cone(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
>>> a.radius
5.0
"""
def __init__(self, length, radius, **kwargs):
super(Cone, self).__init__(**kwargs)
self.geometry_attrs += ['length', 'radius']
self.length = length
self.radius = radius
class Sphere(Shape):
"""Instantiates a sphere with a given radius.
Parameters
==========
radius : float or SymPy expression
The radius of the sphere.
Examples
========
>>> from pydy.viz.shapes import Sphere
>>> s = Sphere(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
    >>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Sphere(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
def __init__(self, radius=10.0, **kwargs):
super(Sphere, self).__init__(**kwargs)
self.geometry_attrs += ['radius']
self.radius = radius
class Circle(Sphere):
"""Instantiates a circle with a given radius.
Parameters
==========
radius : float or SymPy Expression
The radius of the circle.
Examples
========
>>> from pydy.viz.shapes import Circle
>>> s = Circle(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
    >>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Circle(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Plane(Shape):
"""Instantiates a plane with a given length and width.
Parameters
==========
length : float or SymPy expression
The length of the plane.
width : float or SymPy expression
The width of the plane.
Examples
========
>>> from pydy.viz.shapes import Plane
>>> s = Plane(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.width
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> s.width = 6.0
>>> s.width
6.0
>>> a = Plane(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
>>> a.width
5.0
"""
def __init__(self, length=10.0, width=5.0, **kwargs):
super(Plane, self).__init__(**kwargs)
self.geometry_attrs += ['length', 'width']
self.length = length
self.width = width
class Tetrahedron(Sphere):
"""Instantiates a Tetrahedron inscribed in a given radius circle.
Parameters
==========
radius : float or SymPy expression
        The radius of the circum-scribing sphere around the tetrahedron.
Examples
========
>>> from pydy.viz.shapes import Tetrahedron
>>> s = Tetrahedron(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
    >>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Tetrahedron(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Octahedron(Sphere):
"""Instantiaties an Octahedron inscribed in a circle of the given
radius.
Parameters
==========
radius : float or SymPy expression.
The radius of the circum-scribing sphere around the octahedron.
Examples
========
>>> from pydy.viz.shapes import Octahedron
>>> s = Octahedron(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
    >>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Octahedron(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Icosahedron(Sphere):
"""Instantiates an icosahedron inscribed in a sphere of the given
radius.
Parameters
==========
radius : float or a SymPy expression
Radius of the circum-scribing sphere for Icosahedron
Examples
========
>>> from pydy.viz.shapes import Icosahedron
>>> s = Icosahedron(10)
>>> s.name
'unnamed'
>>> s.color
'grey'
    >>> s.radius
10.0
    >>> # These can be changed later too ...
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
    12.0
>>> a = Icosahedron(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Torus(Shape):
"""Instantiates a torus with a given radius and section radius.
Parameters
==========
radius : float or SymPy expression
The radius of the torus.
tube_radius : float or SymPy expression
The radius of the torus tube.
Examples
========
>>> from pydy.viz.shapes import Torus
>>> s = Torus(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.tube_radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> s.tube_radius = 6.0
>>> s.tube_radius
6.0
>>> a = Torus(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
>>> a.tube_radius
5.0
"""
def __init__(self, radius, tube_radius, **kwargs):
super(Torus, self).__init__(**kwargs)
self.geometry_attrs += ['radius', 'tube_radius']
self.radius = radius
self.tube_radius = tube_radius
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, new_radius):
self._radius = new_radius
@property
def tube_radius(self):
return self._tube_radius
@tube_radius.setter
def tube_radius(self, new_tube_radius):
self._tube_radius = new_tube_radius
class TorusKnot(Torus):
"""Instantiates a torus knot with given radius and section radius.
Parameters
==========
radius : float or SymPy expression
The radius of the torus knot.
tube_radius : float or SymPy expression
The radius of the torus knot tube.
Examples
========
>>> from pydy.viz.shapes import TorusKnot
>>> s = TorusKnot(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.tube_radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> s.tube_radius = 6.0
>>> s.tube_radius
6.0
>>> a = TorusKnot(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
>>> a.tube_radius
5.0
"""
class Tube(Shape):
"""Instantiates a tube that sweeps along a path.
Parameters
==========
radius : float or SymPy expression
The radius of the tube.
points : array_like, shape(n, 3)
An array of n (x, y, z) coordinates representing points that the
tube's center line should follow.
Examples
========
>>> from pydy.viz.shapes import Tube
>>> points = [[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
>>> s = Tube(10.0, points)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.points
[[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 14.0
>>> s.radius
14.0
>>> s.points = [[2.0, 1.0, 4.0], [1.0, 2.0, 4.0],
... [2.0, 3.0, 1.0], [1.0, 1.0, 3.0]]
>>> s.points
[[2.0, 1.0, 4.0], [1.0, 2.0, 4.0], [2.0, 3.0, 1.0], [1.0, 1.0, 3.0]]
>>> a = Tube(12.0, points, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
12.0
>>> a.points
[[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
"""
def __init__(self, radius, points, **kwargs):
super(Tube, self).__init__(**kwargs)
self.geometry_attrs += ['radius', 'points']
self.radius = radius
self.points = points
@property
def points(self):
return self._points
@points.setter
def points(self, new_points):
self._points = np.asarray(new_points)
|
bsd-3-clause
| 1,072,054,246,041,783,600 | -9,089,903,278,016,062,000 | 23.48802 | 78 | 0.504351 | false |
vene/ambra
|
ambra/cross_validation.py
|
1
|
9371
|
import numbers
import time
import numpy as np
from sklearn.utils import safe_indexing
from sklearn.base import is_classifier, clone
from sklearn.metrics.scorer import check_scoring
from sklearn.externals.joblib import Parallel, delayed, logger
from ambra.backports import _num_samples, indexable
from sklearn.cross_validation import check_cv
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer, **params):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test, **params)
else:
score = scorer(estimator, X_test, y_test, **params)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
scorer_params=None):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
scorer_params : dict, optional
Parameters to pass to the scorer. Can be used for sample weights
and sample groups.
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y,
scorer, train, test, verbose,
None, fit_params, scorer_params)
for train, test in cv)
return np.array(scores)[:, 0]
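# Hedged usage sketch (comment only; the estimator, data and weights below are
# assumptions, not part of this module):
#
#   from sklearn.linear_model import LogisticRegression
#   scores = cross_val_score(LogisticRegression(), X, y, cv=5,
#                            scorer_params={'sample_weight': weights})
#   print(scores.mean())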
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,
fit_params, scorer_params, return_train_score=False,
return_parameters=False):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like or None
The target variable to try to predict in the case of
supervised learning.
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape = (n_train_samples,)
Indices of training samples.
test : array-like, shape = (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
scorer_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
# Same, but take both slices
scorer_params = scorer_params if scorer_params is not None else {}
train_scorer_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__')
and len(v) == n_samples
else v)
for k, v in scorer_params.items()])
test_scorer_params = dict([(k, np.asarray(v)[test]
if hasattr(v, '__len__')
and len(v) == n_samples
else v)
for k, v in scorer_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
test_score = _score(estimator, X_test, y_test, scorer,
**test_scorer_params)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer,
**train_scorer_params)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
|
bsd-2-clause
| 1,737,812,907,472,928,500 | -1,999,852,931,756,893,700 | 35.321705 | 79 | 0.593 | false |
michaelkirk/QGIS
|
tests/src/python/test_qgscolorscheme.py
|
11
|
3130
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsColorScheme.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '25/07/2014'
__copyright__ = 'Copyright 2014, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
from utilities import unittest, TestCase
from qgis.core import QgsColorScheme
from PyQt4.QtGui import QColor
#Make a dummy color scheme for testing
class DummyColorScheme(QgsColorScheme):
def __init__(self, parent=None):
QgsColorScheme.__init__(self)
def schemeName(self):
return "Dummy scheme"
def fetchColors(self,context='', baseColor=QColor()):
if ( context == "testscheme" ):
return [[QColor(255, 255, 0),'schemetest']]
elif baseColor.isValid():
return [[baseColor,'base']]
else:
return [[QColor( 255, 0, 0 ),'red'],[QColor( 0, 255, 0 ), None]]
def clone(self):
return DummyColorScheme()
class TestQgsColorScheme(TestCase):
def testCreateScheme(self):
"""Test creating a new color scheme"""
dummyScheme = DummyColorScheme()
self.assertTrue(dummyScheme)
def testGetSchemeName(self):
"""Test getting color scheme name"""
dummyScheme = DummyColorScheme()
self.assertEqual(dummyScheme.schemeName(), "Dummy scheme")
def testColorsNoBase(self):
"""Test getting colors without passing a base color"""
dummyScheme = DummyColorScheme()
colors = dummyScheme.fetchColors()
self.assertEqual( len(colors), 2 )
self.assertEqual( colors[0][0], QColor( 255, 0, 0 ) )
self.assertEqual( colors[0][1], 'red')
self.assertEqual( colors[1][0], QColor( 0, 255, 0 ) )
self.assertEqual( colors[1][1], None)
def testColorsWithBase(self):
"""Test getting colors with a base color"""
dummyScheme = DummyColorScheme()
testColor = QColor( 0, 0, 255 )
colors = dummyScheme.fetchColors( None, testColor )
self.assertEqual( len(colors), 1 )
self.assertEqual( colors[0][0], testColor )
self.assertEqual( colors[0][1], 'base')
def testColorsWithScheme(self):
"""Test getting colors when specifying a scheme"""
dummyScheme = DummyColorScheme()
colors = dummyScheme.fetchColors( 'testscheme' )
self.assertEqual( len(colors), 1 )
self.assertEqual( colors[0][0], QColor( 255, 255, 0 ) )
self.assertEqual( colors[0][1], 'schemetest')
def testClone(self):
"""Test cloning a color scheme"""
dummyScheme = DummyColorScheme()
colors = dummyScheme.fetchColors()
dummySchemeClone = dummyScheme.clone()
colorsClone = dummySchemeClone.fetchColors()
self.assertEqual( colors, colorsClone )
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
| 3,736,011,112,707,077,000 | 3,909,416,319,597,796,400 | 33.777778 | 78 | 0.640575 | false |
kupiakos/pybcd
|
bcdelement.py
|
1
|
3754
|
from common import *
from elements import *
from bcddevice import BCDDevice
class BCDElement:
obj = None
_type = None
_changed = False
_identifier = None
_enum = None
_value = None
fmt = None
def __init__(self, obj, type):
self.obj = obj
self._type = type
self._type_info = element_info(self._type)
self.fmt = self._type_info[1]
self._enum = None
self._value = None
def __hash__(self):
return hash((self._type, tuple(self.value)))
def __str__(self):
return 'BCDElement<%s=%s>' % (self.identifier, str(self.value))
def __repr__(self):
return str(self)
def _find_identifier(self):
self._identifier = None
self._enum = None
cls, fmt, subtype = self._type_info
v = None
if cls == ElementClass.Application:
v = alias_dict[cls][self.obj._type_info[2]].get(subtype)
else:
v = alias_dict[cls].get(subtype)
if v is None:
v = (fmt, 'custom:%x' % int(self._type, 16))
elif len(v) == 3:
self._enum = v[2]
self._identifier = v[1]
def _load_value(self):
self._value = self.obj._nav.value('Element', path='Elements/' + self._type)
@property
def identifier(self):
if self._identifier is None:
self._find_identifier()
return self._identifier
@property
def value(self):
if self._value is None:
self._load_value()
return element_transform[self._type_info[1]][1](self._value)
@value.setter
def value(self, val):
raise NotImplementedError('value setting not done yet')
if self.name in element_transform:
v = element_transform[self.name][0](self, val)
else:
v = val
self._value = v
def dump(self, tab='', verbose=False):
p = print
if self.identifier.startswith('custom:'):
p = printwarn
iv = self.value
if self._enum:
if iv not in self._enum.reverse_mapping:
p = printwarn
else:
iv = self._enum.reverse_mapping[iv]
v = element_transform_str.get(self.fmt, identity)(iv)
vl = None # the value list, if it exists
# handle the first of an objectlist
if isinstance(iv, list) and len(v) > 0:
vl = v[1:]
v = v[0]
# test if the guid exists
if isguid(v):
import random
if v not in self.obj.bcd:
p = printerror
if not verbose:
v = self.obj.bcd.guid_to_known(v)
identifier = self.identifier
if verbose:
identifier = '%s<%s>' % (self.identifier, self._type)
# print the identifier
(printelementname if p is print else p)(
tab + identifier.ljust(DUMP_SPACING + int(verbose)*10),
end='')
if isinstance(v, BCDDevice):
v = v.friendly(verbose)
# print the value (or first value if we're a list)
(p)(v)
if vl:
# do listy stuff
for g in vl:
p = print
if isguid(g):
if g in self.obj.bcd:
if not verbose:
g = self.obj.bcd.guid_to_known(g)
else:
p = printerror
p(tab + ' ' * (DUMP_SPACING + int(verbose)*10) + g)
# END OF LINE.
|
mit
| 694,461,047,590,949,000 | -688,291,536,535,575,400 | 26.807407 | 83 | 0.478689 | false |
ceph/radosgw-agent
|
radosgw_agent/util/log.py
|
1
|
2537
|
import logging
import sys
def get_dev_logger(name='dev.radosgw_agent'):
"""
    A simple utility to log things that are meant for developer eyes
    and not user-facing.
All developer logs must be prepended with `dev` so this utility ensures
that is the case. To use it::
dev_log = get_dev_logger(__name__)
Or::
dev_log = get_dev_logger('dev.custom_name')
"""
if not name.startswith('dev'):
return logging.getLogger('%s.%s' % ('dev', name))
return logging.getLogger(name)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': RED,
'ERROR': RED,
'FATAL': RED,
}
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
BASE_COLOR_FORMAT = "%(asctime)s %(process)d\
[$BOLD%(name)s$RESET][%(color_levelname)-17s] %(message)s"
BASE_FORMAT = "%(asctime)s %(process)d [%(name)s][%(levelname)-6s] %(message)s"
def supports_color():
"""
Returns True if the running system's terminal supports color, and False
otherwise.
"""
unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))
# isatty is not always implemented
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if unsupported_platform or not is_a_tty:
return False
return True
def color_message(message):
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
return message
class ColoredFormatter(logging.Formatter):
"""
A very basic logging formatter that not only applies color to the levels of
    the output but will also truncate the level names so that they do not alter
the visuals of logging when presented on the terminal.
"""
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
levelname = record.levelname
truncated_level = record.levelname[:6]
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + truncated_level + RESET_SEQ
record.color_levelname = levelname_color
return logging.Formatter.format(self, record)
def color_format():
"""
Main entry point to get a colored formatter, it will use the
BASE_FORMAT by default and fall back to no colors if the system
does not support it
"""
str_format = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT
color_format = color_message(str_format)
return ColoredFormatter(color_format)
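# --- Hedged usage sketch (added for illustration; not part of the original
# module). Handler and logger names are assumptions.
if __name__ == '__main__':
    handler = logging.StreamHandler()
    handler.setFormatter(color_format())
    demo_log = logging.getLogger('radosgw_agent.demo')
    demo_log.addHandler(handler)
    demo_log.setLevel(logging.DEBUG)
    demo_log.warning('warnings render in yellow when the terminal supports color')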
|
mit
| 8,392,796,658,836,838,000 | -6,672,858,117,135,494,000 | 27.505618 | 92 | 0.653134 | false |
jmccrae/wn-rdf
|
stresstest.py
|
1
|
1210
|
import multiprocessing
import sqlite3
import time
import urllib
def do_query(uri):
t1 = time.time()
try:
for line in urllib.urlopen(uri):
pass
print "Got %s in %f" % (uri, time.time() - t1)
except Exception as e:
print "Failed on %s: %s" % (uri, str(e))
if __name__ == "__main__":
conn = sqlite3.connect("wordnet_3.1+.db")
cursor = conn.cursor()
uris = []
cursor.execute("select synsetid, wordid, casedwordid from senses order by random() limit 100")
for synsetid, wordid, casedwordid in cursor.fetchall():
cursor.execute("select pos from synsets where synsetid=?",(synsetid,))
pos, = cursor.fetchone()
if casedwordid:
cursor.execute("select cased from casedwords where casedwordid=?", (casedwordid,))
word, = cursor.fetchone()
else:
cursor.execute("select lemma from words where wordid=?", (wordid,))
word, = cursor.fetchone()
uris += ["http://localhost:8051/wn31-%09d-%s" % (synsetid, pos)]
uris += ["http://localhost:8051/%s-%s" % (word, pos)]
print "Starting to query"
pool = multiprocessing.Pool(20)
pool.map(do_query, uris)
|
bsd-3-clause
| -489,701,647,061,013,700 | -6,254,677,808,290,215,000 | 33.571429 | 98 | 0.6 | false |
tectronics/mythbox
|
resources/test/mythboxtest/mythtv/test_domain.py
|
5
|
34567
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import datetime
import time
import unittest2 as unittest
import mythboxtest
import copy
from mockito import Mock, when, verify, any
from mythbox.settings import MythSettings
from mythbox.mythtv import protocol
from mythbox.mythtv.protocol import protocols
from mythbox.mythtv.domain import ctime2MythTime, dbTime2MythTime, Backend, \
Channel, CommercialBreak, Job, UserJob, TVProgram, Program, RecordedProgram, \
RecordingSchedule, Tuner, StatusException, frames2seconds, seconds2frames
from mythbox.mythtv.enums import CheckForDupesIn, CheckForDupesUsing, FlagMask, \
EpisodeFilter, JobStatus, JobType
from mythbox.platform import Platform
log = mythboxtest.getLogger('mythbox.unittest')
def pdata(pdict={}, protocolVersion=56):
'''
make creating fake program data easy with a sparse dict
in : {'title':'Seinfeld', 'description':'Real funny!'}
out: [] of data to pass into RecordedProgram constructor
'''
p = protocols[protocolVersion]
d = ['0'] * p.recordSize()
for k,v in pdict.items():
try:
d[p.recordFields().index(k)] = v
except ValueError:
log.warning('%s is not valid field in protocol %d' % (k,protocolVersion))
return d
def socketTime(h, m, s):
# return raw value that myth passes over socket for date=today and time=h,m,s (in local timezone)
return time.mktime(datetime.datetime.combine(datetime.date.today(), datetime.time(h,m,s)).timetuple())
def socketDateTime(year, month, day, h, m, s):
return time.mktime(datetime.datetime.combine(datetime.date(year, month, day), datetime.time(h,m,s)).timetuple())
class ModuleFunctionsTest(unittest.TestCase):
def test_ctime2MythTime_MinDateStringReturnsMinDate(self):
t = ctime2MythTime('0')
log.debug('MythTime = %s' % t)
self.assertEqual('19691231180000', t)
def test_ctime2MythTime_MinDateIntReturnsMinDate(self):
t = ctime2MythTime(0)
log.debug('MythTime = %s' % t)
self.assertEqual('19691231180000', t)
def test_ctime2MythTime_BadInputRaisesValueError(self):
# PLATFORM ISSUE: Throws exception on windows but returns 19691231175959 on linux
try:
t = ctime2MythTime(-1)
log.warn('Expected failure for time = -1 : %s' % t)
except ValueError, ve:
log.debug('Pass: %s' % ve)
def test_dbTime2MythTime_ShouldConvertTimeDeltaToString(self):
td = datetime.timedelta(seconds=1000)
mt = dbTime2MythTime(td)
log.debug('MythTime = %s' % mt)
self.assertEqual('001640', mt)
def test_frames2seconds(self):
s = frames2seconds(1000, 29.97)
log.debug('1000 frames @ 29.97fps = %s seconds' % s)
self.assertEqual(33.37, s)
s = frames2seconds(0, 29.97)
log.debug('0 frames @ 29.97fps = %s seconds' % s)
self.assertEqual(0.0, s)
s = frames2seconds(99999999L, 29.97)
log.debug('99999999L frames @ 29.97fps = %s seconds' % s)
self.assertEqual(3336669.97, s)
def test_seconds2frames(self):
s = seconds2frames(33.37, 29.97)
log.debug('33.37 seconds @ 29.97fps = %s frames' % s)
self.assertEqual(1000L, s)
s = seconds2frames(0, 29.97)
log.debug('0 seconds @ 29.97fps = %s frames' % s)
self.assertEqual(0L, s)
s = seconds2frames(3336669.97, 29.97)
log.debug('3336669.97 seconds @ 29.97fps = %s frames' % s)
self.assertEqual(99999999L, s)
class CheckForDupesUsingTest(unittest.TestCase):
def test_access_to_static_constants_works(self):
self.assertEqual(145, CheckForDupesUsing.translations[CheckForDupesUsing.NONE])
class ProgramTest(unittest.TestCase):
def setUp(self):
self.translator = Mock()
def test_constructor(self):
p = Program(self.translator)
self.assertFalse(p is None)
class ChannelTest(unittest.TestCase):
def test_constructor(self):
channel = Channel({'chanid':9, 'channum':'23_1', 'callsign':'WXYZ', 'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4})
log.debug(channel)
self.assertTrue(channel)
def test_constructor_IconMissing(self):
channel = Channel({'chanid':9, 'channum':'23_1', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4})
log.debug(channel)
self.assertTrue(channel.getIconPath() is None)
def test_getSortableChannelNumber_When_channel_number_is_already_sortable_Then_return_channel_number(self):
channel = Channel({'chanid':9, 'channum':'23', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4})
log.debug('Sortable channel number = %s' % channel.getSortableChannelNumber())
self.assertEqual(23, channel.getSortableChannelNumber())
def test_getSortableChannelNumber_When_channel_number_contains_underscore_Then_return_channel_number_as_float(self):
number = Channel({'chanid':9, 'channum':'23_4', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}).getSortableChannelNumber()
log.debug('Sortable channel number = %s' % number)
self.assertEqual(23.4, number)
def test_getSortableChannelNumber_When_channel_number_contains_dot_Then_return_channel_number_as_float(self):
number = Channel({'chanid':9, 'channum':'23.4', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}).getSortableChannelNumber()
log.debug('Sortable channel number = %s' % number)
self.assertEqual(23.4, number)
def test_getSortableChannelNumber_When_channel_number_doesnt_seem_like_a_number_Then_return_channel_id(self):
number = Channel({'chanid':9, 'channum':'23/4', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}).getSortableChannelNumber()
log.debug('Sortable channel number = %s' % number)
self.assertEqual(9, number)
class TVProgramTest(unittest.TestCase):
def setUp(self):
self.data = {
'title' : 'Bonanza',
'subtitle' : 'The Shootout',
'description' : 'Yee haw!',
'starttime' : datetime.datetime(2008, 11, 21, 14),
'endtime' : datetime.datetime(2008, 11, 21, 14),
'channum' : '23',
'hdtv' : True
}
self.translator = Mock()
self.platform = Platform()
self.protocol = protocol.Protocol23056()
self.settings = MythSettings(self.platform, self.translator)
def test_constructor(self):
program = TVProgram(self.data, self.translator)
self.assertTrue(program is not None)
self.assertTrue(program.isHD())
def test_starttimeAsTime(self):
program = TVProgram(self.data, self.translator)
time = program.starttimeAsTime()
log.debug('startTime = %s' % time)
self.assertTrue(time)
def test_starttime_TypeInDataDictIsADateTime(self):
p = TVProgram({'starttime': datetime.datetime(2008, 11, 21, 14)}, self.translator)
self.assertEqual('20081121140000', p.starttime())
def test_eq_Make_sure_bidirectional_equivalence_to_RecordedProgram_works(self):
tv = TVProgram(self.data, self.translator)
recorded = RecordedProgram(pdata({'channum':'23','starttime': socketDateTime(2008, 11, 21, 14, 0, 0)}), Mock(), Mock(), Mock(), self.protocol, Mock())
self.assertTrue(tv == recorded)
self.assertTrue(recorded == tv)
self.assertTrue(tv in [recorded])
self.assertTrue(recorded in [tv])
self.assertTrue({tv:tv}.has_key(recorded))
self.assertTrue({recorded:recorded}.has_key(tv))
class RecordedProgramTest(unittest.TestCase):
def setUp(self):
self.conn = Mock()
self.settings = Mock()
self.translator = Mock()
self.platform = Mock()
self.protocol = protocol.Protocol23056()
self.pkwargs = {'settings':self.settings, 'translator': self.translator, 'platform':self.platform, 'protocol':self.protocol, 'conn':self.conn}
def test_hashable(self):
p1 = RecordedProgram(pdata({'channum':'99', 'starttime':999999}), **self.pkwargs)
p2 = RecordedProgram(pdata({'channum':'101', 'starttime':888888}), **self.pkwargs)
mydict = {p1:'p1', p2:'p2'}
self.assertTrue(p1 in mydict)
self.assertTrue(p2 in mydict)
self.assertEqual('p1', mydict[p1])
self.assertEqual('p2', mydict[p2])
def test_hasBookmark_False(self):
p = RecordedProgram(pdata(), **self.pkwargs)
p.setProgramFlags(FlagMask.FL_AUTOEXP)
self.assertFalse(p.isBookmarked())
self.assertTrue(p.isAutoExpire())
def test_hasBookmark_True(self):
p = RecordedProgram(pdata(), **self.pkwargs)
p.setProgramFlags(FlagMask.FL_BOOKMARK | FlagMask.FL_AUTOEXP)
self.assertTrue(p.isBookmarked())
self.assertTrue(p.isAutoExpire())
def test_hasCommercials_True(self):
p = RecordedProgram(pdata({'programflags':FlagMask.FL_COMMFLAG | FlagMask.FL_AUTOEXP}), **self.pkwargs)
commBreaks = []
commBreaks.append(CommercialBreak(120,180))
when(self.conn).getCommercialBreaks(p).thenReturn(commBreaks)
log.debug('comms = %s' % len(p.getCommercials()))
self.assertTrue(p.hasCommercials())
#verify(self.conn).getCommercialBreaks(p)
def test_hasCommercials_False(self):
p = RecordedProgram(pdata({'programflags':FlagMask.FL_COMMFLAG | FlagMask.FL_AUTOEXP}), **self.pkwargs)
commBreaks = []
when(self.conn).getCommercialBreaks(p).thenReturn(commBreaks)
log.debug('comms = %s' % len(p.getCommercials()))
self.assertFalse(p.hasCommercials())
def test_getCommercials_ReturnsOneCommercial(self):
p = RecordedProgram(pdata({'programflags':FlagMask.FL_COMMFLAG | FlagMask.FL_AUTOEXP}), **self.pkwargs)
commBreaks = []
commBreaks.append(CommercialBreak(120,180))
when(self.conn).getCommercialBreaks(p).thenReturn(commBreaks)
result = p.getCommercials()
log.debug('commercials = %s'%result)
self.assertEqual(commBreaks, result)
verify(self.conn).getCommercialBreaks(p)
def test_eq_True_self(self):
p = RecordedProgram(pdata({'channum':'99', 'starttime':999999}), **self.pkwargs)
self.assertEqual(p, p)
def test_eq_True_same_channelId_and_startttime(self):
data = pdata({'channum':'99', 'starttime':999999})
p1 = RecordedProgram(data, **self.pkwargs)
p2 = RecordedProgram(data[:], **self.pkwargs)
self.assertEqual(p1, p2)
self.assertEqual(p2, p1)
def test_eq_False_different_channelNumber_and_startttime(self):
p1 = RecordedProgram(pdata({'channum':'11', 'starttime':999999}), **self.pkwargs)
p2 = RecordedProgram(pdata({'channum':'101', 'starttime':777777}), **self.pkwargs)
self.assertNotEquals(p1, p2)
self.assertNotEquals(p2, p1)
def test_eq_False_different_channelNumber_same_startttime(self):
p1 = RecordedProgram(pdata({'channum':'99', 'starttime':999999}), **self.pkwargs)
p2 = RecordedProgram(pdata({'channum':'101', 'starttime':999999}), **self.pkwargs)
self.assertNotEquals(p1, p2)
self.assertNotEquals(p2, p1)
def test_formattedAirTime(self):
# 9:00pm 9:30pm
p = RecordedProgram(pdata({'starttime':socketTime(21, 0, 0), 'endtime':socketTime(21, 30, 0)}), **self.pkwargs)
self.assertEqual('9:00 - 9:30PM', p.formattedAirTime(short=False))
self.assertEqual('9 - 9:30PM', p.formattedAirTime(short=True))
self.assertEqual('9 - 9:30PM', p.formattedAirTime())
def test_getDuration_When_duration_is_half_hour_Then_return_30mins(self):
# 6:30pm 7:00pm
self.assertEqual(30, RecordedProgram(pdata({'starttime':socketTime(18, 30, 0), 'endtime':socketTime(19, 0, 0)}), **self.pkwargs).getDuration())
def test_getDuration_When_2_hour_duration_spans_midnight_into_next_day_Then_return_120mins(self):
# 10/10/2009 11pm 10/11/2009 1am
self.assertEqual(120, RecordedProgram(pdata({'starttime':socketDateTime(2009, 10, 10, 23, 0, 0), 'endtime':socketDateTime(2009, 10, 11, 1, 0, 0)}), **self.pkwargs).getDuration())
def test_getDuration_When_start_and_end_times_same_Then_return_0mins(self):
self.assertEqual(0, RecordedProgram(pdata({'starttime': socketTime(18, 30, 0), 'endtime': socketTime(18, 30, 0)}), **self.pkwargs).getDuration())
def test_formattedStartTime_1pm(self):
s = RecordedProgram(pdata({'starttime':socketTime(13, 0, 0)}), self.settings, self.translator, self.platform, self.protocol, self.conn).formattedStartTime()
log.debug('startime = %s' % s)
self.assertEqual('1:00 PM', s)
def test_formattedDuration(self):
data = [
{'start' : socketTime(18, 30, 0), 'end' : socketTime(20, 30, 0), 'expected' : '2 hrs'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(19, 30, 0), 'expected' : '1 hr'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(18, 31, 0), 'expected' : '1 m'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(19, 0, 0), 'expected' : '30 m'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(20, 0, 0), 'expected' : '1 hr 30 m'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(21, 0, 0), 'expected' : '2 hrs 30 m'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(19, 31, 0), 'expected' : '1 hr 1 m'},
{'start' : socketTime(18, 30, 0), 'end' : socketTime(20, 31, 0), 'expected' : '2 hrs 1 m'}]
for d in data:
s = RecordedProgram(pdata({'starttime':d['start'], 'endtime':d['end']}), **self.pkwargs).formattedDuration()
log.debug('Duration = %s' % s)
self.assertEqual(d['expected'], s)
def test_originalAirDate_When_missing_Returns_None(self):
rp = RecordedProgram(pdata({'airdate':'','hasairdate':0}), **self.pkwargs)
self.assertFalse(rp.hasOriginalAirDate())
self.assertEqual('', rp.originalAirDate())
def test_originalAirDate_When_available_Returns_date_as_string(self):
rp = RecordedProgram(pdata({'airdate': '2008-10-10', 'hasairdate':1}), **self.pkwargs)
self.assertEqual('2008-10-10', rp.originalAirDate())
self.assertTrue(rp.hasOriginalAirDate())
class TunerTest(unittest.TestCase):
def setUp(self):
self.db = Mock()
self.conn = Mock()
self.translator = Mock()
self.domainCache = Mock()
self.tuner = Tuner(4, 'mrbun', 1000, 6000, 'HDHOMERUN', self.domainCache, self.conn, self.db, self.translator)
def test_toString(self):
log.debug('tuner = %s'%self.tuner)
self.assertFalse(self.tuner is None)
def test_isWatchingOrRecording_CardIdle(self):
when(self.conn).getTunerShowing('Seinfeld').thenReturn(-1)
self.assertFalse(self.tuner.isWatchingOrRecording('Seinfeld'))
def test_isWatchingOrRecording_CardNotIdleButShowDoesntMatch(self):
when(self.conn).getTunerShowing('Seinfeld').thenReturn(-1)
self.assertFalse(self.tuner.isWatchingOrRecording('Seinfeld'))
def test_isWatchingOrRecording_CardNotIdleAndShowMatches(self):
when(self.conn).getTunerShowing('Seinfeld').thenReturn(self.tuner.tunerId)
self.assertTrue(self.tuner.isWatchingOrRecording('Seinfeld'))
def test_isRecording_True(self):
when(self.conn).isTunerRecording(any()).thenReturn(True)
result = self.tuner.isRecording()
log.debug('isRecording_True = %s'%result)
self.assertTrue(result)
verify(self.conn).isTunerRecording(any())
def test_isRecording_False(self):
when(self.conn).isTunerRecording(any()).thenReturn(False)
self.assertFalse(self.tuner.isRecording())
verify(self.conn).isTunerRecording(any())
def test_hasChannel_True(self):
channels = []
for x in range(0,5):
channels.append(Channel(
{'chanid':x, 'channum':'%d'%x, 'callsign':'WXYZ',
'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4}))
when(self.domainCache).getChannels().thenReturn(channels)
self.assertTrue(self.tuner.hasChannel(Channel(dict(channum='3'))))
def test_hasChannel_False(self):
channels = []
for x in range(0,5):
channels.append(Channel(
{'chanid':x, 'channum':'%d'%x, 'callsign':'WXYZ',
'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4}))
when(self.domainCache).getChannels().thenReturn(channels)
self.assertFalse(self.tuner.hasChannel(Channel(dict(channum='6'))))
def test_getChannels_CachingWorks(self):
channels = []
for x in range(0,5):
channels.append(Channel(
{'chanid':x, 'channum':'%d'%x, 'callsign':'WXYZ',
'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4}))
when(self.domainCache).getChannels().thenReturn(channels)
for x in range(10):
channels = self.tuner.getChannels()
verify(self.domainCache, 1).getChannels()
class CommercialBreakTest(unittest.TestCase):
def test_constructor(self):
commercial = CommercialBreak(100, 200)
self.assertTrue(commercial is not None)
def test_constructor_StartAfterEndFailsAssertion(self):
try:
CommercialBreak(200, 100)
except AssertionError, ae:
log.debug('Error = %s' % ae)
def test_isDuring_True(self):
commercial = CommercialBreak(100, 200)
self.assertTrue(commercial.isDuring(150))
def test_isDuring_BeforeCommercialReturnsFalse(self):
commercial = CommercialBreak(100, 200)
self.assertFalse(commercial.isDuring(50))
def test_isDuring_AfterCommercialReturnsFalse(self):
commercial = CommercialBreak(100, 200)
self.assertFalse(commercial.isDuring(350))
class RecordingScheduleTest(unittest.TestCase):
def test_starttime_DataFromNativeMySQL(self):
data = {'starttime': datetime.timedelta(seconds=(1 * 60 * 60) + (2 * 60) + 3)}
schedule = RecordingSchedule(data, Mock())
self.assertEqual('010203', schedule.starttime())
def test_endtime_DataFromNativeMySQL(self):
data = {'endtime': datetime.timedelta(seconds=(1 * 60 * 60) + (2 * 60) + 3)}
schedule = RecordingSchedule(data, Mock())
self.assertEqual('010203', schedule.endtime())
def test_startdate_DataFromNativeMySQL(self):
data = {'startdate': datetime.date(2008, 11, 12)}
schedule = RecordingSchedule(data, Mock())
self.assertEqual('20081112', schedule.startdate())
def test_enddate_DataFromNativeMySQL(self):
data = {'enddate': datetime.date(2008, 11, 12)}
schedule = RecordingSchedule(data, Mock())
self.assertEqual('20081112', schedule.enddate())
def test_episodeFilter_and_checkForDupesIn_read_from_and_written_to_dupin_field_correctly(self):
data = {'dupin': CheckForDupesIn.ALL_RECORDINGS | EpisodeFilter.EXCLUDE_REPEATS_AND_GENERICS}
schedule = RecordingSchedule(data, Mock())
self.assertEqual(EpisodeFilter.EXCLUDE_REPEATS_AND_GENERICS, schedule.getEpisodeFilter())
schedule.setEpisodeFilter(EpisodeFilter.NEW_EPISODES_ONLY)
self.assertEqual(EpisodeFilter.NEW_EPISODES_ONLY, schedule.getEpisodeFilter())
self.assertEqual(CheckForDupesIn.ALL_RECORDINGS, schedule.getCheckForDupesIn())
schedule.setCheckForDupesIn(CheckForDupesIn.PREVIOUS_RECORDINGS)
self.assertEqual(EpisodeFilter.NEW_EPISODES_ONLY, schedule.getEpisodeFilter())
self.assertEqual(CheckForDupesIn.PREVIOUS_RECORDINGS, schedule.getCheckForDupesIn())
schedule.setEpisodeFilter(EpisodeFilter.NONE)
self.assertEqual(EpisodeFilter.NONE, schedule.getEpisodeFilter())
self.assertEqual(CheckForDupesIn.PREVIOUS_RECORDINGS, schedule.getCheckForDupesIn())
def test_hashable(self):
s1 = RecordingSchedule({'recordid' : 1}, Mock())
s2 = RecordingSchedule({'recordid' : 2}, Mock())
d = {s1:'schedule1',}
self.assertIn(s1, d)
self.assertEqual('schedule1', d[s1])
self.assertNotIn(s2, d)
class UserJobTest(unittest.TestCase):
def test_isActive_When_command_not_none_Then_return_true(self):
self.assertTrue(UserJob(JobType.USERJOB1, 'Send to Ipad', 'HandBrakeCLI blah blal blah').isActive())
def test_isActive_When_command_empty_The_return_false(self):
self.assertFalse(UserJob(JobType.USERJOB1, 'Send to Ipad', '').isActive())
def test_isActive_When_command_none_Then_return_false(self):
self.assertFalse(UserJob(JobType.USERJOB1, 'Send to Ipad', None).isActive())
class JobTest(unittest.TestCase):
def setUp(self):
self.translator = Mock()
self.protocol = protocol.Protocol56()
def test_moveToFrontOfQueue_Raises_Exeption_When_Job_Not_Queued(self):
job = self.createJob(jobStatus=JobStatus.FINISHED)
try:
job.moveToFrontOfQueue()
except StatusException, se:
log.debug(se)
self.assertTrue('queue' in str(se))
def test_moveToFrontOfQueue_From_10_Of_10(self):
# Setup
db = Mock()
conn = Mock()
jobs = []
for i in xrange(1, 11):
job = self.createJob(conn=conn, db=db, id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG)
job.scheduledRunTime = datetime.datetime.now().replace(year=(2000+i))
jobs.append(job)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
job = copy.copy(jobs[-1])
# Test
job.moveToFrontOfQueue()
# Verify
for i, j in enumerate(jobs[:-1]):
log.debug('job %s = %s' % (i, j))
            self.assertEqual(2000 + (i+2), j.scheduledRunTime.year)
log.debug('current job = %s' % job)
        self.assertEqual(2001, job.scheduledRunTime.year)
def test_moveToFrontOfQueue_From_5_Of_10(self):
# Setup
jobPos = 4 # zero based index
db = Mock()
conn = Mock()
jobs = []
for i in xrange(1, 11):
job = self.createJob(conn=conn, db=db, id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG)
job.scheduledRunTime = datetime.datetime.now().replace(year=(2000+i))
jobs.append(job)
#log.debug('%s' % job)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
job = copy.copy(jobs[jobPos])
# Test
job.moveToFrontOfQueue()
# Verify
# pushed back [1:4]
for i, j in enumerate(jobs[:jobPos]):
log.debug('job %s = %s' % (i, j))
            self.assertEqual(2000 + (i+2), j.scheduledRunTime.year)
# moved to first in queue
log.debug('current job = %s' % job)
        self.assertEqual(2001, job.scheduledRunTime.year)
# unaffected jobs [5,10]
for i, j in enumerate(jobs[jobPos+1:]):
log.debug('job %s = %s' % (i, j))
self.assertTrue(2000 + (i+2), j.scheduledRunTime.year)
def test_moveToFrontOfQueue_From_2_Of_2(self):
# Setup
db = Mock()
conn = Mock()
jobs = []
for i in xrange(1, 3):
job = self.createJob(conn=conn, db=db, id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG)
job.scheduledRunTime = datetime.datetime.now().replace(year=(2000+i))
jobs.append(job)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
job = copy.copy(jobs[1])
# Test
job.moveToFrontOfQueue()
# Verify
for i, j in enumerate(jobs[:-1]):
log.debug('job %s = %s' % (i, j))
self.assertEqual(2000 + (i+2), j.scheduledRunTime.year)
log.debug('current job = %s' % job)
self.assertEqual(2001, job.scheduledRunTime.year)
def test_getPositionInQueue_Position_Is_7_of_10(self):
# Setup
db = Mock()
conn = Mock()
jobs = []
for i in xrange(1, 11):
jobs.append(self.createJob(id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG))
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs)
job = self.createJob(conn=conn, db=db, id=7, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG)
# Test
pos, numJobs = job.getPositionInQueue()
# Verify
log.debug('Job is %d of %d' % (pos, numJobs))
self.assertEqual(7, pos)
self.assertEqual(10, numJobs)
def test_getPositionInQueue_Position_Is_1_of_1(self):
# Setup
db = Mock()
conn = Mock()
job = self.createJob(conn=conn, db=db, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG)
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn([job])
when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn([job])
# Test
pos, numJobs = job.getPositionInQueue()
# Verify
log.debug('Job is %d of %d' % (pos, numJobs))
self.assertEqual(1, pos)
self.assertEqual(1, numJobs)
def test_getPositionInQueue_RaisesException_JobStatus_Not_Queued(self):
when(self.translator).get(JobStatus.translations[JobStatus.FINISHED]).thenReturn('Finished')
job = self.createJob(jobStatus=JobStatus.FINISHED)
try:
job.getPositionInQueue()
self.fail('Expected StatusException since Finished jobs should not be in the queue')
except StatusException, se:
log.debug(se)
self.assertTrue('Finished' in str(se))
def test_getPercentComplete_Finished_Job_Returns_100(self):
job = self.createJob(jobStatus=JobStatus.FINISHED)
self.assertEqual(100, job.getPercentComplete())
def test_getPercentComplete_Pending_Job_Returns_0(self):
job = self.createJob(jobStatus=JobStatus.PENDING)
self.assertEqual(0, job.getPercentComplete())
    def test_getPercentComplete_Running_Job_Returns_76(self):
job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG)
job.comment = "76% Completed @ 13.9645 fps."
self.assertEqual(76, job.getPercentComplete())
def test_getPercentComplete_Raises_StatusException_WhenRunningButPercentCompletionNotAvailableYet(self):
job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG)
job.comment = "Logo detection"
try:
job.getPercentComplete()
except StatusException, se:
log.debug("%s" % se)
def test_getCommFlagRate_Running_Job_Returns_FPS(self):
job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG)
job.comment = "76% Completed @ 13.9645 fps."
rate = job.getCommFlagRate()
log.debug('Comm flag rate = %s' % rate)
self.assertAlmostEqual(13.9645, rate)
def test_getCommFlagRate_Raises_StatusException_WhenRunningButCommFlagRateNotAvailableYet(self):
job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG)
job.comment = "Logo detection"
try:
job.getCommFlagRate()
except StatusException, se:
log.debug("%s" % se)
def test_str_ShouldConvertToString(self):
when(self.translator).get(JobStatus.translations[JobStatus.QUEUED]).thenReturn('Queued')
when(self.translator).get(JobType.translations[JobType.SYSTEMJOB]).thenReturn('System')
s = "%s"%self.createJob(jobStatus=JobStatus.QUEUED, jobType=JobType.SYSTEMJOB)
log.debug('job = %s' % s)
self.assertTrue('System' in s)
self.assertTrue('Queued' in s)
def test_isJobFor_ShouldReturnTrue(self):
# Setup
job = self.createJob()
job.startTime = datetime.datetime(2009, 12, 5, 10, 20, 00)
job.channelId = 1999
data = [''] * self.protocol.recordSize()
data[4] = 1999
data[11] = time.mktime(datetime.datetime(2009, 12, 5, 10, 20, 00).timetuple())
program = RecordedProgram(data=data, settings=Mock(), translator=Mock(), platform=Mock(), protocol=self.protocol, conn=Mock())
# Test & verify
self.assertTrue(job.isJobFor(program))
def test_isJobFor_ShouldReturnFalse_TimesDontMatch(self):
# Setup
job = self.createJob()
job.startTime = datetime.datetime(2008, 11, 4, 23, 45, 00)
job.channelId = 1999
data = [''] * self.protocol.recordSize()
data[4] = 1999
data[11] = time.mktime(datetime.datetime(2009, 12, 5, 10, 20, 00).timetuple())
program = RecordedProgram(data=data, settings=Mock(), translator=Mock(), platform=Mock(), protocol=self.protocol, conn=Mock())
# Test & verify
self.assertFalse(job.isJobFor(program))
def test_isJobFor_ShouldReturnFalse_ChannelIds_DontMatch(self):
# Setup
job = self.createJob()
job.startTime = datetime.datetime(2008, 11, 4, 23, 45, 00)
job.channelId = 200
data = [''] * self.protocol.recordSize()
data[4] = 1999
data[11] = time.mktime(datetime.datetime(2008, 11, 4, 23, 45, 00).timetuple())
program = RecordedProgram(data=data, settings=Mock(), translator=Mock(), platform=Mock(), protocol=self.protocol, conn=Mock())
# Test & verify
self.assertFalse(job.isJobFor(program))
def test_eq_TrueForSameObjectInstance(self):
job = self.createJob()
self.assertTrue(job == job)
def test_eq_TrueForJobsWithTheSameId(self):
job1 = self.createJob(id=99)
job2 = self.createJob(id=99)
self.assertTrue(job1 == job2)
def test_eq_FalseForJobsWithDifferentIds(self):
job1 = self.createJob(id=99)
job2 = self.createJob(id=100)
self.assertFalse(job1 == job2)
def test_eq_FalseForInvalidType(self):
job1 = self.createJob(id=99)
job2 = "i am not of type Job"
self.assertFalse(job1 == job2)
def test_eq_FalseForNone(self):
job1 = self.createJob(id=99)
job2 = None
self.assertFalse(job1 == job2)
def test_isUserJob(self):
self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB1).isUserJob())
self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB2).isUserJob())
self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB3).isUserJob())
self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB4).isUserJob())
self.assertFalse(self.createJob(jobType=JobType.COMMFLAG).isUserJob())
self.assertFalse(self.createJob(jobType=JobType.SYSTEMJOB).isUserJob())
self.assertFalse(self.createJob(jobType=JobType.TRANSCODE).isUserJob())
def createJob(self, conn=Mock(), db=Mock(), domainCache=Mock(), id=1, jobType=JobType.COMMFLAG, jobStatus=JobStatus.FINISHED):
return Job(
id=id,
channelId=2,
startTime=None,
insertTime=None,
jobType=jobType,
cmds=None,
flags=None,
jobStatus=jobStatus,
statusTime=None,
hostname='localhost',
comment=None,
scheduledRunTime=None,
translator=self.translator,
conn=conn,
db=db,
domainCache=domainCache)
class BackendTest(unittest.TestCase):
def test_eq_True_by_reference(self):
be = Backend('htpc', '127.0.0.1', '6543', True)
self.assertTrue(be == be)
def test_eq_by_value(self):
bes = [Backend('htpc', '127.0.0.1', '6543', True),
Backend('htpc', '127.0.0.1', '6543', False),
Backend('htpc', '127.0.0.1', '8888', True),
Backend('htpc', '127.0.0.2', '6543', True),
Backend('slave', '127.0.0.1', '6543', True)]
for i, be1 in enumerate(bes):
for j, be2 in enumerate(bes):
if i == j:
self.assertTrue(be1 == be2)
else:
self.assertFalse(be1 == be2)
def test_eq_False_by_type(self):
self.assertFalse(Backend('slave', '127.0.0.1', '6543', True) == 'a string')
self.assertFalse(Backend('slave', '127.0.0.1', '6543', True) == None)
if __name__ == '__main__':
import logging.config
logging.config.fileConfig('mythbox_log.ini')
unittest.main()
|
gpl-2.0
| -5,593,516,536,011,099,000 | 5,605,676,289,220,795,000 | 41.622688 | 186 | 0.622067 | false |
Pablo126/SSBW
|
Tarea4/tarea4/lib/python3.5/site-packages/setuptools/namespaces.py
|
99
|
3181
|
import os
from distutils import log
import itertools
from six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
nspkg_ext = '-nspkg.pth'
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
def uninstall_namespaces(self):
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
if not os.path.exists(filename):
return
log.info("Removing %s", filename)
os.remove(filename)
def _get_target(self):
return self.target
_nspkg_tmpl = (
"import sys, types, os",
"has_mfs = sys.version_info > (3, 5)",
"p = os.path.join(%(root)s, *%(pth)r)",
"importlib = has_mfs and __import__('importlib.util')",
"has_mfs and __import__('importlib.machinery')",
"m = has_mfs and "
"sys.modules.setdefault(%(pkg)r, "
"importlib.util.module_from_spec("
"importlib.machinery.PathFinder.find_spec(%(pkg)r, "
"[os.path.dirname(p)])))",
"m = m or "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
def _get_root(self):
return "sys._getframe(1).f_locals['sitedir']"
def _gen_nspkg_line(self, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
root = self._get_root()
tmpl_lines = self._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += self._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
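    # Illustrative note (not authoritative; the package name 'a.b' is an
    # assumed example): _gen_nspkg_line joins the template lines above with
    # ';' into a single executable .pth line. For a dotted package the
    # _nspkg_tmpl_multi fragment is appended so the child module gets
    # attached to its parent, roughly:
    #
    #   import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(...);...;m and setattr(sys.modules['a'], 'b', m)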
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
pkgs = self.distribution.namespace_packages or []
return sorted(flatten(map(self._pkg_names, pkgs)))
@staticmethod
def _pkg_names(pkg):
"""
Given a namespace package, yield the components of that
package.
>>> names = Installer._pkg_names('a.b.c')
>>> set(names) == set(['a', 'a.b', 'a.b.c'])
True
"""
parts = pkg.split('.')
while parts:
yield '.'.join(parts)
parts.pop()
class DevelopInstaller(Installer):
def _get_root(self):
return repr(str(self.egg_path))
def _get_target(self):
return self.egg_link
|
gpl-3.0
| -2,179,863,759,628,758,500 | -7,937,007,166,438,748,000 | 28.728972 | 73 | 0.549198 | false |
jomolinare/kobocat
|
onadata/apps/main/migrations/0007_replace_special_chars_and_whitespace_in_usernames.py
|
13
|
7266
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
import re
class Migration(DataMigration):
def forwards(self, orm):
def update_username_for_user(user, match_on, sub_with):
regex = re.compile(match_on)
if regex.search(user.username):
user.username = regex.sub(sub_with, user.username)
user.save()
# remove whitespace and replace special chars with underscores
for user in orm['auth.User'].objects.all():
update_username_for_user(user, "\s", "")
update_username_for_user(user, "\W", "_")
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.metadata': {
'Meta': {'object_name': 'MetaData'},
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'data_file_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.XForm']"})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'home_page': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['main']
|
bsd-2-clause
| 5,093,569,678,623,629,000 | 5,733,095,847,560,432,000 | 69.543689 | 182 | 0.546518 | false |
hpcloud-mon/tempest
|
tempest/api/compute/servers/test_servers.py
|
4
|
5628
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.compute import base
from tempest import test
class ServersTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
def tearDown(self):
self.clear_servers()
super(ServersTestJSON, self).tearDown()
@test.attr(type='gate')
@test.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
def test_create_server_with_admin_password(self):
# If an admin password is provided on server creation, the server's
# root password should be set to that password.
server = self.create_test_server(adminPass='testpassword')
# Verify the password is set correctly in the response
self.assertEqual('testpassword', server['adminPass'])
@test.attr(type='gate')
@test.idempotent_id('8fea6be7-065e-47cf-89b8-496e6f96c699')
def test_create_with_existing_server_name(self):
# Creating a server with a name that already exists is allowed
# TODO(sdague): clear out try, we do cleanup one layer up
server_name = data_utils.rand_name('server')
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id1 = server['id']
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id2 = server['id']
self.assertNotEqual(id1, id2, "Did not create a new server")
server = self.client.get_server(id1)
name1 = server['name']
server = self.client.get_server(id2)
name2 = server['name']
self.assertEqual(name1, name2)
@test.attr(type='gate')
@test.idempotent_id('f9e15296-d7f9-4e62-b53f-a04e89160833')
def test_create_specify_keypair(self):
# Specify a keypair while creating a server
key_name = data_utils.rand_name('key')
self.keypairs_client.create_keypair(key_name)
self.keypairs_client.list_keypairs()
server = self.create_test_server(key_name=key_name)
self.client.wait_for_server_status(server['id'], 'ACTIVE')
server = self.client.get_server(server['id'])
self.assertEqual(key_name, server['key_name'])
def _update_server_name(self, server_id, status):
        # The server name should be changed to the provided value
new_name = data_utils.rand_name('server')
# Update the server with a new name
self.client.update_server(server_id,
name=new_name)
self.client.wait_for_server_status(server_id, status)
# Verify the name of the server has changed
server = self.client.get_server(server_id)
self.assertEqual(new_name, server['name'])
return server
@test.attr(type='gate')
@test.idempotent_id('5e6ccff8-349d-4852-a8b3-055df7988dd2')
def test_update_server_name(self):
        # The server name should be changed to the provided value
server = self.create_test_server(wait_until='ACTIVE')
self._update_server_name(server['id'], 'ACTIVE')
@test.attr(type='gate')
@test.idempotent_id('6ac19cb1-27a3-40ec-b350-810bdc04c08e')
def test_update_server_name_in_stop_state(self):
        # The server name should be changed to the provided value
server = self.create_test_server(wait_until='ACTIVE')
self.client.stop(server['id'])
self.client.wait_for_server_status(server['id'], 'SHUTOFF')
updated_server = self._update_server_name(server['id'], 'SHUTOFF')
self.assertNotIn('progress', updated_server)
@test.attr(type='gate')
@test.idempotent_id('89b90870-bc13-4b73-96af-f9d4f2b70077')
def test_update_access_server_address(self):
# The server's access addresses should reflect the provided values
server = self.create_test_server(wait_until='ACTIVE')
# Update the IPv4 and IPv6 access addresses
self.client.update_server(server['id'],
accessIPv4='1.1.1.1',
accessIPv6='::babe:202:202')
self.client.wait_for_server_status(server['id'], 'ACTIVE')
# Verify the access addresses have been updated
server = self.client.get_server(server['id'])
self.assertEqual('1.1.1.1', server['accessIPv4'])
self.assertEqual('::babe:202:202', server['accessIPv6'])
@test.attr(type='gate')
@test.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb')
def test_create_server_with_ipv6_addr_only(self):
# Create a server without an IPv4 address(only IPv6 address).
server = self.create_test_server(accessIPv6='2001:2001::3')
self.client.wait_for_server_status(server['id'], 'ACTIVE')
server = self.client.get_server(server['id'])
self.assertEqual('2001:2001::3', server['accessIPv6'])
|
apache-2.0
| 158,982,905,351,089,380 | -8,592,180,607,023,987,000 | 41.961832 | 78 | 0.650853 | false |
jchevin/MissionPlanner-master
|
packages/IronPython.StdLib.2.7.4/content/Lib/distutils/command/install_headers.py
|
251
|
1346
|
"""distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
__revision__ = "$Id$"
from distutils.core import Command
# XXX force is never used
class install_headers(Command):
description = "install C/C++ header files"
user_options = [('install-dir=', 'd',
"directory to install header files to"),
('force', 'f',
"force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def run(self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
# class install_headers
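# Illustrative usage sketch (the header file name is an assumed example): the
# command installs whatever the setup script declares under 'headers', e.g.
#
#   setup(name='demo', version='0.1', headers=['include/demo.h'])
#
# after which 'python setup.py install_headers' copies those files into the
# configured install_dir.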
|
gpl-3.0
| -6,880,490,278,027,322,000 | -2,679,528,332,469,156,000 | 25.392157 | 75 | 0.566122 | false |
acutesoftware/AIKIF
|
aikif/.z_prototype/create_word_lists.py
|
1
|
5833
|
# coding: utf-8
# create_word_lists.py written by Duncan Murray 3/2/2014
# creates a simple list of verbs, nouns and adjectives for
# simple 'bag of words' parsing.
# First implementation uses the following dataset:
# WordNet 3.1 Copyright 2011 by Princeton University.
import os
import sys
from xml.dom.minidom import parse, parseString
import xml
sys.path.append('..//..//aspytk')
#import as_util_data as dat
import lib_data as dat
import lib_file as fle
import xml.etree.ElementTree as ET
ontologyClassificationFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\dbpedia-ontology.owl.bz2.owl.bz2.owl'
ipFolder = 'S://DATA//opendata//datasets//dict//'
opFolder = '..//data//ref//' # os.getcwd()
def SaveListFirstWordOnly(msg, ipfile, opFile):
numRecs = 0
opList = []
rawList = dat.ReadFileToList(ipfile)
for line in rawList:
if line[0:1] != ' ':
numRecs = numRecs + 1
noun = line[0:line.find(' ')]
opList.append(noun)
dat.SaveListToFile(opList, opFile)
print ('Saved ' + str(numRecs) + ' ' + msg)
return numRecs
def ExtractListOfWords():
# ---- get the list of nouns, adverbs and adjectives ----
numRecs = SaveListFirstWordOnly('nouns', ipFolder + 'index.noun', opFolder + 'nounList.txt')
numRecs = SaveListFirstWordOnly('adverbs', ipFolder + 'index.adv', opFolder + 'advList.txt')
numRecs = SaveListFirstWordOnly('adjectives', ipFolder + 'index.adj', opFolder + 'adjList.txt')
# ---- get the list of verbs ---- (need some stemming here)
numRecs = 0
verbList = []
rawList = dat.ReadFileToList(ipFolder + 'index.verb')
for line in rawList:
if line[0:1] != ' ':
numRecs = numRecs + 1
verb = line[0:line.find(' ')]
verbList.append(verb)
verbList.append(verb + 's') # turns play to plays - TODO, use stemming algorithm
verbList.append(verb + 'ed')
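            # Editorial note with assumed examples: this naive suffixing can
            # produce non-words such as 'stop' -> 'stoped' or 'fly' -> 'flys',
            # which is why the TODO above calls for a real stemming algorithm.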
dat.SaveListToFile(verbList, opFolder + 'verbList.txt')
print ('Saved ' + str(numRecs) + ' verbs')
def ExtractCat(fname, opFile):
#Look at the wikipedia categories - no - far too detailed
#S:\DATA\opendata\ontology\wikipedia_categories\articlecategories_en.csv.bz2.csv.bz2.csv
#ipFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_en.csv.bz2.csv.bz2.csv'
# ipFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_en.csv.bz2.csv.bz2.csv'
# headFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_head.csv'
# sampleFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_sample.csv'
# dat.unix_head(ipFile, headFile, 5000)
# dat.getPercentRandomRecords(ipFile, sampleFile, 1)
# Later
# infoboxproperties_en.csv.bz2.csv.bz2 = list of properties or labels (name, age, BestBowlerFirstInnings, PilotName, ProducerAge, etc)
# ontology is in RDF format
print(fname + ' = ' + str(dat.countLinesInFile(fname)) + ' rows' )
# dom = xml.dom.minidom.parse( fname ) # parse an XML file
#print (dom1.toxml())
# dom.findall('owl:DatatypeProperty', namespaces=namespaces)
# for node in dom.getElementsByTagName('DatatypeProperty'): # visit every node <bar />
numFound = 0
categories = []
dom = parse( ontologyClassificationFile )
for node in dom.getElementsByTagName('rdfs:label'): # visit every node <bar />
#print (node.toxml())
numFound = numFound + 1
cat = dat.StriptHTMLtags(node.toxml())
print(cat)
categories.append(cat)
#print ('subclasses = ')
#for sub in node.findall('subClassOf'):
# print (sub.toxml())
dat.SaveListToFile(categories, opFile)
return numFound
def GetOntologyExtract(fname, txt):
numFound = 0
# see http://stackoverflow.com/questions/14853243/parsing-xml-with-namespace-in-python-elementtree
namespaces = {'owl': 'http://www.w3.org/2002/07/owl#'} # add more as needed
#namespaces = {'rdfs': 'http://www.w3.org/2000/01/rdf-schema#'}
print('Extracting ' + txt )
tree = ET.parse(fname)
doc = tree.getroot()
#nodes = doc.findall('owl:Class', namespaces=namespaces)
nodes = doc.findall(txt, namespaces=namespaces)
#nodes = doc.findall('rdfs:label', namespaces=namespaces)
print('found ' + str(len(nodes)) + ' nodes\n ' )
for node in nodes:
numFound = numFound + 1
for itm in node.items():
print (itm[1][28:])
#print(node.tail)
# find_in_tree(root,"myNode")
#print(node.attrib)
#print(len(node))
#print(node.get('rdfs'))
#print('node.text= ' + node.text)
#print('node.tag = ' + node.tag)
return numFound
#-----------------------------------------------------------#
# Build a Master Word List #
#-----------------------------------------------------------#
# Idea is to concatenate unique words from all lists and also
# names from contacts or baby names list (+country names, etc)
# to create one huge list of ALL words used in English sentences.
# Then, in a table each word has flags like isVerb, isNoun, isAdv,
# isAdjective, isName, isPerson, isPlace
# Once this is built, ANY sentence can be parsed into a list of word ids:
# instead of storing strings you store a series of numbers.
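# A minimal sketch of that idea (names and values below are assumptions and
# are not implemented anywhere in this script): each word is stored once with
# its flags, and a sentence is encoded as a list of word ids.
#
#   word_table = {1: {'word': 'dog', 'isNoun': True, 'isVerb': False},
#                 2: {'word': 'plays', 'isNoun': False, 'isVerb': True}}
#   word_ids = {row['word']: wid for wid, row in word_table.items()}
#   encoded = [word_ids[w] for w in ['dog', 'plays']] # -> [1, 2]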
# MAIN
opFile = '..\\data\\ref\\ontology_list.txt'
print('create_word_list.py - script to extract lists for AIKIF')
print('Reading - ' + ontologyClassificationFile)
#ExtractListOfWords()
print('Extracted ' + str(ExtractCat(ontologyClassificationFile, opFile)) + ' nodes to ' + opFile)
#print('Found ' + str(GetOntologyExtract(ontologyClassificationFile, 'owl:Class')) + ' nodes')
print('Done..')
|
gpl-3.0
| 4,748,379,829,372,659,000 | -9,130,035,654,488,228,000 | 40.077465 | 138 | 0.640665 | false |
Jet-Streaming/framework
|
deps/v8/test/simdjs/testcfg.py
|
3
|
2290
|
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import sys
from testrunner.local import testsuite
from testrunner.objects import testcase
SIMDJS_SUITE_PATH = ["data", "src"]
class SimdJsTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(SimdJsTestSuite, self).__init__(name, root)
self.testroot = os.path.join(self.root, *SIMDJS_SUITE_PATH)
self.ParseTestRecord = None
def ListTests(self, context):
tests = [
testcase.TestCase(self, 'shell_test_runner'),
]
for filename in os.listdir(os.path.join(self.testroot, 'benchmarks')):
if (not filename.endswith('.js') or
filename in ['run.js', 'run_browser.js', 'base.js']):
continue
name = filename.rsplit('.')[0]
tests.append(
testcase.TestCase(self, 'benchmarks/' + name))
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + context.mode_flags +
[os.path.join(self.root, "harness-adapt.js"),
"--harmony", "--harmony-simd",
os.path.join(self.testroot, testcase.path + ".js"),
os.path.join(self.root, "harness-finish.js")])
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
def IsNegativeTest(self, testcase):
return False
def IsFailureOutput(self, testcase):
if testcase.output.exit_code != 0:
return True
return "FAILED!" in testcase.output.stdout
def DownloadData(self):
print "SimdJs download is deprecated. It's part of DEPS."
# Clean up old directories and archive files.
directory_old_name = os.path.join(self.root, "data.old")
if os.path.exists(directory_old_name):
shutil.rmtree(directory_old_name)
archive_files = [f for f in os.listdir(self.root)
if f.startswith("ecmascript_simd-")]
if len(archive_files) > 0:
print "Clobber outdated test archives ..."
for f in archive_files:
os.remove(os.path.join(self.root, f))
def GetSuite(name, root):
return SimdJsTestSuite(name, root)
|
mpl-2.0
| -1,911,200,911,262,901,800 | 6,963,233,902,943,558,000 | 30.369863 | 74 | 0.658515 | false |
JavML/django
|
django/middleware/csrf.py
|
118
|
9930
|
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import is_same_domain
from django.utils.six.moves.urllib.parse import urlparse
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" not in request.META:
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
request.META["CSRF_COOKIE_USED"] = True
return request.META["CSRF_COOKIE"]
def rotate_token(request):
"""
Changes the CSRF token in use for a request - should be done on login
for security purposes.
"""
request.META.update({
"CSRF_COOKIE_USED": True,
"CSRF_COOKIE": _get_new_csrf_key(),
})
def _sanitize_token(token):
# Allow only alphanum
if len(token) > CSRF_KEY_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
logger.warning('Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = force_text(
request.META.get('HTTP_REFERER'),
strings_only=True,
errors='replace'
)
if referer is None:
return self._reject(request, REASON_NO_REFERER)
referer = urlparse(referer)
# Make sure we have a valid URL for Referer.
if '' in (referer.scheme, referer.netloc):
return self._reject(request, REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if referer.scheme != 'https':
return self._reject(request, REASON_INSECURE_REFERER)
# If there isn't a CSRF_COOKIE_DOMAIN, assume we need an exact
# match on host:port. If not, obey the cookie rules.
if settings.CSRF_COOKIE_DOMAIN is None:
# request.get_host() includes the port.
good_referer = request.get_host()
else:
good_referer = settings.CSRF_COOKIE_DOMAIN
server_port = request.META['SERVER_PORT']
if server_port not in ('443', '80'):
good_referer = '%s:%s' % (good_referer, server_port)
# Here we generate a list of all acceptable HTTP referers,
# including the current host since that has been validated
# upstream.
good_hosts = list(settings.CSRF_TRUSTED_ORIGINS)
good_hosts.append(good_referer)
if not any(is_same_domain(referer.netloc, host) for host in good_hosts):
reason = REASON_BAD_REFERER % referer.geturl()
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
except IOError:
# Handle a broken connection before we've completed reading
# the POST data. process_view shouldn't raise any
# exceptions, so we'll ignore and serve the user a 403
# (assuming they're still listening, which they probably
# aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
if not constant_time_compare(request_csrf_token, csrf_token):
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
|
bsd-3-clause
| -2,649,882,466,842,691,000 | 1,476,402,927,631,368,400 | 40.375 | 95 | 0.597885 | false |
Infinidat/gitpy
|
gitpy/tag.py
|
1
|
1690
|
# Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .ref import Ref
class Tag(Ref):
def __repr__(self):
return "<tag %s>" % (self.name,)
class LocalTag(Tag):
pass
class RemoteTag(Tag):
pass
|
bsd-3-clause
| 8,283,912,155,158,568,000 | -6,388,940,185,117,171,000 | 50.212121 | 79 | 0.750296 | false |
dsully/SoCo
|
soco/alarms.py
|
6
|
11801
|
# -*- coding: utf-8 -*-
"""This module contains classes relating to Sonos Alarms."""
from __future__ import unicode_literals
import logging
import re
import weakref
from datetime import datetime
from .core import PLAY_MODES
from .xml import XML
log = logging.getLogger(__name__) # pylint: disable=C0103
TIME_FORMAT = "%H:%M:%S"
def is_valid_recurrence(text):
"""Check that ``text`` is a valid recurrence string.
A valid recurrence string is ``DAILY``, ``ONCE``, ``WEEKDAYS``,
``WEEKENDS`` or of the form ``ON_DDDDDD`` where ``D`` is a number from 0-7
representing a day of the week (Sunday is 0), e.g. ``ON_034`` meaning
Sunday, Wednesday and Thursday
Args:
text (str): the recurrence string to check.
Returns:
bool: `True` if the recurrence string is valid, else `False`.
Examples:
>>> from soco.alarms import is_valid_recurrence
>>> is_valid_recurrence('WEEKENDS')
True
>>> is_valid_recurrence('')
False
>>> is_valid_recurrence('ON_132') # Mon, Tue, Wed
True
>>> is_valid_recurrence('ON_777') # Sat
True
>>> is_valid_recurrence('ON_3421') # Mon, Tue, Wed, Thur
True
>>> is_valid_recurrence('ON_123456789') # Too many digits
False
"""
if text in ("DAILY", "ONCE", "WEEKDAYS", "WEEKENDS"):
return True
return re.search(r'^ON_[0-7]{1,7}$', text) is not None
class Alarm(object):
"""A class representing a Sonos Alarm.
Alarms may be created or updated and saved to, or removed from the Sonos
system. An alarm is not automatically saved. Call `save()` to do that.
Example:
>>> device = soco.discovery.any_soco()
>>> # create an alarm with default properties
>>> alarm = Alarm(device)
>>> print alarm.volume
20
>>> print get_alarms()
set([])
>>> # save the alarm to the Sonos system
>>> alarm.save()
>>> print get_alarms()
set([<Alarm id:88@15:26:15 at 0x107abb090>])
>>> # update the alarm
>>> alarm.recurrence = "ONCE"
>>> # Save it again for the change to take effect
>>> alarm.save()
>>> # Remove it
>>> alarm.remove()
>>> print get_alarms()
set([])
"""
# pylint: disable=too-many-instance-attributes
_all_alarms = weakref.WeakValueDictionary()
# pylint: disable=too-many-arguments
def __init__(
self, zone, start_time=None, duration=None,
recurrence='DAILY', enabled=True,
program_uri=None, program_metadata='',
play_mode='NORMAL', volume=20, include_linked_zones=False):
"""
Args:
zone (`SoCo`): The soco instance which will play the alarm.
start_time (`datetime.time`, optional): The alarm's start time.
Specify hours, minutes and seconds only. Defaults to the
current time.
duration (`datetime.time`, optional): The alarm's duration. Specify
hours, minutes and seconds only. May be `None` for unlimited
duration. Defaults to `None`.
recurrence (str, optional): A string representing how
often the alarm should be triggered. Can be ``DAILY``,
``ONCE``, ``WEEKDAYS``, ``WEEKENDS`` or of the form
``ON_DDDDDD`` where ``D`` is a number from 0-7 representing a
day of the week (Sunday is 0), e.g. ``ON_034`` meaning Sunday,
Wednesday and Thursday. Defaults to ``DAILY``.
enabled (bool, optional): `True` if alarm is enabled, `False`
otherwise. Defaults to `True`.
program_uri(str, optional): The uri to play. If `None`, the
built-in Sonos chime sound will be used. Defaults to `None`.
program_metadata (str, optional): The metadata associated with
`program_uri`. Defaults to ''.
play_mode(str, optional): The play mode for the alarm. Can be one
of ``NORMAL``, ``SHUFFLE_NOREPEAT``, ``SHUFFLE``,
``REPEAT_ALL``. Defaults to ``NORMAL``.
volume (int, optional): The alarm's volume (0-100). Defaults to 20.
include_linked_zones (bool, optional): `True` if the alarm should
be played on the other speakers in the same group, `False`
otherwise. Defaults to `False`.
"""
super(Alarm, self).__init__()
self.zone = zone
if start_time is None:
start_time = datetime.now().time()
#: `datetime.time`: The alarm's start time.
self.start_time = start_time
#: `datetime.time`: The alarm's duration.
self.duration = duration
self._recurrence = recurrence
#: `bool`: `True` if the alarm is enabled, else `False`.
self.enabled = enabled
        #: `str`: The uri to play. If `None`, the built-in Sonos chime
        #: sound will be used.
        self.program_uri = program_uri
        #: `str`: The metadata associated with `program_uri`.
        self.program_metadata = program_metadata
self._play_mode = play_mode
self._volume = volume
#: `bool`: `True` if the alarm should be played on the other speakers
#: in the same group, `False` otherwise.
self.include_linked_zones = include_linked_zones
self._alarm_id = None
def __repr__(self):
middle = str(self.start_time.strftime(TIME_FORMAT))
return "<{0} id:{1}@{2} at {3}>".format(
self.__class__.__name__, self._alarm_id, middle, hex(id(self)))
@property
def play_mode(self):
"""
`str`: The play mode for the alarm.
Can be one of ``NORMAL``, ``SHUFFLE_NOREPEAT``, ``SHUFFLE``,
``REPEAT_ALL``.
"""
return self._play_mode
@play_mode.setter
def play_mode(self, play_mode):
"""See `playmode`."""
play_mode = play_mode.upper()
if play_mode not in PLAY_MODES:
raise KeyError("'%s' is not a valid play mode" % play_mode)
self._play_mode = play_mode
@property
def volume(self):
"""`int`: The alarm's volume (0-100)."""
return self._volume
@volume.setter
def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) # Coerce in range
@property
def recurrence(self):
"""`str`: How often the alarm should be triggered.
Can be ``DAILY``, ``ONCE``, ``WEEKDAYS``, ``WEEKENDS`` or of the form
``ON_DDDDDDD`` where ``D`` is a number from 0-7 representing a day of
the week (Sunday is 0), e.g. ``ON_034`` meaning Sunday, Wednesday and
Thursday.
"""
return self._recurrence
@recurrence.setter
def recurrence(self, recurrence):
"""See `recurrence`."""
if not is_valid_recurrence(recurrence):
raise KeyError("'%s' is not a valid recurrence value" % recurrence)
self._recurrence = recurrence
def save(self):
"""Save the alarm to the Sonos system.
Raises:
~soco.exceptions.SoCoUPnPException: if the alarm cannot be created
because there
is already an alarm for this room at the specified time.
"""
# pylint: disable=bad-continuation
args = [
('StartLocalTime', self.start_time.strftime(TIME_FORMAT)),
('Duration', '' if self.duration is None else
self.duration.strftime(TIME_FORMAT)),
('Recurrence', self.recurrence),
('Enabled', '1' if self.enabled else '0'),
('RoomUUID', self.zone.uid),
('ProgramURI', "x-rincon-buzzer:0" if self.program_uri is None
else self.program_uri),
('ProgramMetaData', self.program_metadata),
('PlayMode', self.play_mode),
('Volume', self.volume),
('IncludeLinkedZones', '1' if self.include_linked_zones else '0')
]
if self._alarm_id is None:
response = self.zone.alarmClock.CreateAlarm(args)
self._alarm_id = response['AssignedID']
Alarm._all_alarms[self._alarm_id] = self
else:
# The alarm has been saved before. Update it instead.
args.insert(0, ('ID', self._alarm_id))
self.zone.alarmClock.UpdateAlarm(args)
def remove(self):
"""Remove the alarm from the Sonos system.
There is no need to call `save`. The Python instance is not deleted,
and can be saved back to Sonos again if desired.
"""
self.zone.alarmClock.DestroyAlarm([
('ID', self._alarm_id)
])
alarm_id = self._alarm_id
try:
del Alarm._all_alarms[alarm_id]
except KeyError:
pass
self._alarm_id = None
def get_alarms(soco=None):
"""Get a set of all alarms known to the Sonos system.
Args:
soco (`SoCo`, optional): a SoCo instance to query. If None, a random
instance is used. Defaults to `None`.
Returns:
set: A set of `Alarm` instances
Note:
Any existing `Alarm` instance will have its attributes updated to those
currently stored on the Sonos system.
"""
# Get a soco instance to query. It doesn't matter which.
    if soco is None:
        from . import discovery
        soco = discovery.any_soco()
response = soco.alarmClock.ListAlarms()
alarm_list = response['CurrentAlarmList']
tree = XML.fromstring(alarm_list.encode('utf-8'))
# An alarm list looks like this:
# <Alarms>
# <Alarm ID="14" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ1400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# <Alarm ID="15" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ01400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# </Alarms>
# pylint: disable=protected-access
alarms = tree.findall('Alarm')
result = set()
for alarm in alarms:
values = alarm.attrib
alarm_id = values['ID']
# If an instance already exists for this ID, update and return it.
# Otherwise, create a new one and populate its values
if Alarm._all_alarms.get(alarm_id):
instance = Alarm._all_alarms.get(alarm_id)
else:
instance = Alarm(None)
instance._alarm_id = alarm_id
Alarm._all_alarms[instance._alarm_id] = instance
instance.start_time = datetime.strptime(
values['StartTime'], "%H:%M:%S").time() # NB StartTime, not
# StartLocalTime, which is used by CreateAlarm
instance.duration = None if values['Duration'] == '' else\
datetime.strptime(values['Duration'], "%H:%M:%S").time()
instance.recurrence = values['Recurrence']
instance.enabled = values['Enabled'] == '1'
instance.zone = [zone for zone in soco.all_zones
if zone.uid == values['RoomUUID']][0]
instance.program_uri = None if values['ProgramURI'] ==\
"x-rincon-buzzer:0" else values['ProgramURI']
instance.program_metadata = values['ProgramMetaData']
instance.play_mode = values['PlayMode']
instance.volume = values['Volume']
instance.include_linked_zones = values['IncludeLinkedZones'] == '1'
result.add(instance)
return result
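def _example_prune_disabled_alarms(zone):
    """Illustrative sketch, not part of the module: remove disabled alarms.
    Assumes ``zone`` is a reachable `SoCo` instance (e.g. the result of
    ``soco.discovery.any_soco()``).
    """
    for alarm in get_alarms(zone):
        if not alarm.enabled:
            alarm.remove()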
|
mit
| 7,764,021,769,282,148,000 | 7,780,054,261,294,627,000 | 36.227129 | 79 | 0.573002 | false |
phantasien/falkor
|
deps/bastian/tools/gyp/pylib/gyp/ordered_dict.py
|
2354
|
10366
|
# Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
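# Illustrative usage sketch (not part of the backport above): the linked-list
# bookkeeping preserves insertion order and makes popitem() order-aware.
if __name__ == '__main__':
    od = OrderedDict()
    od['b'] = 1
    od['a'] = 2
    od['c'] = 3
    assert od.keys() == ['b', 'a', 'c']          # insertion order, not sorted order
    od['b'] = 99                                 # re-assignment keeps the original slot
    assert od.popitem() == ('c', 3)              # LIFO by default
    assert od.popitem(last=False) == ('b', 99)   # FIFO when last is False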
|
mit
| 7,947,180,647,540,951,000 | 5,650,295,614,457,429,000 | 34.868512 | 87 | 0.578912 | false |
sebrandon1/bitcoin
|
test/functional/test_framework/address.py
|
19
|
2850
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
from . import segwit_addr
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
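# Illustrative usage sketch (not part of the helpers above). The key below is
# a hypothetical compressed-pubkey-shaped value (0x02 prefix plus filler
# bytes) used only to exercise the encoders; it is not a real key.
if __name__ == '__main__':
    pubkey = b'\x02' + b'\x11' * 32            # 33 bytes, accepted by check_key()
    print(key_to_p2pkh(pubkey))                # regtest/testnet P2PKH (version 111)
    print(key_to_p2pkh(pubkey, main=True))     # mainnet P2PKH (version 0)
    print(key_to_p2wpkh(pubkey))               # native segwit address, 'bcrt' prefix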
|
mit
| 1,125,720,701,722,624,300 | -6,681,754,912,897,125,000 | 31.758621 | 73 | 0.659649 | false |
awkspace/ansible
|
test/units/modules/storage/netapp/test_na_ontap_unix_user.py
|
43
|
11306
|
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_unix_user \
import NetAppOntapUnixUser as user_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'user':
xml = self.build_user_info(self.params)
elif self.kind == 'user-fail':
raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
self.xml_out = xml
return xml
@staticmethod
def build_user_info(data):
''' build xml data for vserser-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = \
{'attributes-list': {'unix-user-info': {'user-id': data['id'],
'group-id': data['group_id'], 'full-name': data['full_name']}},
'num-records': 1}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.mock_user = {
'name': 'test',
'id': '11',
'group_id': '12',
'vserver': 'something',
'full_name': 'Test User'
}
def mock_args(self):
return {
'name': self.mock_user['name'],
'group_id': self.mock_user['group_id'],
'id': self.mock_user['id'],
'vserver': self.mock_user['vserver'],
'full_name': self.mock_user['full_name'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_user_mock_object(self, kind=None, data=None):
"""
Helper method to return an na_ontap_unix_user object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_unix_user object
"""
obj = user_module()
obj.autosupport_log = Mock(return_value=None)
if data is None:
data = self.mock_user
obj.server = MockONTAPConnection(kind=kind, data=data)
return obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
user_module()
def test_get_nonexistent_user(self):
''' Test if get_unix_user returns None for non-existent user '''
set_module_args(self.mock_args())
result = self.get_user_mock_object().get_unix_user()
assert result is None
def test_get_existing_user(self):
''' Test if get_unix_user returns details for existing user '''
set_module_args(self.mock_args())
result = self.get_user_mock_object('user').get_unix_user()
assert result['full_name'] == self.mock_user['full_name']
def test_get_xml(self):
set_module_args(self.mock_args())
obj = self.get_user_mock_object('user')
result = obj.get_unix_user()
assert obj.server.xml_in['query']
assert obj.server.xml_in['query']['unix-user-info']
user_info = obj.server.xml_in['query']['unix-user-info']
assert user_info['user-name'] == self.mock_user['name']
assert user_info['vserver'] == self.mock_user['vserver']
def test_create_error_missing_params(self):
data = self.mock_args()
del data['group_id']
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_user_mock_object('user').create_unix_user()
assert 'Error: Missing one or more required parameters for create: (group_id, id)' == exc.value.args[0]['msg']
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user')
def test_create_called(self, create_user):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_user_mock_object().apply()
assert exc.value.args[0]['changed']
create_user.assert_called_with()
def test_create_xml(self):
'''Test create ZAPI element'''
set_module_args(self.mock_args())
create = self.get_user_mock_object()
with pytest.raises(AnsibleExitJson) as exc:
create.apply()
mock_key = {
'user-name': 'name',
'group-id': 'group_id',
'user-id': 'id',
'full-name': 'full_name'
}
for key in ['user-name', 'user-id', 'group-id', 'full-name']:
assert create.server.xml_in[key] == self.mock_user[mock_key[key]]
def test_create_wihtout_full_name(self):
'''Test create ZAPI element'''
data = self.mock_args()
del data['full_name']
set_module_args(data)
create = self.get_user_mock_object()
with pytest.raises(AnsibleExitJson) as exc:
create.apply()
with pytest.raises(KeyError):
create.server.xml_in['full-name']
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user')
def test_delete_called(self, delete_user, modify_user):
''' Test delete existing user '''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_user_mock_object('user').apply()
assert exc.value.args[0]['changed']
delete_user.assert_called_with()
assert modify_user.call_count == 0
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.get_unix_user')
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
def test_modify_called(self, modify_user, get_user):
''' Test modify user group_id '''
data = self.mock_args()
data['group_id'] = 20
set_module_args(data)
get_user.return_value = {'group_id': 10}
obj = self.get_user_mock_object('user')
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
get_user.assert_called_with()
modify_user.assert_called_with({'group_id': 20})
def test_modify_only_id(self):
''' Test modify user id '''
set_module_args(self.mock_args())
modify = self.get_user_mock_object('user')
modify.modify_unix_user({'id': 123})
assert modify.server.xml_in['user-id'] == '123'
with pytest.raises(KeyError):
modify.server.xml_in['group-id']
with pytest.raises(KeyError):
modify.server.xml_in['full-name']
def test_modify_xml(self):
''' Test modify user full_name '''
set_module_args(self.mock_args())
modify = self.get_user_mock_object('user')
modify.modify_unix_user({'full_name': 'New Name',
'group_id': '25'})
assert modify.server.xml_in['user-name'] == self.mock_user['name']
assert modify.server.xml_in['full-name'] == 'New Name'
assert modify.server.xml_in['group-id'] == '25'
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user')
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user')
@patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user')
def test_do_nothing(self, modify, delete, create):
        ''' changed is False and none of the operation methods are called'''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
obj = self.get_user_mock_object()
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
create.assert_not_called()
delete.assert_not_called()
modify.assert_not_called()
def test_get_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_user_mock_object('user-fail').get_unix_user()
assert 'Error getting UNIX user' in exc.value.args[0]['msg']
def test_create_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_user_mock_object('user-fail').create_unix_user()
assert 'Error creating UNIX user' in exc.value.args[0]['msg']
def test_modify_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_user_mock_object('user-fail').modify_unix_user({'id': '123'})
assert 'Error modifying UNIX user' in exc.value.args[0]['msg']
def test_delete_exception(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleFailJson) as exc:
self.get_user_mock_object('user-fail').delete_unix_user()
assert 'Error removing UNIX user' in exc.value.args[0]['msg']
|
gpl-3.0
| 6,712,992,580,340,319,000 | -9,129,114,235,579,714,000 | 39.092199 | 118 | 0.613126 | false |
shish/sdog
|
sdog/monitor.py
|
1
|
4065
|
#!/usr/bin/env python
import socket
import os
import subprocess
from optparse import OptionParser
from time import time, sleep
from select import select
import sys
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(title):
pass
def main(argv=sys.argv):
parser = OptionParser(usage="%prog [sdog options] -- daemon-to-run [daemon options]")
parser.add_option("-t", "--timeout", dest="timeout", type=int, default=10,
help="Maximum seconds between pings", metavar="N")
parser.add_option("-r", "--respawn", dest="respawn", type=int, default=1,
help="Delay between respawns", metavar="N")
parser.add_option("-T", "--title", dest="title",
help="Set process title", metavar="NAME")
parser.add_option("-s", "--socket", dest="soc_loc",
# FIXME: probably (almost certainly) insecure,
# need tmpfile.NamedTemporaryFile() for sockets
default="/tmp/sdog-%d.sock" % os.getpid(),
help="Path to socket", metavar="FILE")
parser.add_option("-v", "--verbose", dest="verbose",
default=False, action="store_true",
help="Verbose mode")
(options, args) = parser.parse_args()
if args:
launch(options, args)
else:
parser.error("Need to specify a program to launch")
def launch(options, args):
c = Child(options, args)
try:
c.watch()
finally:
if os.path.exists(options.soc_loc):
os.unlink(options.soc_loc)
class Child(object):
def __init__(self, opts, args):
self.opts = opts
self.args = args
self.proc = None
self.ready = False
self.sock = None
self.last_ok = 0
def watch(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.sock.bind(self.opts.soc_loc)
while True:
try:
self.poll()
except Exception as e:
print "SDog Error:", e
self.proc = None
sleep(5)
def status(self, status):
if self.opts.verbose:
print status
setproctitle("sdog: %s: %s" % (self.opts.title or self.args[0], status))
def poll(self):
if not self.proc:
self.status("spawning: %s" % self.args)
env = os.environ.copy()
env["NOTIFY_SOCKET"] = self.opts.soc_loc
self.proc = subprocess.Popen(self.args, env=env)
self.status("launched subprocess with PID: %d" % self.proc.pid)
self.last_ok = time()
self.ready = False
return
status = self.proc.poll()
if status is not None:
self.status("Process exited with status code %d, respawning after %d seconds" % (status, self.opts.respawn))
self.proc = None
sleep(self.opts.respawn)
return
rs, ws, xs = select([self.sock], [], [], 1.0)
if rs:
packet, addr = self.sock.recvfrom(1024)
for line in packet.split("\n"):
k, _, v = line.partition("=")
#print "Got message: ", k, v
if k == "WATCHDOG" and v == "1":
self.last_ok = time()
if k == "READY" and v == "1" and not self.ready:
self.status("Daemon is ready")
self.ready = True
if k == "STATUS":
self.status(v)
if k == "ERRNO":
self.errno = v
if k == "BUSERROR":
self.buserror = v
if k == "MAINPID":
self.mainpid = v
if time() > self.last_ok + self.opts.timeout:
self.status("No OK message for %d seconds, killing child" % (time() - self.last_ok))
self.proc.kill()
self.proc = None
if __name__ == "__main__":
sys.exit(main(sys.argv))
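# Illustrative sketch of the child-side protocol this monitor parses (not part
# of sdog itself): a daemon launched by sdog reads NOTIFY_SOCKET from its
# environment and sends datagrams such as READY=1, STATUS=... and WATCHDOG=1.
def _example_child_loop():
    notify = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    target = os.environ["NOTIFY_SOCKET"]
    notify.sendto("READY=1\nSTATUS=warming up", target)
    while True:
        # ... one unit of real work goes here ...
        notify.sendto("WATCHDOG=1", target)  # must arrive within --timeout seconds
        sleep(1)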
|
mit
| 8,198,986,138,202,922,000 | -4,416,333,537,569,022,500 | 32.04878 | 120 | 0.518819 | false |
40223105/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/reprlib.py
|
923
|
5110
|
"""Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
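# Illustrative usage sketch (not part of the module above): the limits decide
# how much of a long container or string is shown before eliding with '...'.
if __name__ == '__main__':
    short = Repr()
    short.maxlist = 3
    short.maxstring = 12
    print(short.repr(list(range(20))))       # '[0, 1, 2, ...]'
    print(short.repr('x' * 50))              # "'xxx...xxxx'" - the middle is elided
    print(repr({i: i for i in range(10)}))   # module-level repr uses the defaults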
|
gpl-3.0
| 431,125,589,050,205,630 | 1,061,713,378,209,060,100 | 31.547771 | 79 | 0.526614 | false |
rakeshmi/cinder
|
cinder/tests/unit/test_replication.py
|
5
|
5069
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Volume replication code.
"""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit import utils as test_utils
from cinder.volume import driver
CONF = cfg.CONF
class VolumeReplicationTestCase(test.TestCase):
def setUp(self):
super(VolumeReplicationTestCase, self).setUp()
self.ctxt = context.RequestContext('user', 'fake', False)
self.adm_ctxt = context.RequestContext('admin', 'fake', True)
self.manager = importutils.import_object(CONF.volume_manager)
self.manager.host = 'test_host'
self.manager.stats = {'allocated_capacity_gb': 0}
self.driver_patcher = mock.patch.object(self.manager, 'driver',
spec=driver.VolumeDriver)
self.driver = self.driver_patcher.start()
@mock.patch('cinder.utils.require_driver_initialized')
def test_promote_replica_uninit_driver(self, _init):
"""Test promote replication when driver is not initialized."""
_init.side_effect = exception.DriverNotInitialized
vol = test_utils.create_volume(self.ctxt,
status='available',
replication_status='active')
self.driver.promote_replica.return_value = None
self.assertRaises(exception.DriverNotInitialized,
self.manager.promote_replica,
self.adm_ctxt,
vol['id'])
def test_promote_replica(self):
"""Test promote replication."""
vol = test_utils.create_volume(self.ctxt,
status='available',
replication_status='active')
self.driver.promote_replica.return_value = \
{'replication_status': 'inactive'}
self.manager.promote_replica(self.adm_ctxt, vol['id'])
vol_after = db.volume_get(self.ctxt, vol['id'])
self.assertEqual(vol_after['replication_status'], 'inactive')
def test_promote_replica_fail(self):
"""Test promote replication when promote fails."""
vol = test_utils.create_volume(self.ctxt,
status='available',
replication_status='active')
self.driver.promote_replica.side_effect = exception.CinderException
self.assertRaises(exception.CinderException,
self.manager.promote_replica,
self.adm_ctxt,
vol['id'])
def test_reenable_replication(self):
"""Test reenable replication."""
vol = test_utils.create_volume(self.ctxt,
status='available',
replication_status='error')
self.driver.reenable_replication.return_value = \
{'replication_status': 'copying'}
self.manager.reenable_replication(self.adm_ctxt, vol['id'])
vol_after = db.volume_get(self.ctxt, vol['id'])
self.assertEqual(vol_after['replication_status'], 'copying')
@mock.patch('cinder.utils.require_driver_initialized')
def test_reenable_replication_uninit_driver(self, _init):
"""Test reenable replication when driver is not initialized."""
_init.side_effect = exception.DriverNotInitialized
vol = test_utils.create_volume(self.ctxt,
status='available',
replication_status='error')
self.assertRaises(exception.DriverNotInitialized,
self.manager.reenable_replication,
self.adm_ctxt,
vol['id'])
def test_reenable_replication_fail(self):
"""Test promote replication when driver is not initialized."""
vol = test_utils.create_volume(self.ctxt,
status='available',
replication_status='error')
self.driver.reenable_replication.side_effect = \
exception.CinderException
self.assertRaises(exception.CinderException,
self.manager.reenable_replication,
self.adm_ctxt,
vol['id'])
|
apache-2.0
| -4,654,042,234,293,038,000 | 3,904,778,608,097,449,000 | 43.858407 | 78 | 0.588874 | false |
benfinke/ns_python
|
build/lib/nssrc/com/citrix/netscaler/nitro/resource/stat/system/system_stats.py
|
3
|
18080
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class system_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._voltagev12n = 0
self._voltagev5n = 0
self._cpuusage = 0
self._rescpuusage = 0
self._slavecpuusage = 0
self._mastercpuusage = 0
self._auxvolt7 = 0
self._auxvolt6 = 0
self._auxvolt5 = 0
self._auxvolt4 = 0
self._auxvolt3 = 0
self._auxvolt2 = 0
self._auxvolt1 = 0
self._auxvolt0 = 0
self._voltagevsen2 = 0
self._voltagev5sb = 0
self._voltagevtt = 0
self._voltagevbat = 0
self._voltagev12p = 0
self._voltagev5p = 0
self._voltagev33stby = 0
self._voltagev33main = 0
self._voltagevcc1 = 0
self._voltagevcc0 = 0
self._numcpus = 0
self._memusagepcnt = 0
self._memuseinmb = 0
self._mgmtcpuusagepcnt = 0
self._pktcpuusagepcnt = 0
self._cpuusagepcnt = 0
self._rescpuusagepcnt = 0
self._starttime = ""
self._disk0perusage = 0
self._disk1perusage = 0
self._cpufan0speed = 0
self._cpufan1speed = 0
self._systemfanspeed = 0
self._fan0speed = 0
self._fanspeed = 0
self._cpu0temp = 0
self._cpu1temp = 0
self._internaltemp = 0
self._powersupply1status = ""
self._powersupply2status = ""
self._disk0size = 0
self._disk0used = 0
self._disk0avail = 0
self._disk1size = 0
self._disk1used = 0
self._disk1avail = 0
self._fan2speed = 0
self._fan3speed = 0
self._fan4speed = 0
self._fan5speed = 0
self._auxtemp0 = 0
self._auxtemp1 = 0
self._auxtemp2 = 0
self._auxtemp3 = 0
self._powersupply3status = ""
self._powersupply4status = ""
self._timesincestart = ""
self._memsizemb = 0
@property
def clearstats(self) :
ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def voltagevbat(self) :
ur"""Onboard battery power supply output. 9800 and 9950 platforms display standard value of 5.0V.
"""
try :
return self._voltagevbat
except Exception as e:
raise e
@property
def auxvolt2(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 2.
"""
try :
return self._auxvolt2
except Exception as e:
raise e
@property
def voltagev5n(self) :
ur"""Power supply -5V output. Acceptable range is -5.50 through -4.50 volts. 9800 and 9960 platforms display standard value of -5.0V.
"""
try :
return self._voltagev5n
except Exception as e:
raise e
@property
def auxvolt7(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 7.
"""
try :
return self._auxvolt7
except Exception as e:
raise e
@property
def cpu0temp(self) :
ur"""CPU 0 temperature. 9800 and 9960 platforms display internal chip temperature. This is a critical counter.
You can configure CPU 0 Temperature by using the Set snmp alarm TEMPERATURE-HIGH command to set the upper limit.
"""
try :
return self._cpu0temp
except Exception as e:
raise e
@property
def fan0speed(self) :
ur"""System fan 1 speed. For new platforms associated pin is connected to CPU supporting fans. For platforms in which it is not connected, it will point to System Fan.
"""
try :
return self._fan0speed
except Exception as e:
raise e
@property
def disk1perusage(self) :
ur"""Used space in /var partition of the disk, as a percentage. This is a critical counter. You can configure /var Used (%) by using the Set snmp alarm DISK-USAGE-HIGH command.
"""
try :
return self._disk1perusage
except Exception as e:
raise e
@property
def rescpuusagepcnt(self) :
ur"""Average CPU utilization percentage. Not applicable for a single-CPU system.
"""
try :
return self._rescpuusagepcnt
except Exception as e:
raise e
@property
def disk1used(self) :
ur"""Used space in /var partition of the hard disk.
"""
try :
return self._disk1used
except Exception as e:
raise e
@property
def disk1avail(self) :
ur"""Available space in /var partition of the hard disk.
"""
try :
return self._disk1avail
except Exception as e:
raise e
@property
def fan4speed(self) :
ur"""Speed of Fan 2 if associated pin is connected to health monitoring chip.
"""
try :
return self._fan4speed
except Exception as e:
raise e
@property
def rescpuusage(self) :
ur"""Shows average CPU utilization percentage if more than 1 CPU is present.
"""
try :
return self._rescpuusage
except Exception as e:
raise e
@property
def powersupply3status(self) :
ur"""Power supply 3 failure status.
"""
try :
return self._powersupply3status
except Exception as e:
raise e
@property
def auxvolt5(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 5.
"""
try :
return self._auxvolt5
except Exception as e:
raise e
@property
def auxvolt3(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 3.
"""
try :
return self._auxvolt3
except Exception as e:
raise e
@property
def disk0perusage(self) :
ur"""Used space in /flash partition of the disk, as a percentage. This is a critical counter.
You can configure /flash Used (%) by using the Set snmp alarm DISK-USAGE-HIGH command.
"""
try :
return self._disk0perusage
except Exception as e:
raise e
@property
def fan2speed(self) :
ur"""Speed of Fan 0 if associated pin is connected to health monitoring chip.
"""
try :
return self._fan2speed
except Exception as e:
raise e
@property
def powersupply4status(self) :
ur"""Power supply 4 failure status.
"""
try :
return self._powersupply4status
except Exception as e:
raise e
@property
def auxvolt1(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 1.
"""
try :
return self._auxvolt1
except Exception as e:
raise e
@property
def fanspeed(self) :
ur"""System fan 2 speed. For new platforms associated pin is connected to CPU supporting fans. For platforms in which it is not connected, it will point to System Fan.
"""
try :
return self._fanspeed
except Exception as e:
raise e
@property
def fan5speed(self) :
ur"""Speed of Fan 3 if associated pin is connected to health monitoring chip.
"""
try :
return self._fan5speed
except Exception as e:
raise e
@property
def disk0size(self) :
ur"""Size of /flash partition of the hard disk.
"""
try :
return self._disk0size
except Exception as e:
raise e
@property
def mgmtcpuusagepcnt(self) :
ur"""Management CPU utilization percentage.
"""
try :
return self._mgmtcpuusagepcnt
except Exception as e:
raise e
@property
def cpuusage(self) :
ur"""CPU utilization percentage.
"""
try :
return self._cpuusage
except Exception as e:
raise e
@property
def voltagev5sb(self) :
ur"""Power Supply 5V Standby Voltage. Currently only 13k Platforms will have valid value for this counter and for older platforms this will be 0.
"""
try :
return self._voltagev5sb
except Exception as e:
raise e
@property
def disk0used(self) :
ur"""Used space in /flash partition of the hard disk.
"""
try :
return self._disk0used
except Exception as e:
raise e
@property
def powersupply1status(self) :
ur"""Power supply 1 failure status.
"""
try :
return self._powersupply1status
except Exception as e:
raise e
@property
def cpufan0speed(self) :
ur"""CPU Fan 0 speed. Acceptable range is 3000 through 6000 RPM. This is a critical counter.
You can configure CPU Fan 0 Speed by using the Set snmp alarm FAN-SPEED-LOW command to set the lower limit.
"""
try :
return self._cpufan0speed
except Exception as e:
raise e
@property
def disk1size(self) :
ur"""Size of /var partition of the hard disk.
"""
try :
return self._disk1size
except Exception as e:
raise e
@property
def auxtemp1(self) :
ur"""Temperature of a device connected to health monitoring chip through pin 1.
"""
try :
return self._auxtemp1
except Exception as e:
raise e
@property
def numcpus(self) :
ur"""The number of CPUs on the NetScaler appliance.
"""
try :
return self._numcpus
except Exception as e:
raise e
@property
def pktcpuusagepcnt(self) :
ur"""Average CPU utilization percentage for all packet engines excluding management PE.
"""
try :
return self._pktcpuusagepcnt
except Exception as e:
raise e
@property
def voltagev5p(self) :
ur"""Power supply +5V output. Acceptable range is 4.50 through 5.50 volts.
"""
try :
return self._voltagev5p
except Exception as e:
raise e
@property
def voltagevsen2(self) :
ur"""Voltage Sensor 2 Input. Currently only 13k Platforms will have valid value for this counter and for older platforms this will be 0.
"""
try :
return self._voltagevsen2
except Exception as e:
raise e
@property
def auxvolt0(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 0.
"""
try :
return self._auxvolt0
except Exception as e:
raise e
@property
def auxtemp2(self) :
ur"""Temperature of a device connected to health monitoring chip through pin 2.
"""
try :
return self._auxtemp2
except Exception as e:
raise e
@property
def memsizemb(self) :
ur"""Total amount of system memory, in megabytes.
"""
try :
return self._memsizemb
except Exception as e:
raise e
@property
def voltagev33main(self) :
ur"""Main power supply +3.3V output. Acceptable range is 2.970 through 3.630 volts. This is a critical counter.
You can configure Main 3.3V Supply Voltage, by using the Set snmp alarm VOLTAGE-LOW command to set the lower limit and the Set snmp alarm VOLTAGE-HIGH command to set the upper limit.
"""
try :
return self._voltagev33main
except Exception as e:
raise e
@property
def cpu1temp(self) :
ur"""CPU 1 temperature. 9800 and 9960 platforms display internal chip temperature. 7000, 9010 and 10010 platforms display CPU 0 temperature. This is a critical counter.
You can configure CPU 1 Temperature by using the Set snmp alarm TEMPERATURE-HIGH command to set the upper limit.
"""
try :
return self._cpu1temp
except Exception as e:
raise e
@property
def voltagev12n(self) :
ur"""Power supply -12V output. Acceptable range is -13.20 through -10.80 volts. 9800 and 9960 platforms display standard value of -12.0V.
"""
try :
return self._voltagev12n
except Exception as e:
raise e
@property
def memuseinmb(self) :
ur"""Main memory currently in use, in megabytes.
"""
try :
return self._memuseinmb
except Exception as e:
raise e
@property
def auxtemp3(self) :
ur"""Temperature of a device connected to health monitoring chip through pin 3.
"""
try :
return self._auxtemp3
except Exception as e:
raise e
@property
def internaltemp(self) :
ur"""Internal temperature of health monitoring chip. This is a critical counter.
You can configure Internal Temperature by using the Set snmp alarm TEMPERATURE-HIGH command to set the upper limit.
"""
try :
return self._internaltemp
except Exception as e:
raise e
@property
def voltagev12p(self) :
ur"""Power supply +12V output. Acceptable range is 10.80 through 13.20 volts.
"""
try :
return self._voltagev12p
except Exception as e:
raise e
@property
def disk0avail(self) :
ur"""Available space in /flash partition of the hard disk.
"""
try :
return self._disk0avail
except Exception as e:
raise e
@property
def voltagev33stby(self) :
ur"""Standby power supply +3.3V output. Acceptable range is 2.970 through 3.630 volts. 9800 and 9960 platforms display standard value of 3.3V.
You can configure Standby 3.3V Supply Voltage by using the Set snmp alarm VOLTAGE-LOW command to set the lower limit and the Set snmp alarm VOLTAGE-HIGH command to set the upper limit.
"""
try :
return self._voltagev33stby
except Exception as e:
raise e
@property
def voltagevcc1(self) :
ur"""CPU core 1 voltage. Acceptable range is 1.080 through 1.650 volts. If CPU 1 is not connected to the health monitoring chip, display shows voltage of CPU 0.
"""
try :
return self._voltagevcc1
except Exception as e:
raise e
@property
def fan3speed(self) :
ur"""Speed of Fan 1 if associated pin is connected to health monitoring chip.
"""
try :
return self._fan3speed
except Exception as e:
raise e
@property
def voltagevtt(self) :
ur"""Intel CPU Vtt power. Currently only 13k Platforms will have valid value for this counter and for older platforms this will be 0.
"""
try :
return self._voltagevtt
except Exception as e:
raise e
@property
def auxtemp0(self) :
ur"""Temperature of a device connected to health monitoring chip through pin 0.
"""
try :
return self._auxtemp0
except Exception as e:
raise e
@property
def cpufan1speed(self) :
ur"""CPU Fan 1 speed. Acceptable range is 3000 through 6000 RPM. 7000 platform displays speed of CPU fan 0. This is a critical counter.
You can configure CPU Fan 1 Speed by using the Set snmp alarm FAN-SPEED-LOW command to set the lower limit.
"""
try :
return self._cpufan1speed
except Exception as e:
raise e
@property
def voltagevcc0(self) :
ur"""CPU core 0 voltage. Acceptable range is 1.080 through 1.650 volts.
"""
try :
return self._voltagevcc0
except Exception as e:
raise e
@property
def auxvolt4(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 4.
"""
try :
return self._auxvolt4
except Exception as e:
raise e
@property
def starttime(self) :
ur"""Time when the NetScaler appliance was last started.
"""
try :
return self._starttime
except Exception as e:
raise e
@property
def systemfanspeed(self) :
ur"""System fan speed. Acceptable range is 3000 through 6000 RPM. This is a critical counter.
You can configure System Fan Speed by using the Set snmp alarm FAN-SPEED-LOW command to set the lower limit.
"""
try :
return self._systemfanspeed
except Exception as e:
raise e
@property
def cpuusagepcnt(self) :
ur"""CPU utilization percentage.
"""
try :
return self._cpuusagepcnt
except Exception as e:
raise e
@property
def mastercpuusage(self) :
ur"""CPU 0 (currently the master CPU) utilization, as percentage of capacity.
"""
try :
return self._mastercpuusage
except Exception as e:
raise e
@property
def timesincestart(self) :
ur"""Seconds since the NetScaler appliance started.
"""
try :
return self._timesincestart
except Exception as e:
raise e
@property
def auxvolt6(self) :
ur"""Voltage of a device connected to health monitoring chip through pin 6.
"""
try :
return self._auxvolt6
except Exception as e:
raise e
@property
def slavecpuusage(self) :
ur"""CPU 1 (currently the slave CPU) utilization, as percentage of capacity. Not applicable for a single-CPU system.
"""
try :
return self._slavecpuusage
except Exception as e:
raise e
@property
def memusagepcnt(self) :
ur"""Percentage of memory utilization on NetScaler.
"""
try :
return self._memusagepcnt
except Exception as e:
raise e
@property
def powersupply2status(self) :
ur"""Power supply 2 failure status.
"""
try :
return self._powersupply2status
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(system_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.system
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
ur""" Use this API to fetch the statistics of all system_stats resources that are configured on netscaler.
"""
try :
obj = system_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class system_response(base_response) :
def __init__(self, length=1) :
self.system = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.system = [system_stats() for _ in range(length)]
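# --- Editorial usage sketch (not part of the original SDK file) ---
# A minimal, hedged example of reading the stat properties defined earlier in
# this file. The configured nitro_service object `client` and the list-shaped
# return value are assumptions for illustration; only system_stats.get() is
# actually defined above.
#
#   stats = system_stats.get(client)
#   first = stats[0]                      # assumed list-like response
#   print(first.cpuusagepcnt, first.memusagepcnt, first.systemfanspeed)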
|
apache-2.0
| 2,341,400,355,557,804,000 | -482,349,336,189,258,600 | 23.903581 | 186 | 0.698617 | false |
yannrouillard/weboob
|
weboob/applications/boobill/boobill.py
|
2
|
8189
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012-2013 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import sys
from decimal import Decimal
from weboob.capabilities.bill import ICapBill, Detail, Subscription
from weboob.tools.application.repl import ReplApplication, defaultcount
from weboob.tools.application.formatters.iformatter import PrettyFormatter
from weboob.tools.application.base import MoreResultsAvailable
from weboob.core import CallErrors
__all__ = ['Boobill']
class SubscriptionsFormatter(PrettyFormatter):
MANDATORY_FIELDS = ('id', 'label')
def get_title(self, obj):
if obj.renewdate:
return u"%s - %s" % (obj.label, obj.renewdate.strftime('%d/%m/%y'))
return obj.label
class Boobill(ReplApplication):
APPNAME = 'boobill'
VERSION = '0.i'
COPYRIGHT = 'Copyright(C) 2012 Florent Fourcot'
DESCRIPTION = 'Console application allowing to get and download bills.'
SHORT_DESCRIPTION = "get and download bills"
CAPS = ICapBill
COLLECTION_OBJECTS = (Subscription, )
EXTRA_FORMATTERS = {'subscriptions': SubscriptionsFormatter,
}
DEFAULT_FORMATTER = 'table'
COMMANDS_FORMATTERS = {'subscriptions': 'subscriptions',
'ls': 'subscriptions',
}
def main(self, argv):
self.load_config()
return ReplApplication.main(self, argv)
def exec_method(self, id, method):
l = []
id, backend_name = self.parse_id(id)
if not id:
for subscrib in self.get_object_list('iter_subscription'):
l.append((subscrib.id, subscrib.backend))
else:
l.append((id, backend_name))
more_results = []
not_implemented = []
self.start_format()
for id, backend in l:
names = (backend,) if backend is not None else None
try:
for backend, result in self.do(method, id, backends=names):
self.format(result)
except CallErrors as errors:
for backend, error, backtrace in errors:
if isinstance(error, MoreResultsAvailable):
more_results.append(id + u'@' + backend.name)
elif isinstance(error, NotImplementedError):
if backend not in not_implemented:
not_implemented.append(backend)
else:
self.bcall_error_handler(backend, error, backtrace)
if len(more_results) > 0:
print >>sys.stderr, 'Hint: There are more results available for %s (use option -n or count command)' % (', '.join(more_results))
for backend in not_implemented:
print >>sys.stderr, u'Error(%s): This feature is not supported yet by this backend.' % backend.name
def do_subscriptions(self, line):
"""
subscriptions
List all subscriptions.
"""
self.start_format()
for subscription in self.get_object_list('iter_subscription'):
self.format(subscription)
def do_details(self, id):
"""
details [ID]
Get details of subscriptions.
If no ID given, display all details of all backends.
"""
l = []
id, backend_name = self.parse_id(id)
if not id:
for subscrib in self.get_object_list('iter_subscription'):
l.append((subscrib.id, subscrib.backend))
else:
l.append((id, backend_name))
for id, backend in l:
names = (backend,) if backend is not None else None
# XXX: should be generated by backend? -Flo
# XXX: no, but you should do it in a specific formatter -romain
# TODO: do it, and use exec_method here. Code is obsolete
mysum = Detail()
mysum.label = u"Sum"
mysum.infos = u"Generated by boobill"
mysum.price = Decimal("0.")
self.start_format()
for backend, detail in self.do('get_details', id, backends=names):
self.format(detail)
mysum.price = detail.price + mysum.price
self.format(mysum)
def do_balance(self, id):
"""
balance [ID]
Get balance of subscriptions.
If no ID given, display balance of all backends.
"""
self.exec_method(id, 'get_balance')
@defaultcount(10)
def do_history(self, id):
"""
history [ID]
Get the history of subscriptions.
If no ID given, display histories of all backends.
"""
self.exec_method(id, 'iter_bills_history')
@defaultcount(10)
def do_bills(self, id):
"""
bills [ID]
Get the list of bill documents for subscriptions.
If no ID given, display bills of all backends.
"""
self.exec_method(id, 'iter_bills')
def do_download(self, line):
"""
download [ID | all] [FILENAME]
download ID [FILENAME]
download the bill
ID is the identifier of the bill (hint: try the bills command)
FILENAME is where to write the file. If FILENAME is '-',
the file is written to stdout.
download all [ID]
You can use special word "all" and download all bills of
subscription identified by ID.
If ID is not given, download bills of all subscriptions.
"""
id, dest = self.parse_command_args(line, 2, 1)
id, backend_name = self.parse_id(id)
if not id:
print >>sys.stderr, 'Error: please give a bill ID (hint: use bills command)'
return 2
names = (backend_name,) if backend_name is not None else None
# Special keywords, download all bills of all subscriptions
if id == "all":
if dest is None:
for backend, subscription in self.do('iter_subscription', backends=names):
self.download_all(subscription.id, names)
return
else:
self.download_all(dest, names)
return
if dest is None:
for backend, bill in self.do('get_bill', id, backends=names):
dest = id + "." + bill.format
for backend, buf in self.do('download_bill', id, backends=names):
if buf:
if dest == "-":
print buf
else:
try:
with open(dest, 'w') as f:
f.write(buf)
except IOError as e:
print >>sys.stderr, 'Unable to write bill in "%s": %s' % (dest, e)
return 1
return
def download_all(self, id, names):
id, backend_name = self.parse_id(id)
for backend, bill in self.do('iter_bills', id, backends=names):
dest = bill.id + "." + bill.format
for backend2, buf in self.do('download_bill', bill.id, backends=names):
if buf:
if dest == "-":
print buf
else:
try:
with open(dest, 'w') as f:
f.write(buf)
except IOError as e:
print >>sys.stderr, 'Unable to write bill in "%s": %s' % (dest, e)
return 1
return
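# --- Editorial usage sketch (not part of the original application) ---
# A hedged example of how the commands documented in the docstrings above might
# be invoked from a shell; the identifiers and backend name are hypothetical.
#
#   $ boobill subscriptions                    # list subscriptions
#   $ boobill bills sub123@somebackend         # list bill documents of one subscription
#   $ boobill download bill456@somebackend out.pdf
#   $ boobill download all                     # download every bill of every subscription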
|
agpl-3.0
| 6,179,938,815,415,786,000 | -1,556,981,328,334,697,700 | 33.995726 | 140 | 0.559043 | false |
bgxavier/nova
|
nova/tests/unit/virt/test_events.py
|
113
|
1178
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from nova import test
from nova.virt import event
class TestEvents(test.NoDBTestCase):
def test_event_repr(self):
t = time.time()
uuid = '1234'
lifecycle = event.EVENT_LIFECYCLE_RESUMED
e = event.Event(t)
self.assertEqual(str(e), "<Event: %s>" % t)
e = event.InstanceEvent(uuid, timestamp=t)
self.assertEqual(str(e), "<InstanceEvent: %s, %s>" % (t, uuid))
e = event.LifecycleEvent(uuid, lifecycle, timestamp=t)
self.assertEqual(str(e), "<LifecycleEvent: %s, %s => Resumed>" %
(t, uuid))
|
apache-2.0
| -3,646,026,463,429,049,000 | -6,097,894,297,385,241,000 | 31.722222 | 75 | 0.668081 | false |
chenjun0210/tensorflow
|
tensorflow/python/kernel_tests/slice_op_test.py
|
48
|
11026
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in xrange(4):
with self.test_session(use_gpu=True):
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[2, k:k])
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in xrange(4):
with self.test_session(use_gpu=True):
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[2, k:k])
def testSelectAll(self):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, slice_explicit_t.eval())
self.assertAllEqual(inp, slice_implicit_t.eval())
self.assertEqual(inp.shape, slice_explicit_t.get_shape())
self.assertEqual(inp.shape, slice_implicit_t.get_shape())
def testSingleDimension(self):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(10).astype("f")
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
scalar_val = scalar_t.eval()
self.assertAllEqual(scalar_val, inp[hi])
if hi > 0:
lo = np.random.randint(0, hi)
else:
lo = 0
slice_t = a[lo:hi]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[lo:hi])
def testScalarInput(self):
input_val = 0
with self.test_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
constant_op.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[:]
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def testInvalidIndex(self):
input_val = [1, 2]
with self.test_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
constant_op.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[1:, 1:]
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def _testSliceMatrixDim0(self, x, begin, size):
with self.test_session(use_gpu=True):
tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()
np_ans = x[begin:begin + size, :]
self.assertAllEqual(tf_ans, np_ans)
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
def testSingleElementAll(self):
for _ in range(10):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
slice_val = slice_t.eval()
self.assertAllEqual(slice_val, inp[x, 0:y])
def testSimple(self):
with self.test_session(use_gpu=True) as sess:
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 4],
dtype=dtypes.float32)
slice_t = array_ops.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
self.assertAllEqual(slice_val, inp[:2, :2])
self.assertAllEqual(slice2_val, inp[:2, :2])
self.assertEqual(slice_val.shape, slice_t.get_shape())
self.assertEqual(slice2_val.shape, slice2_t.get_shape())
def testComplex(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 10, 10, 4).astype("f")
a = constant_op.constant(inp, dtype=dtypes.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
if z > 0:
y = np.random.randint(0, z)
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])
def testRandom(self):
# Random dims of rank 6
input_shape = np.random.randint(0, 20, size=6)
inp = np.random.rand(*input_shape).astype("f")
with self.test_session(use_gpu=True) as sess:
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
sizes = [
np.random.randint(0, input_shape[i] - indices[i] + 1)
for i in range(6)
]
slice_t = array_ops.slice(a, indices, sizes)
slice2_t = a[indices[0]:indices[0] + sizes[0], indices[1]:indices[
1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[3]
+ sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:
indices[5] + sizes[5]]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
expected_val = inp[indices[0]:indices[0] + sizes[0], indices[1]:indices[
1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[
3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:indices[
5] + sizes[5]]
self.assertAllEqual(slice_val, expected_val)
self.assertAllEqual(slice2_val, expected_val)
self.assertEqual(expected_val.shape, slice_t.get_shape())
self.assertEqual(expected_val.shape, slice2_t.get_shape())
def _testGradientSlice(self, input_shape, slice_begin, slice_size):
with self.test_session(use_gpu=True):
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
slice_t = array_ops.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
grad_tensor = constant_op.constant(grads)
grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
result = grad.eval()
# Create a zero tensor of the input shape and place
# the grads into the right location to compare against TensorFlow.
np_ans = np.zeros(input_shape)
slices = []
for i in xrange(len(input_shape)):
slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
np_ans[slices] = grads
self.assertAllClose(np_ans, result)
def _testGradientVariableSize(self):
with self.test_session(use_gpu=True):
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
out = array_ops.slice(inp, [1], [-1])
grad_actual = gradients_impl.gradients(out, inp)[0].eval()
self.assertAllClose([0., 1., 1.], grad_actual)
def testGradientsAll(self):
# Slice the middle square out of a 4x4 input
self._testGradientSlice([4, 4], [1, 1], [2, 2])
# Slice the upper left square out of a 4x4 input
self._testGradientSlice([4, 4], [0, 0], [2, 2])
# Slice a non-square input starting from (2,1)
self._testGradientSlice([4, 4], [2, 1], [1, 2])
# Slice a 3D tensor
self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
# Use -1 as a slice dimension.
self._testGradientVariableSize()
def testNotIterable(self):
# NOTE(mrry): If we register __getitem__ as an overloaded
# operator, Python will valiantly attempt to iterate over the
# Tensor from 0 to infinity. This test ensures that this
# unintended behavior is prevented.
c = constant_op.constant(5.0)
with self.assertRaisesWithPredicateMatch(
TypeError, lambda e: "'Tensor' object is not iterable" in str(e)):
for _ in c:
pass
def testComputedShape(self):
# NOTE(mrry): We cannot currently handle partially-known values,
# because `tf.slice()` uses -1 to specify a wildcard size, and
# this can't be handled using the
# `tensor_util.constant_value_as_shape()` trick.
a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
begin = constant_op.constant(0)
size = constant_op.constant(1)
b = array_ops.slice(a, [begin, 0], [size, 2])
self.assertEqual([1, 2], b.get_shape())
begin = array_ops.placeholder(dtypes.int32, shape=())
c = array_ops.slice(a, [begin, 0], [-1, 2])
self.assertEqual([None, 2], c.get_shape().as_list())
def testSliceOfSlice(self):
with self.test_session(use_gpu=True):
a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
b = a[1:, :]
c = b[:-1, :]
d = c[1, :]
res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
self.assertAllEqual([0, 0, 0], res.eval())
if __name__ == "__main__":
test.main()
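# --- Editorial note (not part of the original test file) ---
# A hedged restatement of the equivalence exercised by testSimple above: the
# explicit slice op and the Python slicing syntax select the same block.
#
#   a = constant_op.constant([[1., 2., 3.], [4., 5., 6.]])
#   array_ops.slice(a, [0, 0], [2, 2])   # begin=[0, 0], size=[2, 2]
#   a[:2, :2]                            # same 2x2 block via __getitem__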
|
apache-2.0
| -4,272,495,883,738,696,000 | 672,863,105,960,012,300 | 37.961131 | 80 | 0.616361 | false |
youssef-emad/shogun
|
examples/undocumented/python_modular/classifier_multiclass_ecoc.py
|
24
|
2869
|
#!/usr/bin/env python
import re
import time
from tools.multiclass_shared import prepare_data
# run with toy data
[traindat, label_traindat, testdat, label_testdat] = prepare_data()
# run with opt-digits if available
#[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)
parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5]]
def classifier_multiclass_ecoc (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,lawidth=2.1,C=1,epsilon=1e-5):
import modshogun
from modshogun import ECOCStrategy, LibLinear, L2R_L2LOSS_SVC, LinearMulticlassMachine
from modshogun import MulticlassAccuracy
from modshogun import RealFeatures, MulticlassLabels
def nonabstract_class(name):
try:
getattr(modshogun, name)()
except TypeError:
return False
return True
encoders = [x for x in dir(modshogun)
if re.match(r'ECOC.+Encoder', x) and nonabstract_class(x)]
decoders = [x for x in dir(modshogun)
if re.match(r'ECOC.+Decoder', x) and nonabstract_class(x)]
fea_train = RealFeatures(fm_train_real)
fea_test = RealFeatures(fm_test_real)
gnd_train = MulticlassLabels(label_train_multiclass)
if label_test_multiclass is None:
gnd_test = None
else:
gnd_test = MulticlassLabels(label_test_multiclass)
base_classifier = LibLinear(L2R_L2LOSS_SVC)
base_classifier.set_bias_enabled(True)
#print('Testing with %d encoders and %d decoders' % (len(encoders), len(decoders)))
#print('-' * 70)
#format_str = '%%15s + %%-10s %%-10%s %%-10%s %%-10%s'
#print((format_str % ('s', 's', 's')) % ('encoder', 'decoder', 'codelen', 'time', 'accuracy'))
def run_ecoc(ier, idr):
encoder = getattr(modshogun, encoders[ier])()
decoder = getattr(modshogun, decoders[idr])()
# whether encoder is data dependent
if hasattr(encoder, 'set_labels'):
encoder.set_labels(gnd_train)
encoder.set_features(fea_train)
strategy = ECOCStrategy(encoder, decoder)
classifier = LinearMulticlassMachine(strategy, fea_train, base_classifier, gnd_train)
classifier.train()
label_pred = classifier.apply(fea_test)
if gnd_test is not None:
evaluator = MulticlassAccuracy()
acc = evaluator.evaluate(label_pred, gnd_test)
else:
acc = None
return (classifier.get_num_machines(), acc)
for ier in range(len(encoders)):
for idr in range(len(decoders)):
t_begin = time.clock()
(codelen, acc) = run_ecoc(ier, idr)
if acc is None:
acc_fmt = 's'
acc = 'N/A'
else:
acc_fmt = '.4f'
t_elapse = time.clock() - t_begin
#print((format_str % ('d', '.3f', acc_fmt)) %
# (encoders[ier][4:-7], decoders[idr][4:-7], codelen, t_elapse, acc))
if __name__=='__main__':
print('MulticlassECOC')
classifier_multiclass_ecoc(*parameter_list[0])
|
gpl-3.0
| -8,234,155,351,363,457,000 | -6,892,794,526,558,041,000 | 31.977011 | 180 | 0.682816 | false |
Phonebooth/depot_tools
|
third_party/coverage/results.py
|
49
|
10023
|
"""Results of coverage measurement."""
import os
from coverage.backward import iitems, set, sorted # pylint: disable=W0622
from coverage.misc import format_lines, join_regex, NoSource
from coverage.parser import CodeParser
class Analysis(object):
"""The results of analyzing a code unit."""
def __init__(self, cov, code_unit):
self.coverage = cov
self.code_unit = code_unit
self.filename = self.code_unit.filename
actual_filename, source = self.find_source(self.filename)
self.parser = CodeParser(
text=source, filename=actual_filename,
exclude=self.coverage._exclude_regex('exclude')
)
self.statements, self.excluded = self.parser.parse_source()
# Identify missing statements.
executed = self.coverage.data.executed_lines(self.filename)
exec1 = self.parser.first_lines(executed)
self.missing = sorted(set(self.statements) - set(exec1))
if self.coverage.data.has_arcs():
self.no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list)
)
n_branches = self.total_branches()
mba = self.missing_branch_arcs()
n_partial_branches = sum(
[len(v) for k,v in iitems(mba) if k not in self.missing]
)
n_missing_branches = sum([len(v) for k,v in iitems(mba)])
else:
n_branches = n_partial_branches = n_missing_branches = 0
self.no_branch = set()
self.numbers = Numbers(
n_files=1,
n_statements=len(self.statements),
n_excluded=len(self.excluded),
n_missing=len(self.missing),
n_branches=n_branches,
n_partial_branches=n_partial_branches,
n_missing_branches=n_missing_branches,
)
def find_source(self, filename):
"""Find the source for `filename`.
Returns two values: the actual filename, and the source.
The source returned depends on which of these cases holds:
* The filename seems to be a non-source file: returns None
* The filename is a source file, and actually exists: returns None.
* The filename is a source file, and is in a zip file or egg:
returns the source.
* The filename is a source file, but couldn't be found: raises
`NoSource`.
"""
source = None
base, ext = os.path.splitext(filename)
TRY_EXTS = {
'.py': ['.py', '.pyw'],
'.pyw': ['.pyw'],
}
try_exts = TRY_EXTS.get(ext)
if not try_exts:
return filename, None
for try_ext in try_exts:
try_filename = base + try_ext
if os.path.exists(try_filename):
return try_filename, None
source = self.coverage.file_locator.get_zip_data(try_filename)
if source:
return try_filename, source
raise NoSource("No source for code: '%s'" % filename)
def missing_formatted(self):
"""The missing line numbers, formatted nicely.
Returns a string like "1-2, 5-11, 13-14".
"""
return format_lines(self.statements, self.missing)
def has_arcs(self):
"""Were arcs measured in this result?"""
return self.coverage.data.has_arcs()
def arc_possibilities(self):
"""Returns a sorted list of the arcs in the code."""
arcs = self.parser.arcs()
return arcs
def arcs_executed(self):
"""Returns a sorted list of the arcs actually executed in the code."""
executed = self.coverage.data.executed_arcs(self.filename)
m2fl = self.parser.first_line
executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed]
return sorted(executed)
def arcs_missing(self):
"""Returns a sorted list of the arcs in the code not executed."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
missing = [
p for p in possible
if p not in executed
and p[0] not in self.no_branch
]
return sorted(missing)
def arcs_unpredicted(self):
"""Returns a sorted list of the executed arcs missing from the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
# Exclude arcs here which connect a line to itself. They can occur
# in executed data in some cases. This is where they can cause
# trouble, and here is where it's the least burden to remove them.
unpredicted = [
e for e in executed
if e not in possible
and e[0] != e[1]
]
return sorted(unpredicted)
def branch_lines(self):
"""Returns a list of line numbers that have more than one exit."""
exit_counts = self.parser.exit_counts()
return [l1 for l1,count in iitems(exit_counts) if count > 1]
def total_branches(self):
"""How many total branches are there?"""
exit_counts = self.parser.exit_counts()
return sum([count for count in exit_counts.values() if count > 1])
def missing_branch_arcs(self):
"""Return arcs that weren't executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
"""
missing = self.arcs_missing()
branch_lines = set(self.branch_lines())
mba = {}
for l1, l2 in missing:
if l1 in branch_lines:
if l1 not in mba:
mba[l1] = []
mba[l1].append(l2)
return mba
def branch_stats(self):
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
(total_exits, taken_exits).
"""
exit_counts = self.parser.exit_counts()
missing_arcs = self.missing_branch_arcs()
stats = {}
for lnum in self.branch_lines():
exits = exit_counts[lnum]
try:
missing = len(missing_arcs[lnum])
except KeyError:
missing = 0
stats[lnum] = (exits, exits - missing)
return stats
class Numbers(object):
"""The numerical results of measuring coverage.
This holds the basic statistics from `Analysis`, and is used to roll
up statistics across files.
"""
# A global to determine the precision on coverage percentages, the number
# of decimal places.
_precision = 0
_near0 = 1.0 # These will change when _precision is changed.
_near100 = 99.0
def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
n_branches=0, n_partial_branches=0, n_missing_branches=0
):
self.n_files = n_files
self.n_statements = n_statements
self.n_excluded = n_excluded
self.n_missing = n_missing
self.n_branches = n_branches
self.n_partial_branches = n_partial_branches
self.n_missing_branches = n_missing_branches
def set_precision(cls, precision):
"""Set the number of decimal places used to report percentages."""
assert 0 <= precision < 10
cls._precision = precision
cls._near0 = 1.0 / 10**precision
cls._near100 = 100.0 - cls._near0
set_precision = classmethod(set_precision)
def _get_n_executed(self):
"""Returns the number of executed statements."""
return self.n_statements - self.n_missing
n_executed = property(_get_n_executed)
def _get_n_executed_branches(self):
"""Returns the number of executed branches."""
return self.n_branches - self.n_missing_branches
n_executed_branches = property(_get_n_executed_branches)
def _get_pc_covered(self):
"""Returns a single percentage value for coverage."""
if self.n_statements > 0:
pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) /
(self.n_statements + self.n_branches))
else:
pc_cov = 100.0
return pc_cov
pc_covered = property(_get_pc_covered)
def _get_pc_covered_str(self):
"""Returns the percent covered, as a string, without a percent sign.
Note that "0" is only returned when the value is truly zero, and "100"
is only returned when the value is truly 100. Rounding can never
result in either "0" or "100".
"""
pc = self.pc_covered
if 0 < pc < self._near0:
pc = self._near0
elif self._near100 < pc < 100:
pc = self._near100
else:
pc = round(pc, self._precision)
return "%.*f" % (self._precision, pc)
pc_covered_str = property(_get_pc_covered_str)
def pc_str_width(cls):
"""How many characters wide can pc_covered_str be?"""
width = 3 # "100"
if cls._precision > 0:
width += 1 + cls._precision
return width
pc_str_width = classmethod(pc_str_width)
def __add__(self, other):
nums = Numbers()
nums.n_files = self.n_files + other.n_files
nums.n_statements = self.n_statements + other.n_statements
nums.n_excluded = self.n_excluded + other.n_excluded
nums.n_missing = self.n_missing + other.n_missing
nums.n_branches = self.n_branches + other.n_branches
nums.n_partial_branches = (
self.n_partial_branches + other.n_partial_branches
)
nums.n_missing_branches = (
self.n_missing_branches + other.n_missing_branches
)
return nums
def __radd__(self, other):
# Implementing 0+Numbers allows us to sum() a list of Numbers.
if other == 0:
return self
return NotImplemented
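# --- Editorial usage sketch (not part of the original module) ---
# A hedged illustration of why Numbers.__radd__ returns self for 0: it lets the
# builtin sum(), which starts from 0, roll up per-file results. The `analyses`
# list is hypothetical.
#
#   total = sum(a.numbers for a in analyses)
#   print(total.n_files, total.pc_covered_str)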
|
bsd-3-clause
| 2,548,164,315,008,775,000 | 5,898,318,262,026,675,000 | 34.045455 | 79 | 0.577572 | false |
Telthor/cppDonorSimulation
|
donorTests/lib/googletest-master/googletest/test/gtest_filter_unittest.py
|
364
|
21325
|
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(set(set_var), set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(set(tests_to_run) - set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
|
mit
| -2,225,126,055,657,252,000 | 5,913,750,520,283,555,000 | 32.529874 | 80 | 0.66354 | false |
sodafree/backend
|
build/lib.linux-i686-2.7/django/conf/locale/sl/formats.py
|
257
|
1834
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
bsd-3-clause
| 6,546,191,408,734,191,000 | 714,019,340,851,233,300 | 36.428571 | 77 | 0.444929 | false |
stansonhealth/ansible-modules-core
|
cloud/openstack/_nova_keypair.py
|
41
|
5486
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
# (c) 2013, John Dewey <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
from novaclient import exceptions as exc
HAS_NOVACLIENT = True
except ImportError:
HAS_NOVACLIENT = False
DOCUMENTATION = '''
---
module: nova_keypair
version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Michael DeHaan"
deprecated: Deprecated in 2.0. Use os_keypair instead
short_description: Add/Delete key pair from nova
description:
- Add or Remove key pair from nova .
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the key pair
required: true
default: None
public_key:
description:
- The public key that would be uploaded to nova and injected to vm's upon creation
required: false
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
'''
EXAMPLES = '''
# Creates a key pair with the running users public key
- nova_keypair: state=present login_username=admin
login_password=admin login_tenant_name=admin name=ansible_key
public_key={{ lookup('file','~/.ssh/id_rsa.pub') }}
# Creates a new key pair and the private key returned after the run.
- nova_keypair: state=present login_username=admin login_password=admin
login_tenant_name=admin name=ansible_key
'''
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
public_key = dict(default=None),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_NOVACLIENT:
module.fail_json(msg='python-novaclient is required for this module to work')
nova = nova_client.Client(module.params['login_username'],
module.params['login_password'],
module.params['login_tenant_name'],
module.params['auth_url'],
region_name=module.params['region_name'],
service_type='compute')
try:
nova.authenticate()
except exc.Unauthorized as e:
module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
except exc.AuthorizationFailure as e:
module.fail_json(msg = "Unable to authorize user: %s" % e.message)
if module.params['state'] == 'present':
for key in nova.keypairs.list():
if key.name == module.params['name']:
if module.params['public_key'] and (module.params['public_key'] != key.public_key ):
module.fail_json(msg = "name {} present but key hash not the same as offered. Delete key first.".format(key['name']))
else:
module.exit_json(changed = False, result = "Key present")
try:
key = nova.keypairs.create(module.params['name'], module.params['public_key'])
except Exception as e:
module.exit_json(msg = "Error in creating the keypair: %s" % e.message)
if not module.params['public_key']:
module.exit_json(changed = True, key = key.private_key)
module.exit_json(changed = True, key = None)
if module.params['state'] == 'absent':
for key in nova.keypairs.list():
if key.name == module.params['name']:
try:
nova.keypairs.delete(module.params['name'])
except Exception as e:
module.fail_json(msg = "The keypair deletion has failed: %s" % e.message)
module.exit_json( changed = True, result = "deleted")
module.exit_json(changed = False, result = "not present")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
| 3,403,627,564,955,874,300 | -5,309,875,733,997,877,000 | 35.818792 | 138 | 0.619577 | false |
Dino0631/RedRain-Bot
|
lib/pip/_vendor/requests/packages/urllib3/exceptions.py
|
515
|
5599
|
from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
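# Illustrative note (editor's sketch, not part of urllib3): because of the
# __reduce__ above, a pickled RequestError keeps its URL but drops the pool:
#
#     err = RequestError(pool, '/index', 'connection dropped')
#     clone = pickle.loads(pickle.dumps(err))
#     clone.url   # '/index'
#     clone.pool  # None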
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested URL
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
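# Illustrative usage (editor's sketch, not part of urllib3): callers typically
# catch MaxRetryError and inspect the underlying cause, e.g.
#
#     try:
#         pool.urlopen('GET', '/', retries=0)
#     except MaxRetryError as e:
#         log.warning("gave up on %s: %r", e.url, e.reason)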
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class NewConnectionError(ConnectTimeoutError, PoolError):
"Raised when we fail to establish a new connection. Usually ECONNREFUSED."
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class SubjectAltNameWarning(SecurityWarning):
"Warned when connecting to a host with a certificate missing a SAN."
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class SNIMissingWarning(HTTPWarning):
"Warned when making a HTTPS request without SNI available."
pass
class DependencyWarning(HTTPWarning):
"""
Warned when an attempt is made to import a module with missing optional
dependencies.
"""
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
class ProxySchemeUnknown(AssertionError, ValueError):
"ProxyManager does not support the supplied scheme"
# TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
def __init__(self, scheme):
message = "Not supported proxy scheme %s" % scheme
super(ProxySchemeUnknown, self).__init__(message)
class HeaderParsingError(HTTPError):
"Raised by assert_header_parsing, but we convert it to a log.warning statement."
def __init__(self, defects, unparsed_data):
message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
super(HeaderParsingError, self).__init__(message)
|
gpl-3.0
| 2,451,722,136,313,627,000 | 7,359,411,744,972,739,000 | 25.789474 | 84 | 0.701732 | false |