repo_name (string, length 5-92) | path (string, length 4-232) | copies (string, 19 classes) | size (string, length 4-7) | content (string, length 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
ianastewart/cwltc-admin | mysite/settings/staging.py | 1 | 3443 | from .base import *
DEBUG = False
LIVE_GO_CARDLESS = False
LIVE_MAIL = False
SITE_NAME = os.path.basename(__file__).title()
env_path = os.path.join(BASE_DIR, ".env")
environ.Env.read_env(env_path)
INSTALLED_APPS += ["raven.contrib.django.raven_compat"]
DATABASES = {"default": env.db_url("DATABASE_URL")}
ALLOWED_HOSTS = ["django.iskt.co.uk"]
SECURE_SSL_REDIRECT = False
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
STATIC_ROOT = os.path.join(BASE_DIR, "static_files/")
SECRET_KEY = env.str("SECRET_KEY")
BEE_FREE_ID = env.str("BEE_FREE_ID")
BEE_FREE_SECRET = env.str("BEE_FREE_SECRET")
POS_COOKIE = env.str("POS_COOKIE")
if LIVE_MAIL:
print("Warning - Live mail")
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = env.dict("ANYMAIL")
else:
EMAIL_BACKEND = "django.core.mail.backends.dummy.EmailBackend"
if LIVE_GO_CARDLESS:
CARDLESS_ACCESS_TOKEN = env.str("CARDLESS_PRODUCTION_TOKEN")
CARDLESS_ENVIRONMENT = "live"
CARDLESS_WEBHOOK_SECRET = env.str("CARDLESS_WEBHOOK_SECRET")
print("WARNING - LIVE Go Cardless site")
else:
CARDLESS_ACCESS_TOKEN = env.str("CARDLESS_SANDBOX_TOKEN")
CARDLESS_ENVIRONMENT = "sandbox"
CARDLESS_WEBHOOK_SECRET = env.str("CARDLESS_WEBHOOK_SECRET")
# BROKER_URL = env.str('BROKER_URL')
RAVEN_CONFIG = {"dsn": env.str("RAVEN")}
# https://www.webforefront.com/django/setupdjangologging.html
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"formatters": {
"simple": {"format": "[%(asctime)s] %(levelname)s %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S"},
"verbose": {
"format": "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
"handlers": {
"console": {
"level": "DEBUG",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
"formatter": "simple",
},
"production_logfile": {
"level": "INFO",
"filters": ["require_debug_false"],
"class": "logging.handlers.RotatingFileHandler",
"filename": "./logs/django.log",
"maxBytes": 1024 * 1024 * 10, # 10MB
"backupCount": 5,
"formatter": "simple",
},
"sentry": {
"level": "ERROR", # To capture more than ERROR, change to WARNING, INFO, etc.
"filters": ["require_debug_false"],
"class": "raven.contrib.django.raven_compat.handlers.SentryHandler",
"tags": {"custom-tag": SITE_NAME},
},
},
"root": {"level": "DEBUG", "handlers": ["console"]},
"loggers": {
"members": {"handlers": ["production_logfile", "sentry"]},
"django": {"handlers": ["console", "sentry"], "propagate": True},
# stop sentry logging disallowed host
"django.security.DisallowedHost": {"handlers": ["console"], "propagate": False},
"django.request": { # debug logging of things that break requests
"handlers": ["production_logfile", "sentry"],
"level": "DEBUG",
"propagate": True,
},
        # route py.warnings through the console handler
        "py.warnings": {"handlers": ["console"]},
    },
}
| mit | 1,029,526,716,826,281,500 | 33.089109 | 104 | 0.584084 | false |
jakevdp/altair | altair/utils/deprecation.py | 1 | 1447 | import warnings
# import functools
class AltairDeprecationWarning(UserWarning):
pass
def _deprecated(obj, name=None, message=None):
"""Return a version of a class or function that raises a deprecation warning.
Parameters
----------
obj : class or function
The object to create a deprecated version of.
name : string (optional)
The name of the deprecated object
message : string (optional)
The deprecation message
Returns
-------
deprecated_obj :
The deprecated version of obj
Examples
--------
>>> class Foo(object): pass
>>> OldFoo = _deprecated(Foo, "OldFoo")
>>> f = OldFoo() # doctest: +SKIP
AltairDeprecationWarning: alt.OldFoo is deprecated. Use alt.Foo instead.
"""
if message is None:
message = ("alt.{} is deprecated. Use alt.{} instead."
"".format(name, obj.__name__))
if isinstance(obj, type):
return type(name, (obj,),
{'__doc__': obj.__doc__,
'__init__': _deprecated(obj.__init__, "__init__", message)})
elif callable(obj):
# @functools.wraps(obj) # TODO: use this in Py3 only
def new_obj(*args, **kwargs):
warnings.warn(message, AltairDeprecationWarning)
return obj(*args, **kwargs)
return new_obj
else:
raise ValueError("Cannot deprecate object of type {}".format(type(obj)))
| bsd-3-clause | 4,549,240,084,075,891,000 | 29.787234 | 81 | 0.583967 | false |
jamielennox/python-kiteclient | kiteclient/tests/v1/test_esek.py | 1 | 3094 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_esek
----------------------------------
Tests for `esek` module.
"""
from kiteclient.openstack.common.crypto import utils as cryptoutils
from kiteclient.tests import base
from kiteclient.tests.v1 import utils
from kiteclient.v1 import esek
from kiteclient.v1 import key
import base64
import six
class TestEsek(base.TestCase):
def setUp(self):
super(base.TestCase, self).setUp()
key_ses = utils.DummyKeyResponse(gen=20)
skey_data = "gTqLlW7x2oyNi3k+9YXTpQ=="
self.srckey = key.Key('testkey', skey_data, session=key_ses)
dkey_data = "uoUUn/+ZL+hNUwJ0cxTScg=="
self.dstkey = key.Key('destkey', dkey_data, session=key_ses)
self.skey = "uZnhYaRtzA7QdnDN1hVSWw=="
self.ekey = "fAlG9eGL44ew6q8uTMMKJw=="
self.esek_data = (
"LZ6WWNvCot49sEhnwn0Is/xGWYGQF72rCw8emEKHGmZpDcSQ4K0c5Ld0+fmR"
"T8PjzozEzWK97gNJQHZWSAh1JhmvMO+bjkUNlEdepOjTXrIW6QxdNvMY+Bkd"
"dDwrkKga4wZnoGgeMgK+B7cdGsQ8yAPE3vDjbpmIOvHjHXniCUs=")
def _encrypt(self, data):
crypto = cryptoutils.SymmetricCrypto(enctype='AES',
hashtype='SHA256')
enc = crypto.encrypt(base64.b64decode(self.ekey),
six.b(data), b64encode=True)
sig = crypto.sign(base64.b64decode(self.skey),
six.b(data), b64encode=True)
return enc, sig
def test_integrity(self):
esek_obj = esek.Esek(self.srckey.key_name,
self.dstkey,
self.esek_data)
b64_sig_key = base64.b64encode(esek_obj.sig_key)
b64_enc_key = base64.b64encode(esek_obj.enc_key)
self.assertEqual(six.b(self.skey), b64_sig_key)
self.assertEqual(six.b(self.ekey), b64_enc_key)
def test_decryption(self):
esek_obj = esek.Esek(self.srckey.key_name,
self.dstkey,
self.esek_data)
message = "MESSAGE"
enc, sig = self._encrypt(message)
new_message = esek_obj.decrypt(enc, sig)
self.assertEqual(six.b(message), new_message)
def test_bad_signature_throws(self):
esek_obj = esek.Esek(self.srckey.key_name,
self.dstkey,
self.esek_data)
message = "MESSAGE"
enc, _ = self._encrypt(message)
sig = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
self.assertRaises(ValueError, esek_obj.decrypt, enc, sig) | apache-2.0 | -4,911,584,131,548,145,000 | 34.988372 | 75 | 0.621526 | false |
kd0aij/matrixpilot_old | Tools/MAVLink/MAVProxy/modules/antenna.py | 1 | 2346 | #!/usr/bin/env python
'''
antenna pointing module
Andrew Tridgell
June 2012
'''
import sys, os, time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'cuav', 'lib'))
import cuav_util
mpstate = None
class module_state(object):
def __init__(self):
self.gcs_location = None
self.last_bearing = 0
self.last_announce = 0
def name():
'''return module name'''
return "antenna"
def description():
'''return module description'''
return "antenna pointing module"
def cmd_antenna(args):
'''set gcs location'''
state = mpstate.antenna_state
usage = "antenna lat lon"
if len(args) != 2:
if state.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(state.gcs_location))
return
state.gcs_location = (float(args[0]), float(args[1]))
def init(_mpstate):
'''initialise module'''
global mpstate
mpstate = _mpstate
mpstate.antenna_state = module_state()
mpstate.command_map['antenna'] = (cmd_antenna, "antenna link control")
def unload():
'''unload module'''
pass
def mavlink_packet(m):
'''handle an incoming mavlink packet'''
state = mpstate.antenna_state
if state.gcs_location is None and mpstate.status.wploader.count() > 0:
home = mpstate.status.wploader.wp(0)
mpstate.antenna_state.gcs_location = (home.x, home.y)
print("Antenna home set")
if state.gcs_location is None:
return
if m.get_type() == 'GPS_RAW' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat, m.lon)
elif m.get_type() == 'GPS_RAW_INT' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat/1.0e7, m.lon/1.0e7)
else:
return
mpstate.console.set_status('Antenna', 'Antenna %.0f' % bearing, row=0)
if abs(bearing - state.last_bearing) > 5 and (time.time() - state.last_announce) > 15:
state.last_bearing = bearing
state.last_announce = time.time()
mpstate.functions.say("Antenna %u" % int(bearing+0.5))
| gpl-3.0 | 5,135,345,788,596,944,000 | 30.583333 | 104 | 0.602728 | false |
AlexV1990/saveall | utils/modconf.py | 1 | 7590 | #! /usr/bin/env python3
# coding: utf-8
'''
Functions for manipulating and checking the configuration file
'''
import json
import utils.misc as misc
CONF_FILE_NAME = "conf/conf.json"
'''
check_conf_valid: checks that the configuration file is in a valid JSON format
input: no argument (the file name depends on the global variable CONF_FILE_NAME)
output: returns 0 if the file is valid, -1 otherwise
'''
def check_conf_valid():
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
return 0
except:
return -1
'''
get_list_equipment_from_conf: returns the list of devices contained in the conf.json file
input: no argument
output: list of tuples (device name, device IP)
'''
def get_list_equipment_from_conf():
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
list_eq = []
for dat in data["EQUIPEMENTS"]:
var_nom = str(data["EQUIPEMENTS"][dat]["NOM"])
var_ip = str(data["EQUIPEMENTS"][dat]["IP"])
tuple_eq = (var_nom, var_ip)
list_eq.append(tuple_eq)
return list_eq
'''
get_list_equipment_from_conf_for_checklist: returns the list of devices contained in the conf.json file
input: no argument
output: list of tuples (device name, "", False)
'''
def get_list_equipment_from_conf_for_checklist():
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
list_eq = []
for dat in data["EQUIPEMENTS"]:
var_nom = str(data["EQUIPEMENTS"][dat]["NOM"])
tuple_eq = (var_nom, "", False)
list_eq.append(tuple_eq)
return list_eq
'''
get_list_files_from_conf: returns the list of files contained in the conf.json file
input: no argument
output: list of tuples (file name, file path)
'''
def get_list_files_from_conf():
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
list_fic = []
for dat in data["FICHIERS"]:
var_nom = str(data["FICHIERS"][dat]["NOM"])
var_path = str(data["FICHIERS"][dat]["PATH"])
tuple_eq = (var_nom, var_path)
list_fic.append(tuple_eq)
return list_fic
'''
delete_file_from_conf: removes a file from the configuration file
input: name of the file to delete
output: 0 if OK, -1 otherwise
'''
def delete_file_from_conf(file_name):
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
for element in data["FICHIERS"]:
if file_name == data["FICHIERS"][element]["NOM"]:
data["FICHIERS"].pop(element)
break
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -1
'''
delete_equipment_from_conf: removes a device from the configuration file
input: name of the device to delete
output: 0 if OK, -1 otherwise
'''
def delete_equipment_from_conf(equipment_name):
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
for element in data["EQUIPEMENTS"]:
if equipment_name == data["EQUIPEMENTS"][element]["NOM"]:
data["EQUIPEMENTS"].pop(element)
break
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -1
'''
add_file_to_conf: adds a file to the configuration file
input: list of file parameters [name, path, type, device]
output: 0 if OK, -1 if the name already exists, -2 for any other error
'''
def add_file_to_conf(list_params_file):
file_name = list_params_file[0]
file_path = list_params_file[1]
file_type = list_params_file[2]
equipment_name = list_params_file[3]
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
        # check that the name is unique
for element in data["FICHIERS"]:
if file_name == data["FICHIERS"][element]["NOM"]:
return -1
        # format the file parameters as JSON
data["FICHIERS"][file_name] = {}
data["FICHIERS"][file_name]["NOM"] = file_name
data["FICHIERS"][file_name]["TYPE"] = file_type
data["FICHIERS"][file_name]["EQUIPEMENT"] = equipment_name
data["FICHIERS"][file_name]["PATH"] = file_path
        # update the configuration file
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -1
'''
add_equipment_to_conf: adds a device to the configuration file
input: list of device parameters [name, IP, type, login, password]
output: 0 if OK, -1 if the name already exists, -2 for any other error
'''
def add_equipment_to_conf(list_params_equipment):
equipment_name = list_params_equipment[0]
equipment_ip = list_params_equipment[1]
equipment_type = list_params_equipment[2]
equipment_login = list_params_equipment[3]
equipment_mdp = list_params_equipment[4]
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
        # check that the name is unique
for element in data["EQUIPEMENTS"]:
if equipment_name == data["EQUIPEMENTS"][element]["NOM"]:
return -1
        # format the device parameters as JSON
data["EQUIPEMENTS"][equipment_name] = {}
data["EQUIPEMENTS"][equipment_name]["NOM"] = equipment_name
data["EQUIPEMENTS"][equipment_name]["IP"] = equipment_ip
data["EQUIPEMENTS"][equipment_name]["TYPE"] = equipment_type
data["EQUIPEMENTS"][equipment_name]["LOGIN"] = equipment_login
data["EQUIPEMENTS"][equipment_name]["MDP"] = equipment_mdp
        # update the configuration file
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -1
'''
check_list_equipment_valid: checks that a request to add a device is valid
input: list of parameters describing the device [name, IP, type, login, password]
output: returns 0 if the device can be added
-1 if the device name is not unique
-2 if the supplied IP is not valid
-3 if the IP is not unique
-4 if the type is not "DB" (database), "S" (server), "R" (network device)
-5 if not all fields are filled in
'''
def check_list_equipment_valid(list_params_equipment):
equipment_name = list_params_equipment[0]
equipment_ip = list_params_equipment[1]
equipment_type = list_params_equipment[2]
equipment_login = list_params_equipment[3]
equipment_mdp = list_params_equipment[4]
    # check that all fields are filled in
if equipment_name == "" or equipment_ip == "" or equipment_type == "" or equipment_login == "" or equipment_mdp == "":
return -5
    # open the configuration file
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
    # check that the name is unique
if equipment_name in data["EQUIPEMENTS"]:
return -1
    # check that the IP is valid
if misc.is_valid_ipv4_address(equipment_ip) == False:
return -2
    # check that the IP is unique in the configuration file
for element in data["EQUIPEMENTS"]:
if equipment_ip in data["EQUIPEMENTS"][element]["IP"]:
return -3
    # check the device type
if equipment_type != "DB" and equipment_type != "S" and equipment_type != "R":
return -4
return 0
| mit | -9,037,533,644,414,211,000 | 25.170139 | 120 | 0.655699 | false |
kubernetes-client/python | kubernetes/client/models/v1alpha1_webhook_throttle_config.py | 1 | 4435 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1WebhookThrottleConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'burst': 'int',
'qps': 'int'
}
attribute_map = {
'burst': 'burst',
'qps': 'qps'
}
def __init__(self, burst=None, qps=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1WebhookThrottleConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._burst = None
self._qps = None
self.discriminator = None
if burst is not None:
self.burst = burst
if qps is not None:
self.qps = qps
@property
def burst(self):
"""Gets the burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:return: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._burst
@burst.setter
def burst(self, burst):
"""Sets the burst of this V1alpha1WebhookThrottleConfig.
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:param burst: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._burst = burst
@property
def qps(self):
"""Gets the qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:return: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._qps
@qps.setter
def qps(self, qps):
"""Sets the qps of this V1alpha1WebhookThrottleConfig.
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:param qps: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._qps = qps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 | -7,392,792,150,688,209,000 | 28.566667 | 124 | 0.583089 | false |
adamcaudill/yawast | yawast/scanner/plugins/http/applications/wordpress.py | 1 | 7344 | # Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import re
from typing import Tuple, Union, List, cast
from urllib.parse import urljoin
from packaging import version
from requests import Response
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.evidence import Evidence
from yawast.scanner.plugins.http import version_checker, response_scanner
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output
def identify(url: str) -> Tuple[Union[str, None], List[Result]]:
results = []
# find WordPress
res, path = _identify_by_path(url, "")
if path is None:
res, path = _identify_by_path(url, "blog/")
# check to see if we have a valid hit
if path is not None:
# we have a WordPress install, let's see if we can get a version
body = res.text
ver = "Unknown"
# this works for modern versions
m = re.search(r"login.min.css\?ver=\d+\.\d+\.?\d*", body)
if m:
ver = m.group(0).split("=")[1]
else:
# the current method doesn't work, fall back to an older method
m = re.search(r"load-styles.php\?[\w,;=&%]+;ver=\d+\.\d+\.?\d*", body)
if m:
ver = m.group(0).split("=")[-1]
# report that we found WordPress
results.append(
Result.from_evidence(
Evidence.from_response(res, {"version": ver}),
f"Found WordPress v{ver} at {path}",
Vulnerabilities.APP_WORDPRESS_VERSION,
)
)
# is this a current version?
ver = cast(version.Version, version.parse(ver))
curr_version = version_checker.get_latest_version("wordpress", ver)
if curr_version is not None and curr_version > ver:
results.append(
Result.from_evidence(
Evidence.from_response(
res,
{
"installed_version": str(ver),
"current_verison": str(curr_version),
},
),
f"WordPress Outdated: {ver} - Current: {curr_version}",
Vulnerabilities.APP_WORDPRESS_OUTDATED,
)
)
return path, results
else:
return None, []
def check_path_disclosure(wp_url: str) -> List[Result]:
# this is a list of files that are known to throw a fatal error when accessed directly
# this is from a manual review of all plugins with at least 1M installs
urls = [
"wp-content/plugins/hello.php",
"wp-content/plugins/akismet/akismet.php",
"wp-content/plugins/contact-form-7/includes/capabilities.php",
"wp-content/plugins/wordpress-seo/admin/views/partial-alerts-errors.php",
"wp-content/plugins/jetpack/load-jetpack.php",
"wp-content/plugins/jetpack/uninstall.php",
"wp-content/plugins/duplicate-post/duplicate-post-admin.php",
"wp-content/plugins/wpforms-lite/includes/admin/class-welcome.php",
"wp-content/plugins/wp-google-maps/base/includes/welcome.php",
"wp-content/plugins/wp-super-cache/wp-cache.php",
"wp-content/plugins/mailchimp-for-wp/integrations/wpforms/bootstrap.php",
"wp-content/plugins/mailchimp-for-wp/integrations/bootstrap.php",
"wp-content/plugins/regenerate-thumbnails/regenerate-thumbnails.php",
"wp-content/plugins/advanced-custom-fields/includes/deprecated.php",
"wp-content/plugins/redirection/redirection.php",
"wp-content/plugins/wpforms-lite/includes/admin/importers/class-ninja-forms.php",
"wp-content/plugins/ninja-forms/includes/deprecated.php",
"wp-content/plugins/so-widgets-bundle/so-widgets-bundle.php",
"wp-content/plugins/wp-fastest-cache/templates/preload.php",
"wp-content/plugins/duplicate-page/duplicatepage.php",
"wp-content/plugins/better-wp-security/better-wp-security.php",
"wp-content/plugins/all-in-one-wp-security-and-firewall/other-includes/wp-security-unlock-request.php",
"wp-content/plugins/related-posts/views/settings.php",
"wp-content/plugins/wpcontentguard/views/settings.php",
"wp-content/plugins/simple-social-icons/simple-social-icons.php",
]
results: List[Result] = []
for url in urls:
target = urljoin(wp_url, url)
head = network.http_head(target, False)
if head.status_code != 404:
resp = network.http_get(target, False)
if resp.status_code < 300 or resp.status_code >= 500:
# we have some kind of response that could be useful
if "<b>Fatal error</b>:" in resp.text:
# we have an error
pattern = r"<b>((\/|[A-Z]:\\).*.php)<\/b>"
if re.search(pattern, resp.text):
try:
path = re.findall(pattern, resp.text)[0][0]
results.append(
Result.from_evidence(
Evidence.from_response(resp, {"path": path}),
f"WordPress File Path Disclosure: {target} ({path})",
Vulnerabilities.APP_WORDPRESS_PATH_DISCLOSURE,
)
)
except Exception:
output.debug_exception()
results += response_scanner.check_response(target, resp)
return results
def check_json_user_enum(url: str) -> List[Result]:
results = []
target = urljoin(url, "wp-json/wp/v2/users")
res = network.http_get(target, False)
body = res.text
if res.status_code < 300 and "slug" in body:
data = res.json()
# log the enum finding
results.append(
Result.from_evidence(
Evidence.from_response(res),
f"WordPress WP-JSON User Enumeration at {target}",
Vulnerabilities.APP_WORDPRESS_USER_ENUM_API,
)
)
# log the individual users
for user in data:
results.append(
Result.from_evidence(
Evidence.from_response(
res,
{
"user_id": user["id"],
"user_slug": user["slug"],
"user_name": user["name"],
},
),
f"ID: {user['id']}\tUser Slug: '{user['slug']}'\t\tUser Name: '{user['name']}'",
Vulnerabilities.APP_WORDPRESS_USER_FOUND,
)
)
return results
def _identify_by_path(url: str, path: str) -> Tuple[Response, Union[str, None]]:
target = urljoin(url, f"{path}wp-login.php")
res = network.http_get(target, False)
body = res.text
if res.status_code == 200 and "Powered by WordPress" in body:
return res, urljoin(url, path)
else:
return res, None
| mit | 6,941,804,610,785,816,000 | 38.483871 | 111 | 0.564134 | false |
xuru/pyvisdk | pyvisdk/do/cluster_power_on_vm_result.py | 1 | 1098 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ClusterPowerOnVmResult(vim, *args, **kwargs):
'''PowerOnVmResult is the base class of the result returned to the
PowerOnMultiVM_Task method.'''
obj = vim.client.factory.create('ns0:ClusterPowerOnVmResult')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'attempted', 'notAttempted', 'recommendations', 'dynamicProperty',
'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| mit | 4,821,704,323,753,035,000 | 30.4 | 124 | 0.603825 | false |
cysuncn/python | spark/crm/PROC_M_R_RET_CUST_FLOW.py | 1 | 4734 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_M_R_RET_CUST_FLOW').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# dates used during processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
# MCRM_RET_CUST_FLOW incremental load: delete the current day's file
ret = os.system("hdfs dfs -rm -r /"+dbname+"/MCRM_RET_CUST_FLOW/"+V_DT+".parquet")
MCRM_RET_CUST_ASSETS = sqlContext.read.parquet(hdfs+'/MCRM_RET_CUST_ASSETS/*')
MCRM_RET_CUST_ASSETS.registerTempTable("MCRM_RET_CUST_ASSETS")
ACRM_F_AG_AGREEMENT = sqlContext.read.parquet(hdfs+'/ACRM_F_AG_AGREEMENT/*')
ACRM_F_AG_AGREEMENT.registerTempTable("ACRM_F_AG_AGREEMENT")
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT CUST_ID AS CUST_ID
,FR_ID AS FR_ID
,MIN(concat(SUBSTR(START_DATE, 1, 4),'-',SUBSTR(START_DATE, 6, 2),'-',SUBSTR(START_DATE, 9, 2))) AS OPEN_DATE
,MAX(concat(SUBSTR(END_DATE, 1, 4),'-',SUBSTR(END_DATE, 6, 2),'-',SUBSTR(END_DATE, 9, 2))) AS CANCEL_DATE
   FROM ACRM_F_AG_AGREEMENT A                  --customer agreement table
GROUP BY FR_ID
,CUST_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_MCRM_RET_CUST_FLOW_01 = sqlContext.sql(sql)
TMP_MCRM_RET_CUST_FLOW_01.registerTempTable("TMP_MCRM_RET_CUST_FLOW_01")
dfn="TMP_MCRM_RET_CUST_FLOW_01/"+V_DT+".parquet"
TMP_MCRM_RET_CUST_FLOW_01.cache()
nrows = TMP_MCRM_RET_CUST_FLOW_01.count()
TMP_MCRM_RET_CUST_FLOW_01.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_MCRM_RET_CUST_FLOW_01.unpersist()
ACRM_F_AG_AGREEMENT.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_MCRM_RET_CUST_FLOW_01/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_MCRM_RET_CUST_FLOW_01 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
# Task [11] 001-02::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUST_ID AS CUST_ID
,A.CUST_ZH_NAME AS CUST_ZH_NAME
,A.CUST_MANAGER AS CUST_MANAGER
,A.CUST_MANAGER_NAME AS CUST_MANAGER_NAME
,A.ORG_ID AS ORG_ID
,A.ORG_NAME AS ORG_NAME
,A.CUST_LEVEL AS CUST_LEVEL
,A.GRADE_DATE AS GRADE_DATE
,B.OPEN_DATE AS OPEN_DATE
,C.CANCEL_DATE AS CANCEL_DATE
,A.MONTH_BAL AS CUST_ASSETS
,A.OLD_CUST_LEVEL AS CUST_LEVEL_FU
,A.ST_DATE AS ST_DATE
,'' AS O_MAIN_TYPE
,'' AS M_MAIN_TYPE
   FROM MCRM_RET_CUST_ASSETS A                     --customer assets table
   LEFT JOIN TMP_MCRM_RET_CUST_FLOW_01 B           --customer inflow/outflow statistics temporary table 01
ON A.CUST_ID = B.CUST_ID
AND B.FR_ID = A.FR_ID
AND SUBSTR(B.OPEN_DATE, 1, 7) = SUBSTR(V_DT, 1, 7)
   LEFT JOIN TMP_MCRM_RET_CUST_FLOW_01 C           --customer inflow/outflow statistics temporary table 01
ON A.CUST_ID = C.CUST_ID
AND C.FR_ID = A.FR_ID
AND SUBSTR(C.CANCEL_DATE, 1, 7) = SUBSTR(V_DT, 1, 7)
WHERE A.ST_DATE = V_DT """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
MCRM_RET_CUST_FLOW = sqlContext.sql(sql)
MCRM_RET_CUST_FLOW.registerTempTable("MCRM_RET_CUST_FLOW")
dfn="MCRM_RET_CUST_FLOW/"+V_DT+".parquet"
MCRM_RET_CUST_FLOW.cache()
nrows = MCRM_RET_CUST_FLOW.count()
MCRM_RET_CUST_FLOW.write.save(path=hdfs + '/' + dfn, mode='append')
MCRM_RET_CUST_FLOW.unpersist()
MCRM_RET_CUST_ASSETS.unpersist()
TMP_MCRM_RET_CUST_FLOW_01.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert MCRM_RET_CUST_FLOW lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | 6,747,739,261,989,934,000 | 42.150943 | 177 | 0.580892 | false |
thomastweets/PythonRSA | GUI_RSA.py | 1 | 15279 | ############################
### GUI for RS analysis ###
############################
import wx
import rsa
import os
import webbrowser
files_number = 0
class RSA_GUI(wx.Frame):
def __init__(self, parent, title):
super(RSA_GUI,self).__init__(parent, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER, title = title, size = (400,275))
self.InitUI()
self.Show(True)
def InitUI(self):
## Creates Status Bar
self.CreateStatusBar()
self.menuBar = wx.MenuBar()
self.filemenu = wx.Menu()
self.helpmenu = wx.Menu()
self.menuHelp = self.helpmenu.Append(wx.ID_ANY, "&Help", "Learn more about RSA and how to use this program")
self.menuAbout = self.helpmenu.Append(wx.ID_ABOUT, "&About", "Learn more about this program")
self.menuClear = self.filemenu.Append(wx.ID_ANY,"&Clear","Clear data")
self.filemenu.AppendSeparator()
self.menuExit = self.filemenu.Append(wx.ID_EXIT, "&Exit", "Terminate the program")
self.menuBar.Append(self.filemenu, "&File")
self.menuBar.Append(self.helpmenu, "&Help")
self.SetMenuBar(self.menuBar)
self.Bind(wx.EVT_MENU, self.OnAbout, self.menuAbout)
self.Bind(wx.EVT_MENU, self.OnHelp, self.menuHelp)
self.Bind(wx.EVT_MENU, self.OnExit, self.menuExit)
self.Bind(wx.EVT_MENU, self.OnClear, self.menuClear)
## buttons
self.panel = wx.Panel(self)
self.main_box = wx.BoxSizer(wx.VERTICAL)
file_box = wx.BoxSizer(wx.HORIZONTAL)
file_button = wx.Button(self.panel, label = 'Select files', size = (90, 30))
file_box.Add(file_button)
self.file_text = wx.TextCtrl(self.panel)
self.file_text.Disable()
file_box.Add(self.file_text, proportion = 1, flag = wx.EXPAND | wx.LEFT, border = 5)
self.main_box.Add(file_box, flag = wx.EXPAND | wx.ALL, border = 10)
self.main_box.Add((-1,10))
label_box = wx.BoxSizer(wx.HORIZONTAL)
label_button = wx.Button(self.panel, label = 'Conditions', size = (90, 30))
label_box.Add(label_button)
self.label_text = wx.TextCtrl(self.panel)
self.label_text.Disable()
label_box.Add(self.label_text, proportion = 1, flag = wx.EXPAND | wx.LEFT, border = 5)
self.main_box.Add(label_box, flag = wx. EXPAND | wx.RIGHT | wx.LEFT, border = 10)
self.main_box.Add((-1,30))
options_box = wx.BoxSizer(wx.HORIZONTAL)
options_button = wx.Button(self.panel, label='Options', size = (70, 30))
options_box.Add(options_button)
self.main_box.Add(options_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.main_box.Add((-1,10))
end_box = wx.BoxSizer(wx.HORIZONTAL)
self.go_btn = wx.Button(self.panel, label = 'Go', size = (70, 30))
self.go_btn.Disable()
end_box.Add(self.go_btn, flag = wx.BOTTOM, border = 5)
cancel_btn = wx.Button(self.panel, label = 'Cancel', size = (70, 30))
end_box.Add(cancel_btn, flag = wx.LEFT | wx.BOTTOM, border = 5)
self.main_box.Add(end_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.main_box)
self.Bind(wx.EVT_BUTTON, self.OnFiles, file_button)
self.Bind(wx.EVT_BUTTON, self.conditions, label_button)
self.Bind(wx.EVT_BUTTON, self.OnOptions, options_button)
self.go_btn.Bind(wx.EVT_BUTTON, self.OnGo)
self.Bind(wx.EVT_BUTTON, self.OnCancel, cancel_btn)
self.labels = []
self.files = []
self.Center()
def OnOptions(self, e):
self.new = OptionWindow(parent=None, id=-1)
self.new.Show()
def OnAbout(self, e):
dlg = wx.MessageDialog(self, "This is a program to perform a representational similarity analysis on functional magnetic resonance imaging data.\n\n"
"The analysis is following the principles described in the paper 'Representational Similarity Analysis - Connecting"
" the Branches of Systems Neuroscience' by Nikolaus Kriegeskorte, Marieke Mur and Peter Bandettini (2008). \n\nIt is the"
" result of a project work at Maastricht University by Pia Schroeder, Amelie Haugg and Julia Brehm under the supervision of Thomas Emmerling."
"\n\nFor correspondence please refer to https://github.com/thomastweets/PythonRSA", "About this program")
dlg.ShowModal()
dlg.Destroy()
def OnHelp(self, e):
webbrowser.open("https://github.com/thomastweets/PythonRSA/blob/master/README.md")
#dlg = wx.MessageDialog(self, "", "Help for this program")
#dlg.ShowModal()
#dlg.Destroy()
def OnExit(self, e):
self.Close(True)
def OnClear(self, e):
self.files = []
self.labels = []
self.file_text.ChangeValue(str(''))
self.label_text.ChangeValue(str(''))
rsa.matrix_plot1 = True
rsa.matrix_plot2 = False
rsa.bar_plot = False
rsa.correlations1 = False
rsa.correlations2 = False
rsa.pvalues = False
rsa.no_relabelings = 10000
rsa.dist_metric = 1
rsa.output_first = True
rsa.output_second = False
rsa.scale_to_max = False
global files_number
files_number = 0
self.go_btn.Disable()
def OnFiles(self, event):
dialog = wx.FileDialog(self, "Choose files:", os.getcwd(), " ","*.vom", wx.FD_OPEN|wx.FD_MULTIPLE)
self.files = []
if dialog.ShowModal() == wx.ID_OK:
self.paths = dialog.GetPaths()
# myfiles contains all the file names
for path in self.paths:
self.files.append(os.path.basename(path).encode("utf-8"))
global files_number
if len(self.files) > 1:
files_number = 1
else:
files_number = 0
if self.files:
self.file_text.ChangeValue(str(', '.join(self.files)))
self.go_btn.Enable()
dialog.Destroy()
def conditions(self, event):
self.textinput = wx.TextEntryDialog(self, "Type in condition names separated by a white space", "Condition labels")
if self.textinput.ShowModal() == wx.ID_OK:
self.input = self.textinput.GetValue()
# labels contains a list of all conditions
self.labels = self.input.split()
self.labels = [label.encode("utf-8") for label in self.labels]
if self.labels:
self.label_text.ChangeValue(str(', '.join(self.labels)))
self.textinput.Destroy()
def OnGo(self, e):
if self.labels == ['Tetris']:
import Tetris
else:
wait = wx.BusyCursor()
rsa.RSA(self.paths, self.files, self.labels)
del wait
def OnCancel(self, e):
self.Close(True)
class OptionWindow(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Options',
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MINIMIZE_BOX ^ wx.MAXIMIZE_BOX,
size=(400,500))
self.InitOpt()
def InitOpt(self):
self.panel = wx.Panel(self)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add((-1,20))
self.line1 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line1, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,10))
# Check box: First-order RDMs
self.RDM1_box = wx.BoxSizer(wx.HORIZONTAL)
self.RDM1_cb = wx.CheckBox(self.panel, label = 'First order RDMs')
self.RDM1_cb.SetValue(rsa.output_first)
self.RDM1_cb.Bind(wx.EVT_CHECKBOX, self.OnSelectRDM1)
self.RDM1_box.Add(self.RDM1_cb)
self.vbox.Add(self.RDM1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Matrix plots
self.mplot1_box = wx.BoxSizer(wx.HORIZONTAL)
self.mplot1_box.Add((25,-1))
self.mplot1_cb = wx.CheckBox(self.panel, label = 'Matrix plots')
self.mplot1_cb.SetValue(rsa.matrix_plot1)
self.mplot1_box.Add(self.mplot1_cb)
self.vbox.Add(self.mplot1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: First-order correlations
self.correlations1_box = wx.BoxSizer(wx.HORIZONTAL)
self.correlations1_box.Add((25,-1))
self.correlations1_cb = wx.CheckBox(self.panel, label = 'Correlations')
self.correlations1_cb.SetValue(rsa.correlations1)
self.correlations1_box.Add(self.correlations1_cb)
self.vbox.Add(self.correlations1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Scale to maximum distance
self.scale_box = wx.BoxSizer(wx.HORIZONTAL)
self.scale_box.Add((25,-1))
self.scale_cb = wx.CheckBox(self.panel, label='Scale to max')
self.scale_cb.SetValue(rsa.scale_to_max)
self.scale_box.Add(self.scale_cb)
self.vbox.Add(self.scale_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Dropdown menu for distance metric
self.drop_box = wx.BoxSizer(wx.HORIZONTAL)
self.drop_box.Add((25,-1))
self.drop_label = wx.StaticText(self.panel, label = 'Distance metric ')
self.drop_box.Add(self.drop_label)
self.distances = ['Correlation distance', 'Euclidean distance', 'Absolute activation difference']
self.dropdown = wx.ComboBox(self.panel, value = self.distances[rsa.dist_metric-1], choices = self.distances, style=wx.CB_READONLY)
self.drop_box.Add(self.dropdown)
self.vbox.Add(self.drop_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,20))
self.line2 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line2, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,10))
# Check box: Second-order RDM
self.RDM2_box = wx.BoxSizer(wx.HORIZONTAL)
self.RDM2_cb = wx.CheckBox(self.panel, label = 'Second order RDMs')
self.RDM2_cb.SetValue(rsa.output_second)
self.RDM2_cb.Bind(wx.EVT_CHECKBOX, self.OnSelectRDM2)
self.RDM2_box.Add(self.RDM2_cb)
self.vbox.Add(self.RDM2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# only checkable if you have chosen enough files
self.RDM2_cb.Disable()
if files_number == 1:
self.RDM2_cb.Enable()
# Check box: Matrix plots
self.mplot2_box = wx.BoxSizer(wx.HORIZONTAL)
self.mplot2_box.Add((25,-1))
self.mplot2_cb = wx.CheckBox(self.panel, label = 'Matrix plots')
self.mplot2_cb.SetValue(rsa.matrix_plot2)
self.mplot2_box.Add(self.mplot2_cb)
self.vbox.Add(self.mplot2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Bar plots
self.bplot_box = wx.BoxSizer(wx.HORIZONTAL)
self.bplot_box.Add((25,-1))
self.bplot_cb = wx.CheckBox(self.panel, label = 'Bar plots')
self.bplot_cb.SetValue(rsa.bar_plot)
self.bplot_box.Add(self.bplot_cb)
self.vbox.Add(self.bplot_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Second-order correlations
self.correlations2_box = wx.BoxSizer(wx.HORIZONTAL)
self.correlations2_box.Add((25,-1))
self.correlations2_cb = wx.CheckBox(self.panel, label = 'Correlations')
self.correlations2_cb.SetValue(rsa.correlations2)
self.correlations2_box.Add(self.correlations2_cb)
self.vbox.Add(self.correlations2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: p-values
self.p_box = wx.BoxSizer(wx.HORIZONTAL)
self.p_box.Add((25,-1))
self.p_cb = wx.CheckBox(self.panel, label='p-values')
self.p_cb.SetValue(rsa.pvalues)
self.p_box.Add(self.p_cb)
self.vbox.Add(self.p_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# No of permutations SpinControl
self.perm_box = wx.BoxSizer(wx.HORIZONTAL)
self.perm_box.Add((25,-1))
self.perm_label = wx.StaticText(self.panel, label = 'No. of Permutations ')
self.perm_box.Add(self.perm_label)
self.perm_spin = wx.SpinCtrl(self.panel, value=str(rsa.no_relabelings), min=100, max = 100000)
self.perm_box.Add(self.perm_spin, proportion = 1)
self.vbox.Add(self.perm_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
self.line3 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line3, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,50))
# Dis-/Enable options
self.OnSelectRDM1([])
self.OnSelectRDM2([])
# Done and Cancel Buttons
self.end_box = wx.BoxSizer(wx.HORIZONTAL)
self.done_btn = wx.Button(self.panel, label = 'Done', size = (70, 30))
self.done_btn.Bind(wx.EVT_BUTTON, self.OnDone)
self.end_box.Add(self.done_btn, flag = wx.BOTTOM, border = 5)
self.cancel_btn = wx.Button(self.panel, label = 'Cancel', size = (70, 30))
self.cancel_btn.Bind(wx.EVT_BUTTON, self.OnCancel)
self.end_box.Add(self.cancel_btn, flag = wx.LEFT | wx.BOTTOM, border = 5)
self.vbox.Add(self.end_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.vbox)
self.Center()
def OnSelectRDM1(self,e):
if self.RDM1_cb.GetValue():
self.mplot1_cb.Enable()
self.correlations1_cb.Enable()
self.scale_cb.Enable()
self.dropdown.Enable()
else:
self.mplot1_cb.Disable()
self.correlations1_cb.Disable()
self.scale_cb.Disable()
self.dropdown.Disable()
def OnSelectRDM2(self,e):
if self.RDM2_cb.GetValue() and files_number == 1:
self.bplot_cb.Enable()
self.mplot2_cb.Enable()
self.p_cb.Enable()
self.correlations2_cb.Enable()
self.perm_spin.Enable()
else:
self.bplot_cb.Disable()
self.p_cb.Disable()
self.perm_spin.Disable()
self.mplot2_cb.Disable()
self.correlations2_cb.Disable()
def OnDone(self,e):
rsa.output_first = self.RDM1_cb.GetValue()
rsa.output_second = self.RDM2_cb.GetValue()
rsa.matrix_plot1 = self.mplot1_cb.GetValue()
rsa.matrix_plot2 = self.mplot2_cb.GetValue()
rsa.bar_plot = self.bplot_cb.GetValue()
rsa.correlations1 = self.correlations1_cb.GetValue()
rsa.correlations2 = self.correlations2_cb.GetValue()
rsa.pvalues = self.p_cb.GetValue()
rsa.scale_to_max = self.scale_cb.GetValue()
rsa.no_relabelings = self.perm_spin.GetValue()
rsa.dist_metric = self.dropdown.GetSelection()+1
self.Close()
def OnCancel(self,e):
self.Close()
def main():
GUI = wx.App()
RSA_GUI(None, 'RSA')
GUI.MainLoop()
if __name__ == '__main__':
main()
| gpl-2.0 | 8,023,130,351,131,065,000 | 35.995157 | 179 | 0.59729 | false |
Vijfhoek/oyoyo | oyoyo/cmdhandler.py | 1 | 6875 | # Copyright (c) 2008 Duncan Fordyce
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import inspect
import logging
import sys
import traceback
from oyoyo import helpers
from oyoyo.parse import parse_nick
# Python < 3 compatibility
if sys.version_info < (3,):
class bytes(object):
def __new__(self, b='', encoding='utf8'):
return str(b)
def protected(func):
""" decorator to protect functions from being called """
func.protected = True
return func
class CommandError(Exception):
def __init__(self, cmd):
self.cmd = cmd
class NoSuchCommandError(CommandError):
def __str__(self):
return 'No such command "%s"' % ".".join(self.cmd)
class ProtectedCommandError(CommandError):
def __str__(self):
return 'Command "%s" is protected' % ".".join(self.cmd)
class CommandHandler(object):
""" The most basic CommandHandler """
def __init__(self, client):
self.client = client
@protected
def get(self, in_command_parts):
""" finds a command
commands may be dotted. each command part is checked that it does
        not start with an underscore and does not have an attribute
"protected". if either of these is true, ProtectedCommandError
is raised.
        it's possible to pass both "command.sub.func" and
["command", "sub", "func"].
"""
if isinstance(in_command_parts, (str, bytes)):
in_command_parts = in_command_parts.split(bytes('.', 'ascii'))
command_parts = in_command_parts[:]
p = self
while command_parts:
cmd = command_parts.pop(0).decode('ascii')
if cmd.startswith('_'):
raise ProtectedCommandError(in_command_parts)
try:
f = getattr(p, cmd)
except AttributeError:
raise NoSuchCommandError(in_command_parts)
if hasattr(f, 'protected'):
raise ProtectedCommandError(in_command_parts)
if isinstance(f, CommandHandler) and command_parts:
return f.get(command_parts)
p = f
return f
@protected
def run(self, command, *args):
""" finds and runs a command """
logging.debug("processCommand %s(%s)" % (command, args))
try:
f = self.get(command)
except NoSuchCommandError:
self.__unhandled__(command, *args)
return
logging.debug('f %s' % f)
try:
f(*args)
except Exception, e:
logging.error('command raised %s' % e)
logging.error(traceback.format_exc())
raise CommandError(command)
@protected
def __unhandled__(self, cmd, *args):
"""The default handler for commands. Override this method to
        apply custom behavior (for example, printing) to unhandled commands.
"""
logging.debug('unhandled command %s(%s)' % (cmd, args))
class DefaultCommandHandler(CommandHandler):
""" CommandHandler that provides methods for the normal operation of IRC.
If you want your bot to properly respond to pings, etc, you should subclass this.
"""
def ping(self, prefix, server):
self.client.send('PONG', server)
class DefaultBotCommandHandler(CommandHandler):
""" default command handler for bots. methods/attributes are made
available as commands """
@protected
def getVisibleCommands(self, obj=None):
test = (lambda x: isinstance(x, CommandHandler) or \
inspect.ismethod(x) or inspect.isfunction(x))
members = inspect.getmembers(obj or self, test)
return [m for m, _ in members
if (not m.startswith('_') and
not hasattr(getattr(obj, m), 'protected'))]
def help(self, sender, dest, arg=None):
"""list all available commands or get help on a specific command"""
logging.info('help sender=%s dest=%s arg=%s' % (sender, dest, arg))
if not arg:
commands = self.getVisibleCommands()
commands.sort()
helpers.msg(self.client, dest,
"available commands: %s" % " ".join(commands))
else:
try:
f = self.get(arg)
except CommandError, e:
helpers.msg(self.client, dest, str(e))
return
doc = f.__doc__.strip() if f.__doc__ else "No help available"
if not inspect.ismethod(f):
subcommands = self.getVisibleCommands(f)
if subcommands:
doc += " [sub commands: %s]" % " ".join(subcommands)
helpers.msg(self.client, dest, "%s: %s" % (arg, doc))
class BotCommandHandler(DefaultCommandHandler):
""" complete command handler for bots """
def __init__(self, client, command_handler):
DefaultCommandHandler.__init__(self, client)
self.command_handler = command_handler
def privmsg(self, prefix, dest, msg):
self.tryBotCommand(prefix, dest, msg)
@protected
def tryBotCommand(self, prefix, dest, msg):
""" tests a command to see if its a command for the bot, returns True
and calls self.processBotCommand(cmd, sender) if its is.
"""
logging.debug("tryBotCommand('%s' '%s' '%s')" % (prefix, dest, msg))
if dest == self.client.nick:
dest = parse_nick(prefix)[0]
elif msg.startswith(self.client.nick):
msg = msg[len(self.client.nick)+1:]
else:
return False
msg = msg.strip()
parts = msg.split(' ', 1)
command = parts[0]
arg = parts[1:]
try:
self.command_handler.run(command, prefix, dest, *arg)
except CommandError, e:
helpers.msg(self.client, dest, str(e))
return True
| mit | -8,005,940,691,711,680,000 | 31.429245 | 85 | 0.613236 | false |
RIKSOF/scspell-jenkins | scspell_lib/_util.py | 1 | 1481 | ############################################################################
# scspell
# Copyright (C) 2009 Paul Pelzl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
############################################################################
"""
_util -- utility functions which may be useful across the source tree.
"""
# Settings for this session
VERBOSITY_NORMAL = 1
VERBOSITY_DEBUG = 2
VERBOSITY_MAX = VERBOSITY_DEBUG
SETTINGS = {'verbosity' : VERBOSITY_NORMAL}
def mutter(level, text):
"""Print text to the console, if the level is not higher than the
current verbosity setting."""
if level <= SETTINGS['verbosity']:
print text
def set_verbosity(value):
"""Set the verbosity level to a given integral value. The constants
VERBOSITY_* are good choices."""
SETTINGS['verbosity'] = value
# scspell-id: b114984a-c7aa-40a8-9a53-b54fb6a52582
| gpl-2.0 | 4,990,107,826,805,204,000 | 32.659091 | 76 | 0.654288 | false |
robotgear/robotgear | robotgear/settings.py | 1 | 3588 | """
Django settings for robotgear project.
Generated by 'django-admin startproject' using Django 1.11.7.
"""
import os
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'semanticuiforms',
'django_q',
'users',
'teams',
'posts'
]
try:
env = os.environ['ROBOTGEAR_ENV']
except KeyError:
env = 'DEBUG'
if env == 'DEBUG':
DEBUG = True
SECRET_KEY = '1$(%%u4n_(w%@6u&2%lgm^93-in4%8t&pd=o)0c_d(_n7(u&#@'
ALLOWED_HOSTS = []
INSTALLED_APPS += ['debug_toolbar', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db',
'PORT': '5432',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
elif env == 'PROD':
pass
elif env == 'TEST':
pass
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'robotgear.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
WSGI_APPLICATION = 'robotgear.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Configure custom user model
AUTH_USER_MODEL = 'users.User'
INTERNAL_IPS = '127.0.0.1'
LOGIN_URL = 'login'
Q_CLUSTER = {
'name': 'robotgear',
'workers': 2,
'recycle': 500,
'catch_up': False,
"ack_failures": True,
'retry': 100000,
'label': 'Task Queue',
'orm': 'default'
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'unix:/tmp/memcached.sock',
}
}
| mit | 421,187,835,858,788,700 | 21.566038 | 91 | 0.62709 | false |
TheLady/audio-normalize | setup.py | 1 | 1535 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
'docopt',
]
test_requirements = [
# 'pytest',
]
import avconv_normalize
setup(
name='avconv-normalize',
version=avconv_normalize.__version__,
description="Normalize audio via Libav (avconv)",
long_description=readme + '\n\n' + history,
author="Werner Robitza",
author_email='[email protected]',
url='https://github.com/slhck/audio-normalize',
packages=[
'avconv_normalize',
],
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='avconv, ffmpeg, libav, normalize, audio',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# test_suite='tests',
# cmdclass={'test': PyTest},
# tests_require=test_requirements,
entry_points={
'console_scripts': [
'avconv-normalize = avconv_normalize.__main__:main'
]
},
)
| mit | -9,072,358,882,392,883,000 | 25.929825 | 63 | 0.605212 | false |
commonsense/divisi | csc/divisi/flavors.py | 1 | 5345 | from csc.divisi.tensor import DictTensor
from csc.divisi.ordered_set import OrderedSet
from csc.divisi.labeled_view import LabeledView
def add_triple_to_matrix(matrix, triple, value=1.0):
'''
Adds a triple (left, relation, right) to the matrix in the 2D unfolded format.
This is the new add_assertion_tuple.
'''
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
matrix.inc((left, rfeature), value)
matrix.inc((right, lfeature), value)
def set_triple_in_matrix(matrix, triple, value=1.0):
''' Sets a triple (left, relation, right) in the matrix in the 2D
unfolded format to the specified value.
'''
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
matrix[left, rfeature] = value
matrix[right, lfeature] = value
###
### Assertion Tensors
###
class AssertionTensor(LabeledView):
'''
All AssertionTensors have the following functions:
.add_triple(triple, value)
.set_triple(triple, value)
.add_identity(text, value=1.0, relation='Identity')
where triple is (concept1, relation, concept2).
They also have the convenience classmethod from_triples.
'''
def add_identity(self, text, value=1.0, relation='Identity'):
self.add_triple((text, relation, text), value)
def bake(self):
'''
Simplify the representation.
'''
return LabeledView(self.tensor, self._labels)
def add_triples(self, triples, accumulate=True, constant_weight=None):
if accumulate: add = self.add_triple
else: add = self.set_triple
if constant_weight:
for triple in triples:
add(triple, constant_weight)
else:
for triple, weight in triples:
add(triple, weight)
@classmethod
def from_triples(cls, triples, accumulate=True, constant_weight=None):
mat = cls()
mat.add_triples(triples, accumulate, constant_weight)
return mat
def add_identities(self, value=1.0, relation='Identity'):
if not value: return # 0 or False means not to actually add identities.
for concept in self.concepts():
self.add_triple((concept, relation, concept), value)
class ConceptByFeatureMatrix(AssertionTensor):
'''
This is the typical AnalogySpace matrix. It stores each assertion
twice: once as (c1, ('right', rel, c2)) and once as (c2, ('left',
rel, c1)).
This class is a convenience for building matrices in this
format. Once you've add_triple'sed everything, you can call
.bake() to convert it back to a plain old LabeledView of a
DictTensor, just like make_sparse_labeled_tensor does.
'''
def __init__(self):
super(ConceptByFeatureMatrix, self).__init__(
DictTensor(2), [OrderedSet() for _ in '01'])
add_triple = add_triple_to_matrix
set_triple = set_triple_in_matrix
def concepts(self): return self.label_list(0)
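# Illustrative sketch (not part of the original module): with the layout
# described in the class docstring above, a single assertion produces two
# entries -- each concept paired with a feature describing the other side.
#
#     mat = ConceptByFeatureMatrix.from_triples([(('dog', 'IsA', 'pet'), 1.0)])
#     mat['dog', ('right', 'IsA', 'pet')]   # -> 1.0
#     mat['pet', ('left', 'IsA', 'dog')]    # -> 1.0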
class FeatureByConceptMatrix(AssertionTensor):
'''
A transposed ConceptByFeatureMatrix; see it for documentation.
'''
def __init__(self):
super(FeatureByConceptMatrix, self).__init__(
DictTensor(2), [OrderedSet() for _ in '01'])
def add_triple(self, triple, value=1.0):
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
self.inc((rfeature, left), value)
self.inc((lfeature, right), value)
def set_triple(self, triple, value=1.0):
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
self[rfeature, left] = value
self[lfeature, right] = value
def concepts(self): return self.label_list(1)
class ConceptRelationConceptTensor(AssertionTensor):
'''
This is a straightforward encoding of concepts as a 3D tensor.
'''
def __init__(self):
# FIXME: yes this saves space, but it might make a row or column be zero.
concepts, relations = OrderedSet(), OrderedSet()
super(ConceptRelationConceptTensor, self).__init__(
DictTensor(3), [concepts, relations, concepts])
def concepts(self): return self.label_list(0)
def add_triple(self, triple, value=1.0):
left, relation, right = triple
self.inc((left, relation, right), value)
def set_triple(self, triple, value=1.0):
left, relation, right = triple
self[left, relation, right] = value
class MirroringCRCTensor(ConceptRelationConceptTensor):
'''
Every assertion (c1, r, c2) in this tensor has an inverse,
(c2, r', c1).
This is analogous to how the 2D tensor makes left and right features.
Inverse relations are constructed from ordinary relations by
prefixing a '-'.
'''
def add_triple(self, triple, value=1.0):
left, relation, right = triple
self.inc((left, relation, right), value) # normal
self.inc((right, '-'+relation, left), value) # inverse
def set_triple(self, triple, value=1.0):
left, relation, right = triple
self[left, relation, right] = value
        self[right, '-'+relation, left] = value
| gpl-3.0 | -1,865,229,490,018,606,000 | 32.198758 | 82 | 0.637605 | false |
moyaproject/moya | moya/elements/registry.py | 1 | 4712 | from __future__ import unicode_literals
from .. import errors
from ..tools import extract_namespace
from .. import namespaces
from ..compat import itervalues
from collections import defaultdict
import inspect
class Meta(object):
logic_skip = False
virtual_tag = False
is_call = False
is_try = False
is_loop = False
app_first_arg = False
text_nodes = None
trap_exceptions = False
translate = False
class ElementRegistry(object):
default_registry = None
_registry_stack = []
def clear(self):
self._registry.clear()
self._dynamic_elements.clear()
del self._registry_stack[:]
@classmethod
def push_registry(cls, registry):
cls._registry_stack.append(registry)
@classmethod
def pop_registry(cls):
cls._registry_stack.pop()
@classmethod
def get_default(cls):
return cls._registry_stack[-1]
def __init__(self, update_from_default=True):
self._registry = defaultdict(dict)
self._dynamic_elements = {}
if update_from_default:
self._registry.update(self.default_registry._registry)
self._dynamic_elements.update(self.default_registry._dynamic_elements)
def clone(self):
"""Return a copy of this registry"""
registry = ElementRegistry(update_from_default=False)
registry._registry = self._registry.copy()
registry._dynamic_elements = self._dynamic_elements.copy()
return registry
def set_default(self):
"""Reset this registry to the default registry (before project loaded)"""
self._registry = self.default_registry._registry.copy()
self._dynamic_elements = self.default_registry._dynamic_elements.copy()
def register_element(self, xmlns, name, element):
"""Add a dynamic element to the element registry"""
xmlns = xmlns or namespaces.run
if name in self._registry[xmlns]:
element_class = self._registry[xmlns][name]
definition = getattr(element_class, "_location", None)
if definition is None:
definition = inspect.getfile(element_class)
if xmlns:
raise errors.ElementError(
'<{}> already registered in "{}" for xmlns "{}"'.format(
name, definition, xmlns
),
element=getattr(element, "element", element),
)
else:
raise errors.ElementError(
'<{}/> already registered in "{}"'.format(name, definition),
element=element,
)
self._registry[xmlns][name] = element
def add_dynamic_registry(self, xmlns, element_callable):
"""Add a dynamic registry (element factory)"""
self._dynamic_elements[xmlns] = element_callable
def clear_registry(self):
"""Clear the registry (called on archive reload)"""
self._registry.clear()
def get_elements_in_xmlns(self, xmlns):
"""Get all elements defined within a given namespace"""
return self._registry.get(xmlns, {})
def get_elements_in_lib(self, long_name):
"""Get all elements defined by a given library"""
lib_elements = []
for namespace in itervalues(self._registry):
lib_elements.extend(
element
for element in itervalues(namespace)
if element._lib_long_name == long_name
)
return lib_elements
def get_element_type(self, xmlns, name):
"""Get an element by namespace and name"""
if xmlns in self._dynamic_elements:
return self._dynamic_elements[xmlns](name)
return self._registry.get(xmlns, {}).get(name, None)
def find_xmlns(self, name):
"""Find the xmlns with contain a given tag, or return None"""
for xmlns in sorted(self._registry.keys()):
if name in self._registry[xmlns]:
return xmlns
return None
def check_namespace(self, xmlns):
"""Check if a namespace exists in the registry"""
return xmlns in self._registry
def set_registry(self, registry):
"""Restore a saved registry"""
self._registry = registry._registry.copy()
self._dynamic_elements = registry._dynamic_elements.copy()
def get_tag(self, tag):
"""Get a tag from it's name (in Clarke's notation)"""
return self.get_element_type(*extract_namespace(tag))
default_registry = ElementRegistry.default_registry = ElementRegistry(
update_from_default=False
)
ElementRegistry.push_registry(ElementRegistry.default_registry)
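# Illustrative sketch (not part of the original module) of how the registry is
# meant to be used; the namespace and the MyTag element class are hypothetical:
#
#     registry = ElementRegistry.get_default()
#     registry.register_element("http://example.org/mylib", "mytag", MyTag)
#     registry.get_element_type("http://example.org/mylib", "mytag")   # -> MyTag
#     registry.get_tag("{http://example.org/mylib}mytag")              # same lookup, Clarke's notation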
| mit | -1,306,243,355,464,368,600 | 31.951049 | 82 | 0.610781 | false |
cmcqueen/simplerandom | python/python3/simplerandom/iterators/_iterators_py.py | 1 | 40947 |
from simplerandom._bitcolumnmatrix import BitColumnMatrix
__all__ = [
"Cong",
"SHR3",
"MWC1",
"MWC2",
"MWC64",
"KISS",
"KISS2",
"LFSR113",
"LFSR88",
"_traverse_iter",
]
def _traverse_iter(o, tree_types=(list, tuple)):
"""Iterate over nested containers and/or iterators.
This allows generator __init__() functions to be passed seeds either as
a series of arguments, or as a list/tuple.
"""
SIMPLERANDOM_BITS = 32
SIMPLERANDOM_MOD = 2**SIMPLERANDOM_BITS
SIMPLERANDOM_MASK = SIMPLERANDOM_MOD - 1
if isinstance(o, tree_types) or getattr(o, '__iter__', False):
for value in o:
for subvalue in _traverse_iter(value):
while True:
yield subvalue & SIMPLERANDOM_MASK
subvalue >>= SIMPLERANDOM_BITS
# If value is negative, then it effectively has infinitely extending
# '1' bits (modelled as a 2's complement representation). So when
# right-shifting it, it will eventually get to -1, and any further
# right-shifting will not change it.
if subvalue == 0 or subvalue == -1:
break
else:
yield o
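# Illustrative sketch (not part of the original module): seed collections are
# flattened, and integers wider than 32 bits are split into 32-bit words,
# least-significant word first.
#
#     list(_traverse_iter([1, (2, 2**34 + 5)]))    # -> [1, 2, 5, 4]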
def _repeat_iter(input_iter):
"""Iterate over the input iter values. Then repeat the last value
indefinitely. This is useful to repeat seed values when an insufficient
number of seeds are provided.
E.g. KISS(1) effectively becomes KISS(1, 1, 1, 1), rather than (if we just
used default values) KISS(1, default-value, default-value, default-value)
It is better to repeat the last seed value, rather than just using default
values. Given two generators seeded with an insufficient number of seeds,
repeating the last seed value means their states are more different from
each other, with less correlation between their generated outputs.
"""
last_value = None
for value in input_iter:
last_value = value
yield value
if last_value is not None:
while True:
yield last_value
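# Illustrative sketch (not part of the original module):
#
#     from itertools import islice
#     list(islice(_repeat_iter(iter([7, 8])), 5))    # -> [7, 8, 8, 8, 8]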
def _next_seed_int32_or_default(seed_iter, default_value):
try:
seed_item = next(seed_iter)
except StopIteration:
return default_value
else:
if seed_item is None:
return default_value
else:
return (int(seed_item) & 0xFFFFFFFF)
def _geom_series_uint32(r, n):
"""Unsigned integer calculation of sum of geometric series:
1 + r + r^2 + r^3 + ... r^(n-1)
summed to n terms.
Calculated modulo 2**32.
Use the formula (r**n - 1) / (r - 1)
"""
if n == 0:
return 0
if n == 1 or r == 0:
return 1
m = 2**32
# Split (r - 1) into common factors with the modulo 2**32 -- i.e. all
# factors of 2; and other factors which are coprime with the modulo 2**32.
other_factors = r - 1
common_factor = 1
while (other_factors % 2) == 0:
other_factors //= 2
common_factor *= 2
other_factors_inverse = pow(other_factors, m - 1, m)
numerator = pow(r, n, common_factor * m) - 1
return (numerator // common_factor * other_factors_inverse) % m
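# Illustrative check (not part of the original module): the modular closed form
# above agrees with direct summation of the series.
#
#     _geom_series_uint32(69069, 4) == sum(69069**k for k in range(4)) % 2**32    # -> True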
class Cong(object):
'''Congruential random number generator
This is a congruential generator with the widely used
69069 multiplier: x[n]=69069x[n-1]+12345. It has
period 2**32.
The leading half of its 32 bits seem to pass tests,
but bits in the last half are too regular. It fails
tests for which those bits play a significant role.
But keep in mind that it is a rare application for
which the trailing bits play a significant role. Cong
is one of the most widely used generators of the last
30 years, as it was the system generator for VAX and
was incorporated in several popular software packages,
all seemingly without complaint.
'''
SIMPLERANDOM_MOD = 2**32
SIMPLERANDOM_MAX = 2**32 - 1
CONG_CYCLE_LEN = 2**32
CONG_MULT = 69069
CONG_CONST = 12345
@staticmethod
def min():
return 0
@staticmethod
def max():
return Cong.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
self.cong = _next_seed_int32_or_default(seed_iter, 0)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
pass
def __next__(self):
self.cong = (69069 * self.cong + 12345) & 0xFFFFFFFF
return self.cong
def current(self):
return self.cong
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
self.cong ^= value_int
next(self)
return self.cong
def __iter__(self):
return self
def getstate(self):
return (self.cong, )
def setstate(self, state):
(self.cong, ) = (int(val) & 0xFFFFFFFF for val in state)
def jumpahead(self, n):
# Cong.jumpahead(n) = r**n * x mod 2**32 +
# c * (1 + r + r**2 + ... + r**(n-1)) mod 2**32
# where r = 69069 and c = 12345.
#
# The part c * (1 + r + r**2 + ... + r**(n-1)) is a geometric series.
# For calculating geometric series mod 2**32, see:
# http://www.codechef.com/wiki/tutorial-just-simple-sum#Back_to_the_geometric_series
n = int(n) % self.CONG_CYCLE_LEN
mult_exp = pow(self.CONG_MULT, n, self.SIMPLERANDOM_MOD)
add_const = (_geom_series_uint32(self.CONG_MULT, n) * self.CONG_CONST) & 0xFFFFFFFF
self.cong = (mult_exp * self.cong + add_const) & 0xFFFFFFFF
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.cong)) + ")"
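# Illustrative usage sketch (not part of the original module); the same
# iterator protocol applies to the other generators in this module.
#
#     rng = Cong(1234)
#     a, b = next(rng), next(rng)     # successive 32-bit outputs
#     rng2 = Cong(1234)
#     rng2.jumpahead(2)
#     rng2.current() == b             # jumpahead(n) lands on the same state as n next() calls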
class SHR3(object):
'''3-shift-register random number generator
SHR3 is a 3-shift-register generator with period
2**32-1. It uses y[n]=y[n-1](I+L^13)(I+R^17)(I+L^5),
with the y's viewed as binary vectors, L the 32x32
binary matrix that shifts a vector left 1, and R its
transpose.
SHR3 seems to pass all except those related to the
binary rank test, since 32 successive values, as
binary vectors, must be linearly independent, while
32 successive truly random 32-bit integers, viewed
as binary vectors, will be linearly independent only
about 29% of the time.
'''
SIMPLERANDOM_MOD = 2**32
SIMPLERANDOM_MAX = 2**32 - 1
SHR3_CYCLE_LEN = 2**32 - 1
_SHR3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_SHR3_MATRIX_b = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,-17)
_SHR3_MATRIX_c = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,5)
_SHR3_MATRIX = _SHR3_MATRIX_c * _SHR3_MATRIX_b * _SHR3_MATRIX_a
@staticmethod
def min():
return 1
@staticmethod
def max():
return SHR3.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
self.shr3 = _next_seed_int32_or_default(seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
if self.shr3 == 0:
# 0 is a bad seed. Invert to get a good seed.
self.shr3 = 0xFFFFFFFF
def __next__(self):
shr3 = self.shr3
shr3 ^= (shr3 & 0x7FFFF) << 13
shr3 ^= shr3 >> 17
shr3 ^= (shr3 & 0x7FFFFFF) << 5
self.shr3 = shr3
return shr3
def current(self):
return self.shr3
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
self.shr3 ^= value_int
self.sanitise()
next(self)
return self.shr3
def __iter__(self):
return self
def getstate(self):
return (self.shr3, )
def setstate(self, state):
(self.shr3, ) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
n = int(n) % self.SHR3_CYCLE_LEN
shr3 = pow(self._SHR3_MATRIX, n) * self.shr3
self.shr3 = shr3
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.shr3)) + ")"
class MWC2(object):
'''"Multiply-with-carry" random number generator
Very similar to MWC1, except that it concatenates the
two 16-bit MWC generators differently. The 'x'
generator is rotated 16 bits instead of just shifted
16 bits.
This gets much better test results than MWC1 in
L'Ecuyer's TestU01 test suite, so it should probably
be preferred.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_MWC_UPPER_MULT = 36969
_MWC_LOWER_MULT = 18000
_MWC_UPPER_MODULO = _MWC_UPPER_MULT * 2**16 - 1
_MWC_LOWER_MODULO = _MWC_LOWER_MULT * 2**16 - 1
_MWC_UPPER_CYCLE_LEN = _MWC_UPPER_MULT * 2**16 // 2 - 1
_MWC_LOWER_CYCLE_LEN = _MWC_LOWER_MULT * 2**16 // 2 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return MWC2.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.mwc_upper = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.mwc_lower = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self._sanitise_upper()
self._sanitise_lower()
def _sanitise_upper(self):
mwc_upper_orig = self.mwc_upper
# There are a few bad states--that is, any multiple of
# _MWC_UPPER_MODULO -- that is 0x9068FFFF (which is 36969 * 2**16 - 1).
sanitised_value = mwc_upper_orig % 0x9068FFFF
if sanitised_value == 0:
# Invert to get a good seed.
sanitised_value = (mwc_upper_orig ^ 0xFFFFFFFF) % 0x9068FFFF
self.mwc_upper = sanitised_value
def _sanitise_lower(self):
mwc_lower_orig = self.mwc_lower
# There are a few bad states--that is, any multiple of
# _MWC_LOWER_MODULO -- that is 0x464FFFFF (which is 18000 * 2**16 - 1).
sanitised_value = mwc_lower_orig % 0x464FFFFF
if sanitised_value == 0:
# Invert to get a good seed.
sanitised_value = (mwc_lower_orig ^ 0xFFFFFFFF) % 0x464FFFFF
self.mwc_lower = sanitised_value
def _next_upper(self):
self.mwc_upper = 36969 * (self.mwc_upper & 0xFFFF) + (self.mwc_upper >> 16)
def _next_lower(self):
self.mwc_lower = 18000 * (self.mwc_lower & 0xFFFF) + (self.mwc_lower >> 16)
def __next__(self):
# Note: this is apparently equivalent to:
# self.mwc_upper = (36969 * self.mwc_upper) % 0x9068FFFF
# self.mwc_lower = (18000 * self.mwc_lower) % 0x464FFFFF
# See Random Number Generation, Pierre L’Ecuyer, section 3.6 Linear Recurrences With Carry
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.136.6898&rep=rep1&type=pdf
self.mwc_upper = 36969 * (self.mwc_upper & 0xFFFF) + (self.mwc_upper >> 16)
self.mwc_lower = 18000 * (self.mwc_lower & 0xFFFF) + (self.mwc_lower >> 16)
return self.current() # call self.current() so that MWC1 can over-ride it
def current(self):
return (((self.mwc_upper & 0xFFFF) << 16) + (self.mwc_upper >> 16) + self.mwc_lower) & 0xFFFFFFFF
mwc = property(current) # Note that this must be over-ridden again in MWC1
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x1
if selector == 0:
self.mwc_upper ^= value_int
self._sanitise_upper()
self._next_upper()
else:
self.mwc_lower ^= value_int
self._sanitise_lower()
self._next_lower()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.mwc_upper, self.mwc_lower)
def setstate(self, state):
(self.mwc_upper, self.mwc_lower) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
# See next() note on functional equivalence.
n_upper = int(n) % self._MWC_UPPER_CYCLE_LEN
self.mwc_upper = pow(self._MWC_UPPER_MULT, n_upper, self._MWC_UPPER_MODULO) * self.mwc_upper % self._MWC_UPPER_MODULO
n_lower = int(n) % self._MWC_LOWER_CYCLE_LEN
self.mwc_lower = pow(self._MWC_LOWER_MULT, n_lower, self._MWC_LOWER_MODULO) * self.mwc_lower % self._MWC_LOWER_MODULO
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) + "," + repr(int(self.mwc_lower)) + ")"
class MWC1(MWC2):
'''"Multiply-with-carry" random number generator
This is the MWC as defined in Marsaglia's 1999
newsgroup post.
This uses two MWC generators to generate high and
low 16-bit parts, which are then combined to make a
32-bit value.
The MWC generator concatenates two 16-bit multiply-
with-carry generators:
x[n]=36969x[n-1]+carry,
    y[n]=18000y[n-1]+carry mod 2**16.
    It has a period of about 2**60.
This seems to pass all Marsaglia's Diehard tests.
However, it fails many of L'Ecuyer's TestU01
tests. The modified MWC2 generator passes many more
tests in TestU01, and should probably be preferred,
unless backwards compatibility is required.
'''
def current(self):
return (((self.mwc_upper & 0xFFFF) << 16) + self.mwc_lower) & 0xFFFFFFFF
# We have to over-ride this again, because of the way property() works.
mwc = property(current)
class MWC64(object):
'''"Multiply-with-carry" random number generator
This uses a single MWC generator with 64 bits to
generate a 32-bit value. The seeds should be 32-bit
values.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_MWC64_MULT = 698769069
_MWC64_MODULO = _MWC64_MULT * 2**32 - 1
_MWC64_CYCLE_LEN = _MWC64_MULT * 2**32 // 2 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return MWC64.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.mwc_upper = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.mwc_lower = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
state64 = (self.mwc_upper << 32) + self.mwc_lower
temp = state64
was_changed = False
# There are a few bad seeds--that is, seeds that are a multiple of
# 0x29A65EACFFFFFFFF (which is 698769069 * 2**32 - 1).
if state64 >= 0x29A65EACFFFFFFFF:
was_changed = True
temp = state64 % 0x29A65EACFFFFFFFF
if temp == 0:
# Invert to get a good seed.
temp = (state64 ^ 0xFFFFFFFFFFFFFFFF) % 0x29A65EACFFFFFFFF
was_changed = True
if was_changed:
self.mwc_upper = temp >> 32
self.mwc_lower = temp & 0xFFFFFFFF
def __next__(self):
# Note: this is apparently equivalent to:
# temp64 = (self.mwc_upper << 32) + self.mwc_lower
# temp64 = (698769069 * temp64) % 0x29A65EACFFFFFFFF
# See reference in MWC2.next().
temp64 = 698769069 * self.mwc_lower + self.mwc_upper
self.mwc_lower = temp64 & 0xFFFFFFFF
self.mwc_upper = (temp64 >> 32) & 0xFFFFFFFF
return self.mwc_lower
def current(self):
return self.mwc_lower
mwc = property(current)
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x1
if selector == 0:
self.mwc_upper ^= value_int
else:
self.mwc_lower ^= value_int
self.sanitise()
next(self)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.mwc_upper, self.mwc_lower)
def setstate(self, state):
(self.mwc_upper, self.mwc_lower) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
# See MWC2.next() note on functional equivalence.
n = int(n) % self._MWC64_CYCLE_LEN
temp64 = (self.mwc_upper << 32) + self.mwc_lower
temp64 = pow(self._MWC64_MULT, n, self._MWC64_MODULO) * temp64 % self._MWC64_MODULO
self.mwc_lower = temp64 & 0xFFFFFFFF
self.mwc_upper = (temp64 >> 32) & 0xFFFFFFFF
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) + "," + repr(int(self.mwc_lower)) + ")"
class KISS(object):
'''"Keep It Simple Stupid" random number generator
It combines the MWC2, Cong, SHR3 generators. Period is
about 2**123.
This is based on, but not identical to, Marsaglia's
KISS generator as defined in his 1999 newsgroup post.
That generator most significantly has problems with its
SHR3 component (see notes on SHR3). Since we are not
keeping compatibility with the 1999 KISS generator for
that reason, we take the opportunity to slightly
update the MWC and Cong generators too.
'''
SIMPLERANDOM_MAX = 2**32 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return KISS.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.random_mwc = MWC2(repeat_seed_iter)
self.random_cong = Cong(repeat_seed_iter)
self.random_shr3 = SHR3(repeat_seed_iter)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def __next__(self):
mwc_val = next(self.random_mwc)
cong_val = next(self.random_cong)
shr3_val = next(self.random_shr3)
return ((mwc_val ^ cong_val) + shr3_val) & 0xFFFFFFFF
def current(self):
return ((self.random_mwc.current() ^ self.random_cong.cong) + self.random_shr3.shr3) & 0xFFFFFFFF
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x3
if selector == 0:
self.random_mwc.mwc_upper ^= value_int
self.random_mwc._sanitise_upper()
self.random_mwc._next_upper()
elif selector == 1:
self.random_mwc.mwc_lower ^= value_int
self.random_mwc._sanitise_lower()
self.random_mwc._next_lower()
elif selector == 2:
self.random_cong.cong ^= value_int
# Cong doesn't need any sanitising
next(self.random_cong)
else: # selector == 3
self.random_shr3.shr3 ^= value_int
self.random_shr3.sanitise()
next(self.random_shr3)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.random_mwc.getstate(), self.random_cong.getstate(), self.random_shr3.getstate())
def setstate(self, state):
(mwc_state, cong_state, shr3_state) = state
self.random_mwc.setstate(mwc_state)
self.random_cong.setstate(cong_state)
self.random_shr3.setstate(shr3_state)
def jumpahead(self, n):
self.random_mwc.jumpahead(n)
self.random_cong.jumpahead(n)
self.random_shr3.jumpahead(n)
def _get_mwc_upper(self):
return self.random_mwc.mwc_upper
def _set_mwc_upper(self, value):
self.random_mwc.mwc_upper = value
mwc_upper = property(_get_mwc_upper, _set_mwc_upper)
def _get_mwc_lower(self):
return self.random_mwc.mwc_lower
def _set_mwc_lower(self, value):
self.random_mwc.mwc_lower = value
mwc_lower = property(_get_mwc_lower, _set_mwc_lower)
def _get_mwc(self):
return self.random_mwc.current()
mwc = property(_get_mwc)
def _get_shr3(self):
return self.random_shr3.shr3
def _set_shr3(self, value):
self.random_shr3.shr3 = value
shr3 = property(_get_shr3, _set_shr3)
def _get_cong(self):
return self.random_cong.cong
def _set_cong(self, value):
self.random_cong.cong = value
cong = property(_get_cong, _set_cong)
def __repr__(self):
return (self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) +
"," + repr(int(self.mwc_lower)) +
"," + repr(int(self.cong)) +
"," + repr(int(self.shr3)) + ")")
class KISS2(object):
'''"Keep It Simple Stupid" random number generator
It combines the MWC64, Cong, SHR3 generators. Period
is about 2**123.
This is a slightly updated KISS generator design, from
a newsgroup post in 2003:
http://groups.google.com/group/sci.math/msg/9959175f66dd138f
The MWC component uses a single 64-bit calculation,
instead of two 32-bit calculations that are combined.
'''
SIMPLERANDOM_MAX = 2**32 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return KISS2.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.random_mwc = MWC64(repeat_seed_iter)
self.random_cong = Cong(repeat_seed_iter)
self.random_shr3 = SHR3(repeat_seed_iter)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def __next__(self):
mwc_val = next(self.random_mwc)
cong_val = next(self.random_cong)
shr3_val = next(self.random_shr3)
return (mwc_val + cong_val + shr3_val) & 0xFFFFFFFF
def current(self):
return (self.random_mwc.current() + self.random_cong.cong + self.random_shr3.shr3) & 0xFFFFFFFF
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x3
if selector == 0:
self.random_mwc.mwc_upper ^= value_int
self.random_mwc.sanitise()
next(self.random_mwc)
elif selector == 1:
self.random_mwc.mwc_lower ^= value_int
self.random_mwc.sanitise()
next(self.random_mwc)
elif selector == 2:
self.random_cong.cong ^= value_int
# Cong doesn't need any sanitising
next(self.random_cong)
else: # selector == 3
self.random_shr3.shr3 ^= value_int
self.random_shr3.sanitise()
next(self.random_shr3)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.random_mwc.getstate(), self.random_cong.getstate(), self.random_shr3.getstate())
def setstate(self, state):
(mwc_state, cong_state, shr3_state) = state
self.random_mwc.setstate(mwc_state)
self.random_cong.setstate(cong_state)
self.random_shr3.setstate(shr3_state)
def jumpahead(self, n):
self.random_mwc.jumpahead(n)
self.random_cong.jumpahead(n)
self.random_shr3.jumpahead(n)
def _get_mwc_upper(self):
return self.random_mwc.mwc_upper
def _set_mwc_upper(self, value):
self.random_mwc.mwc_upper = value
mwc_upper = property(_get_mwc_upper, _set_mwc_upper)
def _get_mwc_lower(self):
return self.random_mwc.mwc_lower
def _set_mwc_lower(self, value):
self.random_mwc.mwc_lower = value
mwc_lower = property(_get_mwc_lower, _set_mwc_lower)
def _get_mwc(self):
return self.random_mwc.mwc
mwc = property(_get_mwc)
def _get_shr3(self):
return self.random_shr3.shr3
def _set_shr3(self, value):
self.random_shr3.shr3 = value
shr3 = property(_get_shr3, _set_shr3)
def _get_cong(self):
return self.random_cong.cong
def _set_cong(self, value):
self.random_cong.cong = value
cong = property(_get_cong, _set_cong)
def __repr__(self):
return (self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) +
"," + repr(int(self.mwc_lower)) +
"," + repr(int(self.cong)) +
"," + repr(int(self.shr3)) + ")")
def lfsr_next_one_seed(seed_iter, min_value_shift):
"""High-quality seeding for LFSR generators.
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
In case generators are seeded from an incrementing input (such as a system
timer), and between increments only the lower bits may change, we would
also like the lower bits of the input to change the initial state, and not
just be discarded. So we do basic manipulation of the seed input value to
ensure that all bits of the seed input affect the initial state.
"""
try:
seed = next(seed_iter)
except StopIteration:
return 0xFFFFFFFF
else:
if seed is None:
return 0xFFFFFFFF
else:
seed = int(seed) & 0xFFFFFFFF
working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF
min_value = 1 << min_value_shift
if working_seed < min_value:
working_seed = (seed << 24) & 0xFFFFFFFF
if working_seed < min_value:
working_seed ^= 0xFFFFFFFF
return working_seed
def lfsr_validate_one_seed(seed, min_value_shift):
'''Validate seeds for LFSR generators
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
This is a light-weight validation of state, used from setstate().
'''
min_value = 1 << min_value_shift
if seed < min_value:
seed ^= 0xFFFFFFFF
return seed
def lfsr_state_z(z):
return int(z ^ ((z << 16) & 0xFFFFFFFF))
def lfsr_repr_z(z):
return repr(int(z ^ ((z << 16) & 0xFFFFFFFF)))
class LFSR113(object):
'''Combined LFSR random number generator by L'Ecuyer
It combines 4 LFSR generators. The generators have been
chosen for maximal equidistribution.
The period is approximately 2**113.
"Tables of Maximally-Equidistributed Combined Lfsr Generators"
P. L'Ecuyer
Mathematics of Computation, 68, 225 (1999), 261-269.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_LFSR113_1_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,6)
_LFSR113_1_MATRIX_b = BitColumnMatrix.shift(32,-13)
_LFSR113_1_MATRIX_c = BitColumnMatrix.mask(32, 1, 32)
_LFSR113_1_MATRIX_d = BitColumnMatrix.shift(32,18)
_LFSR113_1_MATRIX = _LFSR113_1_MATRIX_d * _LFSR113_1_MATRIX_c + _LFSR113_1_MATRIX_b * _LFSR113_1_MATRIX_a
_LFSR113_1_CYCLE_LEN = 2**(32 - 1) - 1
_LFSR113_2_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,2)
_LFSR113_2_MATRIX_b = BitColumnMatrix.shift(32,-27)
_LFSR113_2_MATRIX_c = BitColumnMatrix.mask(32, 3, 32)
_LFSR113_2_MATRIX_d = BitColumnMatrix.shift(32,2)
_LFSR113_2_MATRIX = _LFSR113_2_MATRIX_d * _LFSR113_2_MATRIX_c + _LFSR113_2_MATRIX_b * _LFSR113_2_MATRIX_a
_LFSR113_2_CYCLE_LEN = 2**(32 - 3) - 1
_LFSR113_3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_LFSR113_3_MATRIX_b = BitColumnMatrix.shift(32,-21)
_LFSR113_3_MATRIX_c = BitColumnMatrix.mask(32, 4, 32)
_LFSR113_3_MATRIX_d = BitColumnMatrix.shift(32,7)
_LFSR113_3_MATRIX = _LFSR113_3_MATRIX_d * _LFSR113_3_MATRIX_c + _LFSR113_3_MATRIX_b * _LFSR113_3_MATRIX_a
_LFSR113_3_CYCLE_LEN = 2**(32 - 4) - 1
_LFSR113_4_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,3)
_LFSR113_4_MATRIX_b = BitColumnMatrix.shift(32,-12)
_LFSR113_4_MATRIX_c = BitColumnMatrix.mask(32, 7, 32)
_LFSR113_4_MATRIX_d = BitColumnMatrix.shift(32,13)
_LFSR113_4_MATRIX = _LFSR113_4_MATRIX_d * _LFSR113_4_MATRIX_c + _LFSR113_4_MATRIX_b * _LFSR113_4_MATRIX_a
_LFSR113_4_CYCLE_LEN = 2**(32 - 7) - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return LFSR113.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.z1 = lfsr_next_one_seed(repeat_seed_iter, 1)
self.z2 = lfsr_next_one_seed(repeat_seed_iter, 3)
self.z3 = lfsr_next_one_seed(repeat_seed_iter, 4)
self.z4 = lfsr_next_one_seed(repeat_seed_iter, 7)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self.z1 = lfsr_validate_one_seed(self.z1, 1)
self.z2 = lfsr_validate_one_seed(self.z2, 3)
self.z3 = lfsr_validate_one_seed(self.z3, 4)
self.z4 = lfsr_validate_one_seed(self.z4, 7)
def _next_z1(self):
b = (((self.z1 & 0x03FFFFFF) << 6) ^ self.z1) >> 13
self.z1 = ((self.z1 & 0x00003FFE) << 18) ^ b
def _next_z2(self):
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 27
self.z2 = ((self.z2 & 0x3FFFFFF8) << 2) ^ b
def _next_z3(self):
b = (((self.z3 & 0x0007FFFF) << 13) ^ self.z3) >> 21
self.z3 = ((self.z3 & 0x01FFFFF0) << 7) ^ b
def _next_z4(self):
b = (((self.z4 & 0x1FFFFFFF) << 3) ^ self.z4) >> 12
self.z4 = ((self.z4 & 0x0007FF80) << 13) ^ b
def __next__(self):
b = (((self.z1 & 0x03FFFFFF) << 6) ^ self.z1) >> 13
self.z1 = ((self.z1 & 0x00003FFE) << 18) ^ b
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 27
self.z2 = ((self.z2 & 0x3FFFFFF8) << 2) ^ b
b = (((self.z3 & 0x0007FFFF) << 13) ^ self.z3) >> 21
self.z3 = ((self.z3 & 0x01FFFFF0) << 7) ^ b
b = (((self.z4 & 0x1FFFFFFF) << 3) ^ self.z4) >> 12
self.z4 = ((self.z4 & 0x0007FF80) << 13) ^ b
return self.z1 ^ self.z2 ^ self.z3 ^ self.z4
def current(self):
return self.z1 ^ self.z2 ^ self.z3 ^ self.z4
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 30) & 0x3
if selector == 0:
self.z1 = lfsr_validate_one_seed(self.z1 ^ value_int, 1)
self._next_z1()
elif selector == 1:
self.z2 = lfsr_validate_one_seed(self.z2 ^ value_int, 3)
self._next_z2()
elif selector == 2:
self.z3 = lfsr_validate_one_seed(self.z3 ^ value_int, 4)
self._next_z3()
else: # selector == 3
self.z4 = lfsr_validate_one_seed(self.z4 ^ value_int, 7)
self._next_z4()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (lfsr_state_z(self.z1), lfsr_state_z(self.z2), lfsr_state_z(self.z3), lfsr_state_z(self.z4))
def setstate(self, state):
self.seed(state)
def jumpahead(self, n):
n_1 = int(n) % self._LFSR113_1_CYCLE_LEN
n_2 = int(n) % self._LFSR113_2_CYCLE_LEN
n_3 = int(n) % self._LFSR113_3_CYCLE_LEN
n_4 = int(n) % self._LFSR113_4_CYCLE_LEN
z1 = pow(self._LFSR113_1_MATRIX, n_1) * self.z1
self.z1 = z1
z2 = pow(self._LFSR113_2_MATRIX, n_2) * self.z2
self.z2 = z2
z3 = pow(self._LFSR113_3_MATRIX, n_3) * self.z3
self.z3 = z3
z4 = pow(self._LFSR113_4_MATRIX, n_4) * self.z4
self.z4 = z4
def __repr__(self):
return (self.__class__.__name__ + "(" + lfsr_repr_z(self.z1) +
"," + lfsr_repr_z(self.z2) +
"," + lfsr_repr_z(self.z3) +
"," + lfsr_repr_z(self.z4) + ")")
class LFSR88(object):
'''Combined LFSR random number generator by L'Ecuyer
It combines 3 LFSR generators. The generators have been
chosen for maximal equidistribution.
The period is approximately 2**88.
"Maximally Equidistributed Combined Tausworthe Generators"
P. L'Ecuyer
Mathematics of Computation, 65, 213 (1996), 203-213.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_LFSR88_1_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_LFSR88_1_MATRIX_b = BitColumnMatrix.shift(32,-19)
_LFSR88_1_MATRIX_c = BitColumnMatrix.mask(32, 1, 32)
_LFSR88_1_MATRIX_d = BitColumnMatrix.shift(32,12)
_LFSR88_1_MATRIX = _LFSR88_1_MATRIX_d * _LFSR88_1_MATRIX_c + _LFSR88_1_MATRIX_b * _LFSR88_1_MATRIX_a
_LFSR88_1_CYCLE_LEN = 2**(32 - 1) - 1
_LFSR88_2_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,2)
_LFSR88_2_MATRIX_b = BitColumnMatrix.shift(32,-25)
_LFSR88_2_MATRIX_c = BitColumnMatrix.mask(32, 3, 32)
_LFSR88_2_MATRIX_d = BitColumnMatrix.shift(32,4)
_LFSR88_2_MATRIX = _LFSR88_2_MATRIX_d * _LFSR88_2_MATRIX_c + _LFSR88_2_MATRIX_b * _LFSR88_2_MATRIX_a
_LFSR88_2_CYCLE_LEN = 2**(32 - 3) - 1
_LFSR88_3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,3)
_LFSR88_3_MATRIX_b = BitColumnMatrix.shift(32,-11)
_LFSR88_3_MATRIX_c = BitColumnMatrix.mask(32, 4, 32)
_LFSR88_3_MATRIX_d = BitColumnMatrix.shift(32,17)
_LFSR88_3_MATRIX = _LFSR88_3_MATRIX_d * _LFSR88_3_MATRIX_c + _LFSR88_3_MATRIX_b * _LFSR88_3_MATRIX_a
_LFSR88_3_CYCLE_LEN = 2**(32 - 4) - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return LFSR88.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.z1 = lfsr_next_one_seed(repeat_seed_iter, 1)
self.z2 = lfsr_next_one_seed(repeat_seed_iter, 3)
self.z3 = lfsr_next_one_seed(repeat_seed_iter, 4)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self.z1 = lfsr_validate_one_seed(self.z1, 1)
self.z2 = lfsr_validate_one_seed(self.z2, 3)
self.z3 = lfsr_validate_one_seed(self.z3, 4)
def _next_z1(self):
b = (((self.z1 & 0x0007FFFF) << 13) ^ self.z1) >> 19
self.z1 = ((self.z1 & 0x000FFFFE) << 12) ^ b
def _next_z2(self):
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 25
self.z2 = ((self.z2 & 0x0FFFFFF8) << 4) ^ b
def _next_z3(self):
b = (((self.z3 & 0x1FFFFFFF) << 3) ^ self.z3) >> 11
self.z3 = ((self.z3 & 0x00007FF0) << 17) ^ b
def __next__(self):
b = (((self.z1 & 0x0007FFFF) << 13) ^ self.z1) >> 19
self.z1 = ((self.z1 & 0x000FFFFE) << 12) ^ b
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 25
self.z2 = ((self.z2 & 0x0FFFFFF8) << 4) ^ b
b = (((self.z3 & 0x1FFFFFFF) << 3) ^ self.z3) >> 11
self.z3 = ((self.z3 & 0x00007FF0) << 17) ^ b
return self.z1 ^ self.z2 ^ self.z3
def current(self):
return self.z1 ^ self.z2 ^ self.z3
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
if current < 1431655765: # constant is 2^32 / 3
self.z1 = lfsr_validate_one_seed(self.z1 ^ value_int, 1)
self._next_z1()
elif current < 2863311531: # constant is 2^32 * 2 / 3
self.z2 = lfsr_validate_one_seed(self.z2 ^ value_int, 3)
self._next_z2()
else:
self.z3 = lfsr_validate_one_seed(self.z3 ^ value_int, 4)
self._next_z3()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (lfsr_state_z(self.z1), lfsr_state_z(self.z2), lfsr_state_z(self.z3))
def setstate(self, state):
self.seed(state)
def jumpahead(self, n):
n_1 = int(n) % self._LFSR88_1_CYCLE_LEN
n_2 = int(n) % self._LFSR88_2_CYCLE_LEN
n_3 = int(n) % self._LFSR88_3_CYCLE_LEN
z1 = pow(self._LFSR88_1_MATRIX, n_1) * self.z1
self.z1 = z1
z2 = pow(self._LFSR88_2_MATRIX, n_2) * self.z2
self.z2 = z2
z3 = pow(self._LFSR88_3_MATRIX, n_3) * self.z3
self.z3 = z3
def __repr__(self):
return (self.__class__.__name__ + "(" + lfsr_repr_z(self.z1) +
"," + lfsr_repr_z(self.z2) +
"," + lfsr_repr_z(self.z3) + ")")
| mit | -5,898,886,228,180,015,000 | 35.298759 | 125 | 0.576945 | false |
GNOME/gedit-plugins | plugins/commander/modules/align.py | 1 | 8900 | # -*- coding: utf-8 -*-
#
# align.py - align commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import commander.commands as commands
import commander.commands.completion
import commander.commands.result
import commander.commands.exceptions
from functools import reduce
import re
__commander_module__ = True
def _get_groups(m, group, add_ws_group):
if len(m.groups()) <= group - 1:
gidx = 0
else:
gidx = group
if len(m.groups()) <= add_ws_group - 1:
wsidx = 0
else:
wsidx = add_ws_group
# Whitespace group must be contained in align group
if m.start(wsidx) < m.start(gidx) or m.end(wsidx) > m.end(gidx):
wsidx = gidx
return (gidx, wsidx)
class Line:
def __init__(self, line, reg, tabwidth):
self.tabwidth = tabwidth
self.line = line
# All the separators
self.matches = list(reg.finditer(line))
# @newline initially contains the first column
if not self.matches:
# No separator found
self.newline = str(line)
else:
# Up to first separator
self.newline = line[0:self.matches[0].start(0)]
def matches_len(self):
return len(self.matches)
def new_len(self, extra=''):
return len((self.newline + extra).expandtabs(self.tabwidth))
def match(self, idx):
if idx >= self.matches_len():
return None
return self.matches[idx]
def append(self, idx, num, group, add_ws_group):
m = self.match(idx)
if m == None:
return
gidx, wsidx = _get_groups(m, group, add_ws_group)
# Append leading match
self.newline += self.line[m.start(0):m.start(gidx)]
# Now align by replacing wsidx with spaces
prefix = self.line[m.start(gidx):m.start(wsidx)]
suffix = self.line[m.end(wsidx):m.end(gidx)]
sp = ''
while True:
bridge = prefix + sp + suffix
if self.new_len(bridge) < num:
sp += ' '
else:
break
self.newline += bridge
# Then append the rest of the match
mnext = self.match(idx + 1)
if mnext == None:
endidx = None
else:
endidx = mnext.start(0)
self.newline += self.line[m.end(gidx):endidx]
def __str__(self):
return self.newline
def _find_max_align(lines, idx, group, add_ws_group):
num = 0
# We will align on 'group', by adding spaces to 'add_ws_group'
for line in lines:
m = line.match(idx)
if m != None:
gidx, wsidx = _get_groups(m, group, add_ws_group)
# until the start
extra = line.line[m.start(0):m.start(wsidx)] + line.line[m.end(wsidx):m.end(gidx)]
# Measure where to align it
l = line.new_len(extra)
else:
l = line.new_len()
if l > num:
num = l
return num
def _regex(view, reg, group, additional_ws, add_ws_group, flags=0):
buf = view.get_buffer()
# Get the selection of lines to align columns on
bounds = buf.get_selection_bounds()
if not bounds:
start = buf.get_iter_at_mark(buf.get_insert())
start.set_line_offset(0)
end = start.copy()
if not end.ends_line():
end.forward_to_line_end()
bounds = (start, end)
if not bounds[0].equal(bounds[1]) and bounds[1].starts_line():
bounds[1].backward_line()
if not bounds[1].ends_line():
bounds[1].forward_to_line_end()
# Get the regular expression from the user
if reg == None:
reg, words, modifier = (yield commander.commands.result.Prompt('Regex:'))
# Compile the regular expression
try:
reg = re.compile(reg, flags)
except Exception as e:
raise commander.commands.exceptions.Execute('Failed to compile regular expression: %s' % (e,))
# Query the user to provide a regex group number to align on
if group == None:
group, words, modifier = (yield commander.commands.result.Prompt('Group (1):'))
try:
group = int(group)
except:
group = 1
# Query the user for additional whitespace to insert for separating items
if additional_ws == None:
additional_ws, words, modifier = (yield commander.commands.result.Prompt('Additional whitespace (0):'))
try:
additional_ws = int(additional_ws)
except:
additional_ws = 0
# Query the user for the regex group number on which to add the
# whitespace
if add_ws_group == None:
add_ws_group, words, modifier = (yield commander.commands.result.Prompt('Whitespace group (1):'))
try:
add_ws_group = int(add_ws_group)
except:
add_ws_group = -1
# By default, add the whitespace on the group on which the columns are
# aligned
if add_ws_group < 0:
add_ws_group = group
start, end = bounds
if not start.starts_line():
start.set_line_offset(0)
if not end.ends_line():
end.forward_to_line_end()
lines = start.get_text(end).splitlines()
newlines = []
num = 0
tabwidth = view.get_tab_width()
# Construct Line objects for all the lines
newlines = [Line(line, reg, tabwidth) for line in lines]
# Calculate maximum number of matches (i.e. columns)
num = reduce(lambda x, y: max(x, y.matches_len()), newlines, 0)
for i in range(num):
al = _find_max_align(newlines, i, group, add_ws_group)
for line in newlines:
line.append(i, al + additional_ws, group, add_ws_group)
# Replace lines
aligned = str.join('\n', [x.newline for x in newlines])
buf.begin_user_action()
buf.delete(bounds[0], bounds[1])
m = buf.create_mark(None, bounds[0], True)
buf.insert(bounds[1], aligned)
buf.select_range(buf.get_iter_at_mark(m), bounds[1])
buf.delete_mark(m)
buf.end_user_action()
yield commander.commands.result.DONE
def __default__(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-sensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group)
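# Illustrative example (not part of the original module): with the default
# separator regex '\s+', running "align.regex" over a selection such as
#
#     foo = 1
#     longer_name = 2
#
# pads each whitespace run so the '=' and value columns start at the same
# offset on every line; the <padding> argument controls the extra spacing.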
def i(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-insensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group, re.IGNORECASE)
# ex:ts=4:et
| gpl-2.0 | 2,460,157,650,009,915,400 | 30.448763 | 190 | 0.637978 | false |
Teagan42/home-assistant | homeassistant/components/google_assistant/const.py | 1 | 4660 | """Constants for Google Assistant."""
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
climate,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
DOMAIN = "google_assistant"
GOOGLE_ASSISTANT_API_ENDPOINT = "/api/google_assistant"
CONF_EXPOSE = "expose"
CONF_ENTITY_CONFIG = "entity_config"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_PROJECT_ID = "project_id"
CONF_ALIASES = "aliases"
CONF_API_KEY = "api_key"
CONF_ROOM_HINT = "room"
CONF_ALLOW_UNLOCK = "allow_unlock"
CONF_SECURE_DEVICES_PIN = "secure_devices_pin"
CONF_REPORT_STATE = "report_state"
CONF_SERVICE_ACCOUNT = "service_account"
CONF_CLIENT_EMAIL = "client_email"
CONF_PRIVATE_KEY = "private_key"
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
"climate",
"cover",
"fan",
"group",
"input_boolean",
"light",
"media_player",
"scene",
"script",
"switch",
"vacuum",
"lock",
"binary_sensor",
"sensor",
"alarm_control_panel",
]
PREFIX_TYPES = "action.devices.types."
TYPE_CAMERA = PREFIX_TYPES + "CAMERA"
TYPE_LIGHT = PREFIX_TYPES + "LIGHT"
TYPE_SWITCH = PREFIX_TYPES + "SWITCH"
TYPE_VACUUM = PREFIX_TYPES + "VACUUM"
TYPE_SCENE = PREFIX_TYPES + "SCENE"
TYPE_FAN = PREFIX_TYPES + "FAN"
TYPE_THERMOSTAT = PREFIX_TYPES + "THERMOSTAT"
TYPE_LOCK = PREFIX_TYPES + "LOCK"
TYPE_BLINDS = PREFIX_TYPES + "BLINDS"
TYPE_GARAGE = PREFIX_TYPES + "GARAGE"
TYPE_OUTLET = PREFIX_TYPES + "OUTLET"
TYPE_SENSOR = PREFIX_TYPES + "SENSOR"
TYPE_DOOR = PREFIX_TYPES + "DOOR"
TYPE_TV = PREFIX_TYPES + "TV"
TYPE_SPEAKER = PREFIX_TYPES + "SPEAKER"
TYPE_ALARM = PREFIX_TYPES + "SECURITYSYSTEM"
SERVICE_REQUEST_SYNC = "request_sync"
HOMEGRAPH_URL = "https://homegraph.googleapis.com/"
HOMEGRAPH_SCOPE = "https://www.googleapis.com/auth/homegraph"
HOMEGRAPH_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
REQUEST_SYNC_BASE_URL = HOMEGRAPH_URL + "v1/devices:requestSync"
REPORT_STATE_BASE_URL = HOMEGRAPH_URL + "v1/devices:reportStateAndNotification"
# Error codes used for SmartHomeError class
# https://developers.google.com/actions/reference/smarthome/errors-exceptions
ERR_DEVICE_OFFLINE = "deviceOffline"
ERR_DEVICE_NOT_FOUND = "deviceNotFound"
ERR_VALUE_OUT_OF_RANGE = "valueOutOfRange"
ERR_NOT_SUPPORTED = "notSupported"
ERR_PROTOCOL_ERROR = "protocolError"
ERR_UNKNOWN_ERROR = "unknownError"
ERR_FUNCTION_NOT_SUPPORTED = "functionNotSupported"
ERR_ALREADY_DISARMED = "alreadyDisarmed"
ERR_ALREADY_ARMED = "alreadyArmed"
ERR_CHALLENGE_NEEDED = "challengeNeeded"
ERR_CHALLENGE_NOT_SETUP = "challengeFailedNotSetup"
ERR_TOO_MANY_FAILED_ATTEMPTS = "tooManyFailedAttempts"
ERR_PIN_INCORRECT = "pinIncorrect"
ERR_USER_CANCELLED = "userCancelled"
# Event types
EVENT_COMMAND_RECEIVED = "google_assistant_command"
EVENT_QUERY_RECEIVED = "google_assistant_query"
EVENT_SYNC_RECEIVED = "google_assistant_sync"
DOMAIN_TO_GOOGLE_TYPES = {
camera.DOMAIN: TYPE_CAMERA,
climate.DOMAIN: TYPE_THERMOSTAT,
cover.DOMAIN: TYPE_BLINDS,
fan.DOMAIN: TYPE_FAN,
group.DOMAIN: TYPE_SWITCH,
input_boolean.DOMAIN: TYPE_SWITCH,
light.DOMAIN: TYPE_LIGHT,
lock.DOMAIN: TYPE_LOCK,
media_player.DOMAIN: TYPE_SWITCH,
scene.DOMAIN: TYPE_SCENE,
script.DOMAIN: TYPE_SCENE,
switch.DOMAIN: TYPE_SWITCH,
vacuum.DOMAIN: TYPE_VACUUM,
alarm_control_panel.DOMAIN: TYPE_ALARM,
}
DEVICE_CLASS_TO_GOOGLE_TYPES = {
(cover.DOMAIN, cover.DEVICE_CLASS_GARAGE): TYPE_GARAGE,
(cover.DOMAIN, cover.DEVICE_CLASS_DOOR): TYPE_DOOR,
(switch.DOMAIN, switch.DEVICE_CLASS_SWITCH): TYPE_SWITCH,
(switch.DOMAIN, switch.DEVICE_CLASS_OUTLET): TYPE_OUTLET,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_DOOR): TYPE_DOOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_GARAGE_DOOR): TYPE_GARAGE,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_LOCK): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_OPENING): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_WINDOW): TYPE_SENSOR,
(media_player.DOMAIN, media_player.DEVICE_CLASS_TV): TYPE_TV,
(media_player.DOMAIN, media_player.DEVICE_CLASS_SPEAKER): TYPE_SPEAKER,
(sensor.DOMAIN, sensor.DEVICE_CLASS_TEMPERATURE): TYPE_SENSOR,
(sensor.DOMAIN, sensor.DEVICE_CLASS_HUMIDITY): TYPE_SENSOR,
}
CHALLENGE_ACK_NEEDED = "ackNeeded"
CHALLENGE_PIN_NEEDED = "pinNeeded"
CHALLENGE_FAILED_PIN_NEEDED = "challengeFailedPinNeeded"
STORE_AGENT_USER_IDS = "agent_user_ids"
SOURCE_CLOUD = "cloud"
SOURCE_LOCAL = "local"
| apache-2.0 | 8,215,510,004,811,099,000 | 30.486486 | 80 | 0.71824 | false |
LoyolaCSDepartment/LDA-ICPC-2014 | topic-models/topic-count/xmlsplit.py | 1 | 1409 | #! /usr/bin/env /usr/bin/python3
import os
import sys
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def xmlSplit(infile_name, dest_dir):
try:
# in_file = open('{0}{1}'.format(folder, filename), 'r', encoding='latin_1')
in_file = open(infile_name, 'r', encoding='latin_1')
except IOError:
print("File not found.")
return
  dest_dir += '/'  # a trailing '/' is harmless if one is already present, but required if missing
# dest_dir = '{0}input/'.format(folder)
ensure_dir(dest_dir)
file_num = 1
out_file = open('%s%d.txt' % (dest_dir, file_num), 'w')
file_open = True
for x in in_file:
if x[-1] != '\n':
x = '%s\n' % (x)
if not file_open:
file_open = True
out_file = open(next_file, 'w')
# hack to remove non-ascii characters
x = ''.join([c for c in x if ord(c) < 128])
out_file.write('%s' % (x))
if x.startswith('</source>'):
out_file.close()
file_num += 1
next_file = '%s%d.txt' % (dest_dir, file_num)
file_open = False
print('{0} files'.format(file_num - 1) + " left in " + dest_dir)
out_file.close()
in_file.close()
if len(sys.argv) != 3:
print("usage: " + sys.argv[0] + " <input xml file> <output directory>")
  sys.exit(-1)
xmlSplit(sys.argv[1], sys.argv[2])
# example call: xmlsplit.py cook.xml /scratch/topics/out
# xmlSplit('<FIX ME>/topic-models/topic-count/sources/', 'cook.xml')
| mit | 8,342,931,913,768,487,000 | 25.092593 | 80 | 0.582683 | false |
glennmatthews/cot | COT/vm_description/ovf/hardware.py | 1 | 24156 | #!/usr/bin/env python
#
# hardware.py - OVFHardware class
#
# June 2016, Glenn F. Matthews
# Copyright (c) 2013-2016, 2019 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Representation of OVF hardware definitions.
**Classes and Exceptions**
.. autosummary::
:nosignatures:
OVFHardware
OVFHardwareDataError
"""
import copy
import logging
from COT.data_validation import natural_sort
from COT.xml_file import XML
from .item import OVFItem, OVFItemDataError
logger = logging.getLogger(__name__)
class OVFHardwareDataError(Exception):
"""The input data used to construct an :class:`OVFHardware` is not sane."""
class OVFHardware(object):
"""Helper class for :class:`~COT.vm_description.ovf.ovf.OVF`.
Represents all hardware items defined by this OVF;
i.e., the contents of all Items in the VirtualHardwareSection.
Fundamentally it's just a dict of
:class:`~COT.vm_description.ovf.item.OVFItem` objects
with a bunch of helper methods.
"""
def __init__(self, ovf):
"""Construct an OVFHardware object describing all Items in the OVF.
Args:
ovf (OVF): OVF instance to extract hardware information from.
Raises:
OVFHardwareDataError: if any data errors are seen
"""
self.ovf = ovf
self.item_dict = {}
valid_profiles = set(ovf.config_profiles)
item_count = 0
for item in ovf.virtual_hw_section:
namespace = ovf.namespace_for_item_tag(item.tag)
if not namespace:
continue
item_count += 1
# We index the dict by InstanceID as it's the one property of
# an Item that uniquely identifies this set of hardware items.
instance = item.find(namespace + self.ovf.INSTANCE_ID).text
# Pre-sanity check - are all of the profiles associated with this
# item properly defined in the OVF DeploymentOptionSection?
item_profiles = set(item.get(self.ovf.ITEM_CONFIG, "").split())
unknown_profiles = item_profiles - valid_profiles
if unknown_profiles:
raise OVFHardwareDataError("Unknown profile(s) {0} for "
"Item instance {1}"
.format(unknown_profiles, instance))
if instance not in self.item_dict:
self.item_dict[instance] = OVFItem(self.ovf, item)
else:
try:
self.item_dict[instance].add_item(item)
except OVFItemDataError as exc:
logger.debug(exc)
# Mask away the nitty-gritty details from our caller
raise OVFHardwareDataError("Data conflict for instance {0}"
.format(instance))
logger.debug(
"OVF contains %s hardware Item elements describing %s "
"unique devices", item_count, len(self.item_dict))
# Treat the current state as golden:
for ovfitem in self.item_dict.values():
ovfitem.modified = False
def update_xml(self):
"""Regenerate all Items under the VirtualHardwareSection, if needed.
Will do nothing if no Items have been changed.
"""
modified = False
if len(self.item_dict) != len(XML.find_all_children(
self.ovf.virtual_hw_section,
set([self.ovf.ITEM, self.ovf.STORAGE_ITEM,
self.ovf.ETHERNET_PORT_ITEM]))):
modified = True
else:
for ovfitem in self.item_dict.values():
if ovfitem.modified:
modified = True
break
if not modified:
logger.verbose("No changes to hardware definition, "
"so no XML update is required")
return
# Delete the existing Items:
delete_count = 0
for item in list(self.ovf.virtual_hw_section):
if (item.tag == self.ovf.ITEM or
item.tag == self.ovf.STORAGE_ITEM or
item.tag == self.ovf.ETHERNET_PORT_ITEM):
self.ovf.virtual_hw_section.remove(item)
delete_count += 1
logger.debug("Cleared %d existing items from VirtualHWSection",
delete_count)
# Generate the new XML Items, in appropriately sorted order by Instance
ordering = [self.ovf.INFO, self.ovf.SYSTEM, self.ovf.ITEM]
for instance in natural_sort(self.item_dict):
logger.debug("Writing Item(s) with InstanceID %s", instance)
ovfitem = self.item_dict[instance]
new_items = ovfitem.generate_items()
logger.spam("Generated %d items", len(new_items))
for item in new_items:
XML.add_child(self.ovf.virtual_hw_section, item, ordering)
logger.verbose("Updated XML VirtualHardwareSection, now contains %d "
"Items representing %d devices",
len(self.ovf.virtual_hw_section.findall(self.ovf.ITEM)),
len(self.item_dict))
def find_unused_instance_id(self, start=1):
"""Find the first available ``InstanceID`` number.
Args:
start (int): First InstanceID value to consider (disregarding all
lower InstanceIDs, even if available).
Returns:
str: An instance ID that is not yet in use.
"""
instance = int(start)
while str(instance) in self.item_dict.keys():
instance += 1
logger.debug("Found unused InstanceID %d", instance)
return str(instance)
def new_item(self, resource_type, profile_list=None):
"""Create a new OVFItem of the given type.
Args:
resource_type (str): String such as 'cpu' or 'harddisk' - used as
a key to
:data:`~COT.vm_description.ovf.name_helper.OVFNameHelper1.RES_MAP`
profile_list (list): Profiles the new item should belong to
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id()
ovfitem = OVFItem(self.ovf)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.set_property(self.ovf.RESOURCE_TYPE,
self.ovf.RES_MAP[resource_type],
profile_list)
# ovftool freaks out if we leave out the ElementName on an Item,
# so provide a simple default value.
ovfitem.set_property(self.ovf.ELEMENT_NAME, resource_type,
profile_list)
self.item_dict[instance] = ovfitem
ovfitem.modified = True
logger.info("Created new %s under profile(s) %s, InstanceID is %s",
resource_type, profile_list, instance)
return (instance, ovfitem)
def delete_item(self, item):
"""Delete the given Item from the hardware.
Args:
item (OVFItem): Item to delete
"""
instance = item.get_value(self.ovf.INSTANCE_ID)
if self.item_dict[instance] == item:
del self.item_dict[instance]
# TODO: error handling - currently a no-op if item not in item_dict
def clone_item(self, parent_item, profile_list):
"""Clone an OVFItem to create a new instance.
Args:
parent_item (OVFItem): Instance to clone from
profile_list (list): List of profiles to clone into
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id(start=parent_item.instance_id)
logger.spam("Cloning existing Item %s with new instance ID %s",
parent_item, instance)
ovfitem = copy.deepcopy(parent_item)
# Delete any profiles from the parent that we don't need now,
# otherwise we'll get an error when trying to set the instance ID
# on our clone due to self-inconsistency (#64).
for profile in self.ovf.config_profiles:
if ovfitem.has_profile(profile) and profile not in profile_list:
ovfitem.remove_profile(profile)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.modified = True
self.item_dict[instance] = ovfitem
logger.spam("Added clone of %s under %s, instance is %s",
parent_item, profile_list, instance)
return (instance, ovfitem)
def item_match(self, item, resource_type, properties, profile_list):
"""Check whether the given item matches the given filters.
Args:
item (OVFItem): Item to validate
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
bool: True if the item matches all filters, False if not.
"""
if resource_type and (self.ovf.RES_MAP[resource_type] !=
item.get_value(self.ovf.RESOURCE_TYPE)):
return False
if profile_list:
for profile in profile_list:
if not item.has_profile(profile):
return False
for (prop, value) in properties.items():
if item.get_value(prop) != value:
return False
return True
def find_all_items(self, resource_type=None, properties=None,
profile_list=None):
"""Find all items matching the given type, properties, and profiles.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
list: Matching OVFItem instances
"""
items = [self.item_dict[instance] for instance in
natural_sort(self.item_dict)]
filtered_items = []
if properties is None:
properties = {}
for item in items:
if self.item_match(item, resource_type, properties, profile_list):
filtered_items.append(item)
logger.spam("Found %s Items of type %s with properties %s and"
" profiles %s", len(filtered_items), resource_type,
properties, profile_list)
return filtered_items
def find_item(self, resource_type=None, properties=None, profile=None):
"""Find the only OVFItem of the given :attr:`resource_type`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile (str): Single profile ID to search within
Returns:
OVFItem: Matching instance, or None
Raises:
LookupError: if more than one such Item exists.
"""
matches = self.find_all_items(resource_type, properties, [profile])
if len(matches) > 1:
raise LookupError(
"Found multiple matching '{0}' Items (instances {1})"
.format(resource_type, [m.instance_id for m in matches]))
elif len(matches) == 0:
return None
else:
return matches[0]
def get_item_count(self, resource_type, profile):
"""Get the number of Items of the given type for the given profile.
Wrapper for :meth:`get_item_count_per_profile`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile (str): Single profile identifier string to look up.
Returns:
int: Number of items of this type in this profile.
"""
return (self.get_item_count_per_profile(resource_type, [profile])
[profile])
def get_item_count_per_profile(self, resource_type, profile_list):
"""Get the number of Items of the given type per profile.
Items present under "no profile" will be counted against
the total for each profile.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
dict: mapping profile strings to the number of items under each
profile.
"""
count_dict = {}
if not profile_list:
# Get the count under all profiles
profile_list = self.ovf.config_profiles + [None]
for profile in profile_list:
count_dict[profile] = 0
for ovfitem in self.find_all_items(resource_type):
for profile in profile_list:
if ovfitem.has_profile(profile):
count_dict[profile] += 1
for (profile, count) in count_dict.items():
logger.spam("Profile '%s' has %s %s Item(s)",
profile, count, resource_type)
return count_dict
def _update_existing_item_profiles(self, resource_type,
count, profile_list):
"""Change profile membership of existing items as needed.
Helper method for :meth:`set_item_count_per_profile`.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
tuple: (count_dict, items_to_add, last_item)
"""
count_dict = self.get_item_count_per_profile(resource_type,
profile_list)
items_seen = dict.fromkeys(profile_list, 0)
last_item = None
# First, iterate over existing Items.
# Once we've seen "count" items under a profile, remove all subsequent
# items from this profile.
# If we don't have enough items under a profile, add any items found
# under other profiles to this profile as well.
for ovfitem in self.find_all_items(resource_type):
last_item = ovfitem
for profile in profile_list:
if ovfitem.has_profile(profile):
if items_seen[profile] >= count:
# Too many items - remove this one!
ovfitem.remove_profile(profile)
else:
items_seen[profile] += 1
else:
if count_dict[profile] < count:
# Add this profile to this Item
ovfitem.add_profile(profile)
count_dict[profile] += 1
items_seen[profile] += 1
# How many new Items do we need to create in total?
items_to_add = 0
for profile in profile_list:
delta = count - items_seen[profile]
if delta > items_to_add:
items_to_add = delta
return count_dict, items_to_add, last_item
def _update_cloned_item(self, new_item, new_item_profiles, item_count):
"""Update a cloned item to make it distinct from its parent.
Helper method for :meth:`set_item_count_per_profile`.
Args:
new_item (OVFItem): Newly cloned Item
new_item_profiles (list): Profiles new_item should belong to
item_count (int): How many Items of this type (including this
item) now exist. Used with
:meth:`COT.platform.Platform.guess_nic_name`
Returns:
OVFItem: Updated :param:`new_item`
Raises:
NotImplementedError: No support yet for updating ``Address``
NotImplementedError: If updating ``AddressOnParent`` but the
prior value varies across config profiles.
NotImplementedError: if ``AddressOnParent`` is not an integer.
"""
resource_type = new_item.hardware_type
address = new_item.get(self.ovf.ADDRESS)
if address:
raise NotImplementedError("Don't know how to ensure a unique "
"Address value when cloning an Item "
"of type {0}".format(resource_type))
address_on_parent = new_item.get(self.ovf.ADDRESS_ON_PARENT)
if address_on_parent:
address_list = new_item.get_all_values(self.ovf.ADDRESS_ON_PARENT)
if len(address_list) > 1:
raise NotImplementedError("AddressOnParent is not common "
"across all profiles but has "
"multiple values {0}. COT can't "
"handle this yet."
.format(address_list))
address_on_parent = address_list[0]
# Currently we only handle integer addresses
try:
address_on_parent = int(address_on_parent)
address_on_parent += 1
new_item.set_property(self.ovf.ADDRESS_ON_PARENT,
str(address_on_parent),
new_item_profiles)
except ValueError:
raise NotImplementedError("Don't know how to ensure a "
"unique AddressOnParent value "
"given base value '{0}'"
.format(address_on_parent))
if resource_type == 'ethernet':
# Update ElementName to reflect the NIC number
element_name = self.ovf.platform.guess_nic_name(item_count)
new_item.set_property(self.ovf.ELEMENT_NAME, element_name,
new_item_profiles)
return new_item
def set_item_count_per_profile(self, resource_type, count, profile_list):
"""Set the number of items of a given type under the given profile(s).
If the new count is greater than the current count under this
profile, then additional instances that already exist under
another profile will be added to this profile, starting with
the lowest-sequence instance not already present, and only as
a last resort will new instances be created.
If the new count is less than the current count under this profile,
then the highest-numbered instances will be removed preferentially.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
"""
if not profile_list:
# Set the profile list for all profiles, including the default
profile_list = self.ovf.config_profiles + [None]
count_dict, items_to_add, last_item = \
self._update_existing_item_profiles(
resource_type, count, profile_list)
logger.debug("Creating %d new items", items_to_add)
while items_to_add > 0:
# Which profiles does this Item need to belong to?
new_item_profiles = []
for profile in profile_list:
if count_dict[profile] < count:
new_item_profiles.append(profile)
count_dict[profile] += 1
if last_item is None:
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, new_item) = self.new_item(resource_type, new_item_profiles)
else:
(_, new_item) = self.clone_item(last_item, new_item_profiles)
# Check/update other properties of the clone that should be unique:
# TODO - we assume that the count is the same across profiles
new_item = self._update_cloned_item(
new_item, new_item_profiles, count_dict[new_item_profiles[0]])
last_item = new_item
items_to_add -= 1
def set_value_for_all_items(self, resource_type, prop_name, new_value,
profile_list, create_new=False):
"""Set a property to the given value for all items of the given type.
If no items of the given type exist, will create a new ``Item`` if
:attr:`create_new` is set to ``True``; otherwise will log a warning
and do nothing.
Args:
resource_type (str): Resource type such as 'cpu' or 'harddisk'
prop_name (str): Property name to update
new_value (str): New value to set the property to
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
create_new (bool): Whether to create a new entry if no items
of this :attr:`resource_type` presently exist.
"""
ovfitem_list = self.find_all_items(resource_type)
if not ovfitem_list:
if not create_new:
logger.warning("No items of type %s found. Nothing to do.",
resource_type)
return
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, ovfitem) = self.new_item(resource_type, profile_list)
ovfitem_list = [ovfitem]
for ovfitem in ovfitem_list:
ovfitem.set_property(prop_name, new_value, profile_list)
logger.debug("Updated %s %s to %s under profiles %s",
resource_type, prop_name, new_value, profile_list)
def set_item_values_per_profile(self, resource_type, prop_name, value_list,
profile_list, default=None):
"""Set value(s) for a property of multiple items of a type.
Args:
resource_type (str): Device type such as 'harddisk' or 'cpu'
prop_name (str): Property name to update
value_list (list): List of values to set (one value per item of the
given :attr:`resource_type`)
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
default (str): If there are more matching items than entries in
:attr:`value_list`, set extra items to this value
"""
if profile_list is None:
profile_list = self.ovf.config_profiles + [None]
for ovfitem in self.find_all_items(resource_type):
if len(value_list):
new_value = value_list.pop(0)
else:
new_value = default
for profile in profile_list:
if ovfitem.has_profile(profile):
ovfitem.set_property(prop_name, new_value, [profile])
logger.info("Updated %s property %s to %s under %s",
resource_type, prop_name, new_value, profile_list)
if len(value_list):
logger.warning("After scanning all known %s Items, not all "
"%s values were used - leftover %s",
resource_type, prop_name, value_list)
| mit | -7,742,814,765,775,915,000 | 41.603175 | 79 | 0.573812 | false |
vlegoff/tsunami | src/primaires/scripting/config.py | 1 | 2340 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la configuration par défaut du module 'scripting'."""
cfg_exportation = r"""
# Ce fichier contient la configuration de l'exportation du scripting.
# L'exportation du scripting permet de générer automtiquement
# la documentation des fonctions et actions du scripting et de
# l'enregistrer dans un fichier txt au format Dokuwiki.
# Cette exportation est désactivée par défaut. Si vous voulez l'activer,
# lisez bien la configuration qui suit.
# Pour activer l'exportation, mettez l'option qui suit à True.
active = False
# Chemin du fichier dans lequel écrire la documentation des actions
chemin_doc_actions = "actions.txt"
# Chemin du fichier dans lequel écrire la documentation des fonctions
chemin_doc_fonctions = "fonctions.txt"
"""
| bsd-3-clause | -3,883,499,589,447,918,600 | 44.705882 | 79 | 0.780352 | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/scrapers/izlemeyedeger_scraper.py | 1 | 3982 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://www.izlemeyedeger.com'
class IzlemeyeDeger_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'IzlemeyeDeger'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
if 'views' in item and item['views']:
label += ' (%s views)' % item['views']
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
embed_url = dom_parser.parse_dom(html, 'meta', {'itemprop': 'embedURL'}, ret='content')
if embed_url:
html = self._http_get(embed_url[0], cache_limit=.5)
for match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
stream_url, height = match.groups()
stream_url = stream_url.replace('\\&', '&')
host = self._get_direct_hostname(stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(height)
stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(embed_url[0]))
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def search(self, video_type, title, year, season=''):
results = []
search_url = urlparse.urljoin(self.base_url, '/arama?q=%s')
search_url = search_url % (urllib.quote_plus(title))
html = self._http_get(search_url, cache_limit=1)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'section'})
if fragment:
for match in re.finditer('href="([^"]+).*?class="year">\s*(\d+).*?class="video-title">\s*([^<]+)', fragment[0], re.DOTALL):
url, match_year, match_title = match.groups('')
match_title = match_title.strip()
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
| gpl-2.0 | -3,967,336,712,143,353,000 | 40.479167 | 165 | 0.592416 | false |
jabesq/home-assistant | homeassistant/components/sensibo/climate.py | 1 | 12181 | """Support for Sensibo wifi-enabled home thermostats."""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
import pysensibo
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT_COOL, HVAC_MODE_COOL, HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE, ATTR_TEMPERATURE, CONF_API_KEY, CONF_ID,
STATE_ON, TEMP_CELSIUS, TEMP_FAHRENHEIT)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
from .const import DOMAIN as SENSIBO_DOMAIN
_LOGGER = logging.getLogger(__name__)
ALL = ['all']
TIMEOUT = 10
SERVICE_ASSUME_STATE = 'assume_state'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
})
ASSUME_STATE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_STATE): cv.string,
})
_FETCH_FIELDS = ','.join([
'room{name}', 'measurements', 'remoteCapabilities',
'acState', 'connectionStatus{isAlive}', 'temperatureUnit'])
_INITIAL_FETCH_FIELDS = 'id,' + _FETCH_FIELDS
FIELD_TO_FLAG = {
'fanLevel': SUPPORT_FAN_MODE,
'swing': SUPPORT_SWING_MODE,
'targetTemperature': SUPPORT_TARGET_TEMPERATURE,
}
SENSIBO_TO_HA = {
"cool": HVAC_MODE_COOL,
"heat": HVAC_MODE_HEAT,
"fan": HVAC_MODE_FAN_ONLY,
"auto": HVAC_MODE_HEAT_COOL,
"dry": HVAC_MODE_DRY
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up Sensibo devices."""
client = pysensibo.SensiboClient(
config[CONF_API_KEY], session=async_get_clientsession(hass),
timeout=TIMEOUT)
devices = []
try:
for dev in (
await client.async_get_devices(_INITIAL_FETCH_FIELDS)):
if config[CONF_ID] == ALL or dev['id'] in config[CONF_ID]:
devices.append(SensiboClimate(
client, dev, hass.config.units.temperature_unit))
except (aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError, pysensibo.SensiboError):
_LOGGER.exception('Failed to connect to Sensibo servers.')
raise PlatformNotReady
if not devices:
return
async_add_entities(devices)
async def async_assume_state(service):
"""Set state according to external service call.."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_climate = [device for device in devices
if device.entity_id in entity_ids]
else:
target_climate = devices
update_tasks = []
for climate in target_climate:
await climate.async_assume_state(
service.data.get(ATTR_STATE))
update_tasks.append(climate.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
SENSIBO_DOMAIN, SERVICE_ASSUME_STATE, async_assume_state,
schema=ASSUME_STATE_SCHEMA)
class SensiboClimate(ClimateDevice):
"""Representation of a Sensibo device."""
def __init__(self, client, data, units):
"""Build SensiboClimate.
client: aiohttp session.
data: initially-fetched data.
"""
self._client = client
self._id = data['id']
self._external_state = None
self._units = units
self._available = False
self._do_update(data)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._supported_features
def _do_update(self, data):
self._name = data['room']['name']
self._measurements = data['measurements']
self._ac_states = data['acState']
self._available = data['connectionStatus']['isAlive']
capabilities = data['remoteCapabilities']
self._operations = [SENSIBO_TO_HA[mode] for mode
in capabilities['modes']]
self._operations.append(HVAC_MODE_OFF)
self._current_capabilities = \
capabilities['modes'][self._ac_states['mode']]
temperature_unit_key = data.get('temperatureUnit') or \
self._ac_states.get('temperatureUnit')
if temperature_unit_key:
self._temperature_unit = TEMP_CELSIUS if \
temperature_unit_key == 'C' else TEMP_FAHRENHEIT
self._temperatures_list = self._current_capabilities[
'temperatures'].get(temperature_unit_key, {}).get('values', [])
else:
self._temperature_unit = self._units
self._temperatures_list = []
self._supported_features = 0
for key in self._ac_states:
if key in FIELD_TO_FLAG:
self._supported_features |= FIELD_TO_FLAG[key]
@property
def state(self):
"""Return the current state."""
return self._external_state or super().state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {'battery': self.current_battery}
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return self._temperature_unit
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._ac_states.get('targetTemperature')
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
if self.temperature_unit == self.hass.config.units.temperature_unit:
# We are working in same units as the a/c unit. Use whole degrees
# like the API supports.
return 1
# Unit conversion is going on. No point to stick to specific steps.
return None
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
if not self._ac_states['on']:
return HVAC_MODE_OFF
return SENSIBO_TO_HA.get(self._ac_states['mode'])
@property
def current_humidity(self):
"""Return the current humidity."""
return self._measurements['humidity']
@property
def current_battery(self):
"""Return the current battery voltage."""
return self._measurements.get('batteryVoltage')
@property
def current_temperature(self):
"""Return the current temperature."""
# This field is not affected by temperatureUnit.
# It is always in C
return convert_temperature(
self._measurements['temperature'],
TEMP_CELSIUS,
self.temperature_unit)
@property
def hvac_modes(self):
"""List of available operation modes."""
return self._operations
@property
def fan_mode(self):
"""Return the fan setting."""
return self._ac_states.get('fanLevel')
@property
def fan_modes(self):
"""List of available fan modes."""
return self._current_capabilities.get('fanLevels')
@property
def swing_mode(self):
"""Return the fan setting."""
return self._ac_states.get('swing')
@property
def swing_modes(self):
"""List of available swing modes."""
return self._current_capabilities.get('swing')
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._temperatures_list[0] \
if self._temperatures_list else super().min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._temperatures_list[-1] \
if self._temperatures_list else super().max_temp
@property
def unique_id(self):
"""Return unique ID based on Sensibo ID."""
return self._id
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
temperature = int(temperature)
if temperature not in self._temperatures_list:
# Requested temperature is not supported.
if temperature == self.target_temperature:
return
index = self._temperatures_list.index(self.target_temperature)
if temperature > self.target_temperature and index < len(
self._temperatures_list) - 1:
temperature = self._temperatures_list[index + 1]
elif temperature < self.target_temperature and index > 0:
temperature = self._temperatures_list[index - 1]
else:
return
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, 'targetTemperature', temperature, self._ac_states)
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, 'fanLevel', fan_mode, self._ac_states)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
if hvac_mode == HVAC_MODE_OFF:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, 'on', False, self._ac_states)
return
# Turn on if not currently on.
if not self._ac_states['on']:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, 'on', True, self._ac_states)
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, 'mode', HA_TO_SENSIBO[hvac_mode],
self._ac_states)
async def async_set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, 'swing', swing_mode, self._ac_states)
async def async_assume_state(self, state):
"""Set external state."""
change_needed = \
(state != HVAC_MODE_OFF and not self._ac_states['on']) \
or (state == HVAC_MODE_OFF and self._ac_states['on'])
if change_needed:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id,
'on',
state != HVAC_MODE_OFF, # value
self._ac_states,
True # assumed_state
)
if state in [STATE_ON, HVAC_MODE_OFF]:
self._external_state = None
else:
self._external_state = state
async def async_update(self):
"""Retrieve latest state."""
try:
with async_timeout.timeout(TIMEOUT):
data = await self._client.async_get_device(
self._id, _FETCH_FIELDS)
self._do_update(data)
except (aiohttp.client_exceptions.ClientError,
pysensibo.SensiboError):
_LOGGER.warning('Failed to connect to Sensibo servers.')
self._available = False
| apache-2.0 | 2,443,996,054,491,478,500 | 33.605114 | 79 | 0.605205 | false |
foyzur/gpdb | gpMgmt/bin/gppylib/test/behave_utils/utils.py | 4 | 64021 | #!/usr/bin/env python
import re, os, signal, time, filecmp, stat, fileinput
import yaml
from gppylib.commands.gp import GpStart, chk_local_db_running
from gppylib.commands.base import Command, ExecutionError, REMOTE
from gppylib.db import dbconn
from gppylib.gparray import GpArray, MODE_SYNCHRONIZED
from gppylib.operations.backup_utils import pg, escapeDoubleQuoteInSQLString
PARTITION_START_DATE = '2010-01-01'
PARTITION_END_DATE = '2013-01-01'
GET_APPENDONLY_DATA_TABLE_INFO_SQL = """SELECT ALL_DATA_TABLES.oid, ALL_DATA_TABLES.schemaname, ALL_DATA_TABLES.tablename, OUTER_PG_CLASS.relname as tupletable FROM(
SELECT ALLTABLES.oid, ALLTABLES.schemaname, ALLTABLES.tablename FROM
(SELECT c.oid, n.nspname AS schemaname, c.relname AS tablename FROM pg_class c, pg_namespace n
WHERE n.oid = c.relnamespace) as ALLTABLES,
(SELECT n.nspname AS schemaname, c.relname AS tablename
FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
WHERE c.relkind = 'r'::"char" AND c.oid > 16384 AND (c.relnamespace > 16384 or n.nspname = 'public')
EXCEPT
((SELECT x.schemaname, x.partitiontablename FROM
(SELECT distinct schemaname, tablename, partitiontablename, partitionlevel FROM pg_partitions) as X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel FROM pg_partitions group by (tablename, schemaname))
as Y
WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel != Y.maxlevel)
UNION (SELECT distinct schemaname, tablename FROM pg_partitions))) as DATATABLES
WHERE ALLTABLES.schemaname = DATATABLES.schemaname and ALLTABLES.tablename = DATATABLES.tablename AND ALLTABLES.oid not in (select reloid from pg_exttable)
) as ALL_DATA_TABLES, pg_appendonly, pg_class OUTER_PG_CLASS
WHERE ALL_DATA_TABLES.oid = pg_appendonly.relid
AND OUTER_PG_CLASS.oid = pg_appendonly.segrelid
"""
GET_ALL_AO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 'f'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
GET_ALL_CO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 't'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if master_data_dir is None:
raise Exception('MASTER_DATA_DIRECTORY is not set')
def execute_sql(dbname, sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, sql)
conn.commit()
def execute_sql_singleton(dbname, sql):
result = None
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, sql)
if result is None:
raise Exception("error running query: %s" % sql)
return result
def has_exception(context):
if not hasattr(context, 'exception'):
return False
if context.exception:
return True
else:
return False
def run_command(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_cmd(command):
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
        print 'caught exception %s' % e
result = cmd.get_results()
return (result.rc, result.stdout, result.stderr)
def run_command_remote(context, command, host, source_file, export_mdd):
    cmd = Command(name='run command %s' % command,
                  cmdStr='gpssh -h %s -e \'source %s; %s; %s\'' % (host, source_file, export_mdd, command))
cmd.run(validateAfter=True)
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_gpcommand(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='$GPHOME/bin/%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def check_stdout_msg(context, msg):
pat = re.compile(msg)
if not pat.search(context.stdout_message):
err_str = "Expected stdout string '%s' and found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_string_not_present_stdout(context, msg):
pat = re.compile(msg)
if pat.search(context.stdout_message):
err_str = "Did not expect stdout string '%s' but found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_err_msg(context, err_msg):
if not hasattr(context, 'exception'):
raise Exception('An exception was not raised and it was expected')
pat = re.compile(err_msg)
if not pat.search(context.error_message):
err_str = "Expected error string '%s' and found: '%s'" % (err_msg, context.error_message)
raise Exception(err_str)
def check_return_code(context, ret_code):
if context.ret_code != int(ret_code):
emsg = ""
if context.error_message:
emsg += context.error_message
raise Exception("expected return code '%s' does not equal actual return code '%s' %s" % (ret_code, context.ret_code, emsg))
def check_database_is_running(context):
if not 'PGPORT' in os.environ:
raise Exception('PGPORT should be set')
pgport = int(os.environ['PGPORT'])
running_status = chk_local_db_running(master_data_dir, pgport)
gpdb_running = running_status[0] and running_status[1] and running_status[2] and running_status[3]
return gpdb_running
def start_database_if_not_started(context):
if not check_database_is_running(context):
start_database(context)
def start_database(context):
run_gpcommand(context, 'gpstart -a')
if context.exception:
raise context.exception
def stop_database_if_started(context):
if check_database_is_running(context):
stop_database(context)
def stop_database(context):
run_gpcommand(context, 'gpstop -M fast -a')
if context.exception:
raise context.exception
def getRows(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def getRow(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
result = curs.fetchone()
return result
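def _example_catalog_query(dbname):
    # Illustrative sketch only: getRows/getRow wrap the connect-execute-fetch
    # cycle so callers can run ad-hoc catalog queries in one line; the query
    # below is just an example.
    return getRows(dbname, "select datname from pg_database order by datname;")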
def check_db_exists(dbname, host=None, port=0, user=None):
LIST_DATABASE_SQL = 'select datname from pg_database'
results = []
with dbconn.connect(dbconn.DbURL(hostname=host, username=user, port=port, dbname='template1')) as conn:
curs = dbconn.execSQL(conn, LIST_DATABASE_SQL)
results = curs.fetchall()
for result in results:
if result[0] == dbname:
return True
return False
def create_database_if_not_exists(context, dbname, host=None, port=0, user=None):
if not check_db_exists(dbname, host, port, user):
create_database(context, dbname, host, port, user)
def create_database(context, dbname=None, host=None, port=0, user=None):
LOOPS = 10
if host == None or port == 0 or user == None:
createdb_cmd = 'createdb %s' % dbname
else:
createdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "create database %s"' % (host,
port, user, dbname)
for i in range(LOOPS):
context.exception = None
run_command(context, createdb_cmd)
if context.exception:
time.sleep(1)
continue
if check_db_exists(dbname, host, port, user):
return
time.sleep(1)
if context.exception:
raise context.exception
raise Exception("create database for '%s' failed after %d attempts" % (dbname, LOOPS))
def clear_all_saved_data_verify_files(context):
current_dir = os.getcwd()
data_dir = os.path.join(current_dir, './gppylib/test/data')
cmd = 'rm %s/*' % data_dir
run_command(context, cmd)
def get_table_data_to_file(filename, tablename, dbname):
current_dir = os.getcwd()
filename = os.path.join(current_dir, './gppylib/test/data', filename)
order_sql = """
select string_agg(a, ',')
from (
select generate_series(1,c.relnatts+1) as a
from pg_class as c
inner join pg_namespace as n
on c.relnamespace = n.oid
where (n.nspname || '.' || c.relname = E'%s')
or c.relname = E'%s'
) as q;
""" % (pg.escape_string(tablename), pg.escape_string(tablename))
query = order_sql
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
try:
res = dbconn.execSQLForSingleton(conn, query)
# check if tablename is fully qualified <schema_name>.<table_name>
if '.' in tablename:
schema_name, table_name = tablename.split('.')
data_sql = '''COPY (select gp_segment_id, * from "%s"."%s" order by %s) TO '%s' ''' % (escapeDoubleQuoteInSQLString(schema_name, False),
escapeDoubleQuoteInSQLString(table_name, False), res, filename)
else:
data_sql = '''COPY (select gp_segment_id, * from "%s" order by %s) TO '%s' ''' %(escapeDoubleQuoteInSQLString(tablename, False), res, filename)
query = data_sql
dbconn.execSQL(conn, query)
conn.commit()
except Exception as e:
print "Cannot execute the query '%s' on the connection %s" % (query, str(dbconn.DbURL(dbname=dbname)))
print "Exception: %s" % str(e)
conn.close()
def diff_backup_restore_data(context, backup_file, restore_file):
if not filecmp.cmp(backup_file, restore_file):
raise Exception('%s and %s do not match' % (backup_file, restore_file))
def validate_restore_data(context, tablename, dbname, backedup_table=None):
filename = tablename.strip() + "_restore"
get_table_data_to_file(filename, tablename, dbname)
current_dir = os.getcwd()
if backedup_table != None:
backup_file = os.path.join(current_dir, './gppylib/test/data', backedup_table.strip() + "_backup")
else:
backup_file = os.path.join(current_dir, './gppylib/test/data', tablename.strip() + "_backup")
restore_file = os.path.join(current_dir, './gppylib/test/data', tablename.strip() + "_restore")
diff_backup_restore_data(context, backup_file, restore_file)
def validate_restore_data_in_file(context, tablename, dbname, file_name, backedup_table=None):
filename = file_name + "_restore"
get_table_data_to_file(filename, tablename, dbname)
current_dir = os.getcwd()
if backedup_table != None:
backup_file = os.path.join(current_dir, './gppylib/test/data', backedup_table.strip() + "_backup")
else:
backup_file = os.path.join(current_dir, './gppylib/test/data', file_name + "_backup")
restore_file = os.path.join(current_dir, './gppylib/test/data', file_name + "_restore")
diff_backup_restore_data(context, backup_file, restore_file)
def validate_db_data(context, dbname, expected_table_count):
tbls = get_table_names(dbname)
if len(tbls) != expected_table_count:
raise Exception("db %s does not have expected number of tables %d != %d" % (dbname, expected_table_count, len(tbls)))
for t in tbls:
name = "%s.%s" % (t[0], t[1])
validate_restore_data(context, name, dbname)
def get_segment_hostnames(context, dbname):
sql = "select distinct(hostname) from gp_segment_configuration where content != -1;"
return getRows(dbname, sql)
def backup_db_data(context, dbname):
tbls = get_table_names(dbname)
for t in tbls:
nm = "%s.%s" % (t[0], t[1])
backup_data(context, nm, dbname)
def backup_data(context, tablename, dbname):
filename = tablename + "_backup"
get_table_data_to_file(filename, tablename, dbname)
def backup_data_to_file(context, tablename, dbname, filename):
filename = filename + "_backup"
get_table_data_to_file(filename, tablename, dbname)
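def _example_backup_then_validate(context, tablename, dbname):
    # Illustrative sketch only: backup_data dumps the table to
    # ./gppylib/test/data/<tablename>_backup before a backup run, and
    # validate_restore_data re-dumps it to <tablename>_restore after the restore
    # and diffs the two files. A hypothetical step might chain them like this,
    # with the actual gpcrondump/gpdbrestore invocation happening in between.
    backup_data(context, tablename, dbname)
    validate_restore_data(context, tablename, dbname)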
def check_partition_table_exists(context, dbname, schemaname, table_name, table_type=None, part_level=1, part_number=1):
partitions = get_partition_names(schemaname, table_name, dbname, part_level, part_number)
if not partitions:
return False
return check_table_exists(context, dbname, partitions[0][0].strip(), table_type)
def check_table_exists(context, dbname, table_name, table_type=None, host=None, port=0, user=None):
if '.' in table_name:
schemaname, tablename = table_name.split('.')
SQL = """
select c.oid, c.relkind, c.relstorage, c.reloptions
from pg_class c, pg_namespace n
where c.relname = E'%s' and n.nspname = E'%s' and c.relnamespace = n.oid;
""" % (pg.escape_string(tablename), pg.escape_string(schemaname))
else:
SQL = """
select oid, relkind, relstorage, reloptions \
from pg_class \
where relname = E'%s'; \
""" % pg.escape_string(table_name)
table_row = None
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
try:
table_row = dbconn.execSQLForSingletonRow(conn, SQL)
except Exception as e:
context.exception = e
return False
if table_type is None:
return True
if table_row[2] == 'a':
original_table_type = 'ao'
elif table_row[2] == 'c':
original_table_type = 'co'
elif table_row[2] == 'h':
original_table_type = 'heap'
elif table_row[2] == 'x':
original_table_type = 'external'
elif table_row[2] == 'v':
original_table_type = 'view'
else:
raise Exception('Unknown table type %s' % table_row[2])
if original_table_type != table_type.strip():
return False
return True
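def _example_table_type_check(context, dbname):
    # Illustrative sketch only: table_type may be 'heap', 'ao', 'co', 'external'
    # or 'view'; the check maps pg_class.relstorage onto those labels. The table
    # name used here is an assumption for demonstration.
    return check_table_exists(context, dbname, 'public.ao_table', table_type='ao')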
def check_pl_exists(context, dbname, lan_name):
SQL = """select count(*) from pg_language where lanname='%s';""" % lan_name
lan_count = getRows(dbname, SQL)[0][0]
if lan_count == 0:
return False
return True
def check_constraint_exists(context, dbname, conname):
SQL = """select count(*) from pg_constraint where conname='%s';""" % conname
con_count = getRows(dbname, SQL)[0][0]
if con_count == 0:
return False
return True
def drop_external_table_if_exists(context, table_name, dbname):
if check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external'):
drop_external_table(context, table_name=table_name, dbname=dbname)
def drop_table_if_exists(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop table if exists %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
def drop_external_table(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop external table %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external', host=host, port=port, user=user):
raise Exception('Unable to successfully drop the table %s' % table_name)
def drop_table(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop table %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, username=user, port=port, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_table_exists(context, table_name=table_name, dbname=dbname, host=host, port=port, user=user):
raise Exception('Unable to successfully drop the table %s' % table_name)
def check_schema_exists(context, schema_name, dbname):
schema_check_sql = "select * from pg_namespace where nspname='%s';" % schema_name
if len(getRows(dbname, schema_check_sql)) < 1:
return False
return True
def drop_schema_if_exists(context, schema_name, dbname):
if check_schema_exists(context, schema_name, dbname):
drop_schema(context, schema_name, dbname)
def drop_schema(context, schema_name, dbname):
SQL = 'drop schema %s cascade' % schema_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_schema_exists(context, schema_name, dbname):
raise Exception('Unable to successfully drop the schema %s' % schema_name)
def validate_table_data_on_segments(context, tablename, dbname):
seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % tablename
rows = getRows(dbname, seg_data_sql)
for row in rows:
if row[1] == '0' :
raise Exception('Data not present in segment %s' % row[0])
def get_table_names(dbname):
sql = """
SELECT n.nspname AS schemaname, c.relname AS tablename\
FROM pg_class c\
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace\
WHERE c.relkind = 'r'::"char" AND c.oid > 16384 AND (c.relnamespace > 16384 or n.nspname = 'public')
AND n.nspname NOT LIKE 'pg_temp_%'
"""
return getRows(dbname, sql)
def get_partition_tablenames(tablename, dbname, part_level = 1):
child_part_sql = "select partitiontablename from pg_partitions where tablename='%s' and partitionlevel=%s;" % (tablename, part_level)
rows = getRows(dbname, child_part_sql)
return rows
def get_partition_names(schemaname, tablename, dbname, part_level, part_number):
part_num_sql = """select partitionschemaname || '.' || partitiontablename from pg_partitions
where schemaname='%s' and tablename='%s'
and partitionlevel=%s and partitionposition=%s;""" % (schemaname, tablename, part_level, part_number)
rows = getRows(dbname, part_num_sql)
return rows
def validate_part_table_data_on_segments(context, tablename, part_level, dbname):
rows = get_partition_tablenames(tablename, dbname, part_level)
for part_tablename in rows :
seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % part_tablename[0]
rows = getRows(dbname, seg_data_sql)
for row in rows:
if row[1] == '0' :
raise Exception('Data not present in segment %s' % row[0])
def validate_mixed_partition_storage_types(context, tablename, dbname):
partition_names = get_partition_tablenames(tablename, dbname, part_level = 1)
for position, partname in enumerate(partition_names):
        if position in (0, 2, 5, 7):
            storage_type = 'c'
        elif position in (1, 3, 6, 8):
storage_type = 'a'
else:
storage_type = 'h'
for part in partname:
validate_storage_type(context, part, storage_type, dbname)
def validate_storage_type(context, partname, storage_type, dbname):
storage_type_sql = "select oid::regclass, relstorage from pg_class where oid = '%s'::regclass;" % (partname)
rows = getRows(dbname, storage_type_sql)
for row in rows:
        if row[1].strip() != storage_type.strip():
            raise Exception("Storage type %s of partition %s does not match the expected type %s" % (row[1], partname, storage_type))
def create_mixed_storage_partition(context, tablename, dbname):
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table %s (%s) Distributed randomly \
Partition by list(Column2) \
Subpartition by range(Column3) Subpartition Template ( \
subpartition s_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
subpartition s_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
subpartition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
subpartition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
subpartition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
(partition p1 values('backup') , partition p2 values('restore')) \
;" % (tablename, table_definition)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
conn.commit()
populate_partition(tablename, '2010-01-01', dbname, 0)
def create_external_partition(context, tablename, dbname, port, filename):
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table %s (%s) Distributed randomly \
Partition by range(Column3) ( \
partition p_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
partition p_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
partition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
partition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
partition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
;" % (tablename, table_definition)
master_hostname = get_master_hostname();
create_ext_table_str = "Create readable external table %s_ret (%s) \
location ('gpfdist://%s:%s/%s') \
format 'csv' encoding 'utf-8' \
log errors segment reject limit 1000 \
;" % (tablename, table_definition, master_hostname[0][0].strip(), port, filename)
alter_table_str = "Alter table %s exchange partition p_2 \
with table %s_ret without validation \
;" % (tablename, tablename)
drop_table_str = "Drop table %s_ret;" % (tablename)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
dbconn.execSQL(conn, create_ext_table_str)
dbconn.execSQL(conn, alter_table_str)
dbconn.execSQL(conn, drop_table_str)
conn.commit()
populate_partition(tablename, '2010-01-01', dbname, 0, 100)
def modify_partition_data(context, tablename, dbname, partitionnum):
# ONLY works for partition 1 to 3
if partitionnum == 1:
year = '2010'
elif partitionnum == 2:
year = '2011'
elif partitionnum == 3:
year = '2012'
else:
raise Exception("BAD PARAM to modify_partition_data %s" % partitionnum)
cmdStr = """ echo "90,backup,%s-12-30" | psql -d %s -c "copy %s from stdin delimiter ',';" """ % (year, dbname, tablename)
for i in range(10):
cmd = Command(name='insert data into %s' % tablename, cmdStr=cmdStr)
cmd.run(validateAfter=True)
def modify_data(context, tablename, dbname):
cmdStr = 'psql -d %s -c "copy %s to stdout;" | psql -d %s -c "copy %s from stdin;"' % (dbname, tablename, dbname, tablename)
cmd = Command(name='insert data into %s' % tablename, cmdStr=cmdStr)
cmd.run(validateAfter=True)
def add_partition(context, partitionnum, tablename, dbname):
alter_table_str = "alter table %s add default partition p%s; insert into %s select i+%d, 'update', i + date '%s' from generate_series(0,1094) as i" \
% (tablename, partitionnum, tablename, int(partitionnum), PARTITION_START_DATE)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, alter_table_str)
conn.commit()
def drop_partition(context, partitionnum, tablename, dbname):
alter_table_str = "alter table %s drop partition p%s;" % (tablename, partitionnum)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, alter_table_str)
conn.commit()
def create_partition(context, tablename, storage_type, dbname, compression_type=None, partition=True, rowcount=1094, with_data=True, host=None, port=0, user=None):
interval = '1 year'
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table " + tablename + "(" + table_definition + ")"
storage_type_dict = {'ao':'row', 'co':'column'}
part_table = " Distributed Randomly Partition by list(Column2) \
Subpartition by range(Column3) Subpartition Template \
(start (date '%s') end (date '%s') every (interval '%s')) \
(Partition p1 values('backup') , Partition p2 values('restore')) " \
%(PARTITION_START_DATE, PARTITION_END_DATE, interval)
if storage_type == "heap":
create_table_str = create_table_str
if partition:
create_table_str = create_table_str + part_table
elif storage_type == "ao" or storage_type == "co":
create_table_str = create_table_str + " WITH(appendonly = true, orientation = %s) " % storage_type_dict[storage_type]
if compression_type is not None:
create_table_str = create_table_str[:-2] + ", compresstype = " + compression_type + ") "
if partition:
create_table_str = create_table_str + part_table
create_table_str = create_table_str + ";"
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
conn.commit()
if with_data:
populate_partition(tablename, PARTITION_START_DATE, dbname, 0, rowcount, host, port, user)
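def _example_create_partitioned_co(context, dbname):
    # Illustrative sketch only: creates a zlib-compressed, column-oriented
    # partitioned table covering PARTITION_START_DATE..PARTITION_END_DATE and
    # checks that its leaf partitions have data on every segment. The table name
    # is an assumption for demonstration.
    create_partition(context, 'sales_co', 'co', dbname, compression_type='zlib')
    validate_part_table_data_on_segments(context, 'sales_co', 1, dbname)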
# same data size as populate partition, but different values
def populate_partition_diff_data_same_eof(tablename, dbname):
populate_partition(tablename, PARTITION_START_DATE, dbname, 1)
def populate_partition_same_data(tablename, dbname):
populate_partition(tablename, PARTITION_START_DATE, dbname, 0)
def populate_partition(tablename, start_date, dbname, data_offset, rowcount=1094, host=None, port=0, user=None):
insert_sql_str = "insert into %s select i+%d, 'backup', i + date '%s' from generate_series(0,%d) as i" %(tablename, data_offset, start_date, rowcount)
insert_sql_str += "; insert into %s select i+%d, 'restore', i + date '%s' from generate_series(0,%d) as i" %(tablename, data_offset, start_date, rowcount)
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, insert_sql_str)
conn.commit()
def create_indexes(context, table_name, indexname, dbname):
btree_index_sql = "create index btree_%s on %s using btree(column1);" % (indexname, table_name)
bitmap_index_sql = "create index bitmap_%s on %s using bitmap(column3);" % (indexname, table_name)
index_sql = btree_index_sql + bitmap_index_sql
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, index_sql)
conn.commit()
validate_index(context, table_name, dbname)
def validate_index(context, table_name, dbname):
index_sql = "select count(indexrelid::regclass) from pg_index, pg_class where indrelid = '%s'::regclass group by indexrelid;" % table_name
rows = getRows(dbname, index_sql)
if len(rows) != 2:
        raise Exception('Index creation was not successful. Expected 2 rows does not match %d rows' % len(rows))
def create_schema(context, schema_name, dbname):
if not check_schema_exists(context, schema_name, dbname):
schema_sql = "create schema %s" % schema_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, schema_sql)
conn.commit()
def create_int_table(context, table_name, table_type='heap', dbname='testdb'):
CREATE_TABLE_SQL = None
NROW = 1000
table_type = table_type.upper()
if table_type == 'AO':
CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE) as select generate_series(1,%d) as c1' % (table_name, NROW)
elif table_type == 'CO':
CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE, orientation=column) as select generate_series(1, %d) as c1' % (table_name, NROW)
elif table_type == 'HEAP':
CREATE_TABLE_SQL = 'create table %s as select generate_series(1, %d) as c1' % (table_name, NROW)
if CREATE_TABLE_SQL is None:
raise Exception('Invalid table type specified')
SELECT_TABLE_SQL = 'select count(*) from %s' % table_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, CREATE_TABLE_SQL)
conn.commit()
result = dbconn.execSQLForSingleton(conn, SELECT_TABLE_SQL)
if result != NROW:
raise Exception('Integer table creation was not successful. Expected %d does not match %d' %(NROW, result))
def drop_database(context, dbname, host=None, port=0, user=None):
LOOPS = 10
if host == None or port == 0 or user == None:
dropdb_cmd = 'dropdb %s' % dbname
else:
dropdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "drop database %s"' % (host,
port, user, dbname)
for i in range(LOOPS):
context.exception = None
run_gpcommand(context, dropdb_cmd)
if context.exception:
time.sleep(1)
continue
if not check_db_exists(dbname):
return
time.sleep(1)
if context.exception:
raise context.exception
raise Exception('db exists after dropping: %s' % dbname)
def drop_database_if_exists(context, dbname=None, host=None, port=0, user=None):
if check_db_exists(dbname, host=host, port=port, user=user):
drop_database(context, dbname, host=host, port=port, user=user)
def run_on_all_segs(context, dbname, query):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
with dbconn.connect(dbconn.DbURL(dbname=dbname, hostname=seg.getSegmentHostName(), port=seg.getSegmentPort()), utility=True) as conn:
dbconn.execSQL(conn, query)
conn.commit()
def get_nic_up(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='ifconfig nic', cmdStr='sudo /sbin/ifconfig %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
return 'UP' in cmd.get_results().stdout
def bring_nic_down(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='bring down nic', cmdStr='sudo /sbin/ifdown %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
if get_nic_up(hostname, nic):
raise Exception('Unable to bring down nic %s on host %s' % (nic, hostname))
def bring_nic_up(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='bring up nic', cmdStr='sudo /sbin/ifup %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
if not get_nic_up(hostname, nic):
raise Exception('Unable to bring up nic %s on host %s' % (nic, hostname))
def are_segments_synchronized():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.mode != MODE_SYNCHRONIZED:
return False
return True
def get_distribution_policy(dbname):
filename = dbname.strip() + "_dist_policy_backup"
get_dist_policy_to_file(filename, dbname)
def get_dist_policy_to_file(filename, dbname):
dist_policy_sql = " \
SELECT \
c.relname as tablename, p.attrnums as distribution_policy \
FROM \
pg_class c \
INNER JOIN \
gp_distribution_policy p \
ON (c.relfilenode = p.localoid) \
AND \
c.relstorage != 'x' \
ORDER BY c.relname"
current_dir = os.getcwd()
filename = os.path.join(current_dir, './gppylib/test/data', filename)
data_sql = "COPY (%s) TO '%s'" %(dist_policy_sql, filename)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, data_sql)
conn.commit()
def validate_distribution_policy(context, dbname):
filename = dbname.strip() + "_dist_policy_restore"
get_dist_policy_to_file(filename, dbname)
current_dir = os.getcwd()
backup_file = os.path.join(current_dir, './gppylib/test/data', dbname.strip() + "_dist_policy_backup")
restore_file = os.path.join(current_dir, './gppylib/test/data', dbname.strip() + "_dist_policy_restore")
diff_backup_restore_data(context, backup_file, restore_file)
def check_row_count(tablename, dbname, nrows):
NUM_ROWS_QUERY = 'select count(*) from %s' % tablename
# We want to bubble up the exception so that if table does not exist, the test fails
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
if result != nrows:
raise Exception('%d rows in table %s.%s, expected row count = %d' % (result, dbname, tablename, nrows))
def check_empty_table(tablename, dbname):
check_row_count(tablename, dbname, 0)
def match_table_select(context, src_tablename, src_dbname, dest_tablename, dest_dbname, orderby=None, options=''):
if orderby != None :
dest_tbl_qry = 'psql -d %s -c \'select * from %s order by %s\' %s' % (dest_dbname, dest_tablename, orderby, options)
src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s order by %s\' %s''' % (
os.environ.get('GPTRANSFER_SOURCE_PORT'),
os.environ.get('GPTRANSFER_SOURCE_HOST'),
os.environ.get('GPTRANSFER_SOURCE_USER'),
src_dbname, src_tablename, orderby, options)
else:
dest_tbl_qry = 'psql -d %s -c \'select * from %s\' %s' % (dest_dbname, dest_tablename, options)
src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s\' %s''' % (
os.environ.get('GPTRANSFER_SOURCE_PORT'),
os.environ.get('GPTRANSFER_SOURCE_HOST'),
os.environ.get('GPTRANSFER_SOURCE_USER'),
src_dbname, src_tablename, options)
(_, dest_content, _) = run_cmd(dest_tbl_qry)
(_, src_content, _) = run_cmd(src_tbl_qry)
if src_content != dest_content:
raise Exception('''table %s in database %s of source system does not match rows with table %s in database %s of destination system.\n
destination table content:\n%s\n
source table content:\n%s\n''' % (
src_tablename,src_dbname, dest_tablename, dest_dbname, dest_content, src_content))
def get_master_hostname(dbname='template1'):
master_hostname_sql = "select distinct hostname from gp_segment_configuration where content=-1 and role='p'"
return getRows(dbname, master_hostname_sql)
def get_hosts_and_datadirs(dbname='template1'):
get_hosts_and_datadirs_sql = "select hostname, fselocation from gp_segment_configuration, pg_filespace_entry where fsedbid = dbid and role='p';"
return getRows(dbname, get_hosts_and_datadirs_sql)
def get_hosts(dbname='template1'):
get_hosts_sql = "select distinct hostname from gp_segment_configuration where role='p';"
return getRows(dbname, get_hosts_sql)
def get_backup_dirs_for_hosts(dbname='template1'):
get_backup_dir_sql = "select hostname,f.fselocation from pg_filespace_entry f inner join gp_segment_configuration g on f.fsedbid=g.dbid and g.role='p'"
results = getRows(dbname, get_backup_dir_sql)
dir_map = {}
for res in results:
host,dir = res
dir_map.setdefault(host,[]).append(dir)
return dir_map
def cleanup_backup_files(context, dbname, location=None):
dir_map = get_backup_dirs_for_hosts(dbname)
for host in dir_map:
if location:
cmd_str = "ssh %s 'DIR=%s;if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi'"
cmd = cmd_str % (host, location)
else:
cmd_str = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi; done'"
cmd = cmd_str % (host, " ".join(dir_map[host]))
run_command(context, cmd)
if context.exception:
raise context.exception
def cleanup_report_files(context, master_data_dir):
if not master_data_dir:
raise Exception("master_data_dir not specified in cleanup_report_files")
if master_data_dir.strip() == '/':
raise Exception("Can't call cleanup_report_files on root directory")
file_pattern = "gp_*.rpt"
cleanup_cmd = "rm -f %s/%s" % (master_data_dir, file_pattern)
run_command(context, cleanup_cmd)
if context.exception:
raise context.exception
def truncate_table(dbname, tablename):
TRUNCATE_SQL = 'TRUNCATE %s' % tablename
execute_sql(dbname, TRUNCATE_SQL)
def verify_truncate_in_pg_stat_last_operation(context, dbname, oid):
VERIFY_TRUNCATE_SQL = """SELECT *
FROM pg_stat_last_operation
WHERE objid = %d and staactionname = 'TRUNCATE' """ % oid
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
row = dbconn.execSQLForSingletonRow(conn, VERIFY_TRUNCATE_SQL)
if len(row) != 7:
        raise Exception('Invalid number of columns %d' % len(row))
if row[2] != 'TRUNCATE':
raise Exception('Actiontype not expected TRUNCATE "%s"' % row[2])
if row[5]:
raise Exception('Subtype for TRUNCATE operation is not empty %s' % row[5])
def verify_truncate_not_in_pg_stat_last_operation(context, dbname, oid):
VERIFY_TRUNCATE_SQL = """SELECT count(*)
FROM pg_stat_last_operation
WHERE objid = %d and staactionname = 'TRUNCATE' """ % oid
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
thecount = dbconn.execSQLForSingleton(conn, VERIFY_TRUNCATE_SQL)
if thecount != 0:
raise Exception("Found %s rows from query '%s' should be 0" % (thecount, VERIFY_TRUNCATE_SQL))
def get_table_oid(context, dbname, schema, tablename):
OID_SQL = """SELECT c.oid
FROM pg_class c, pg_namespace n
WHERE c.relnamespace = n.oid AND c.relname = '%s' AND n.nspname = '%s'""" % (tablename, schema)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
oid = dbconn.execSQLForSingleton(conn, OID_SQL)
return oid
def insert_numbers(dbname, tablename, lownum, highnum):
sql = "insert into %s select generate_series(%s, %s)" % (tablename, lownum, highnum)
execute_sql(dbname, sql)
def verify_integer_tuple_counts(context, filename):
with open(filename, 'r') as fp:
for line in fp:
tupcount = line.split(',')[-1].strip()
if re.match("^\d+?\.\d+?$", tupcount) is not None:
raise Exception('Expected an integer tuplecount in file %s found float' % filename)
def create_fake_pg_aoseg_table(context, table, dbname):
sql = """CREATE TABLE %s(segno int,
eof double precision,
tupcount double precision,
modcount bigint,
varblockcount double precision,
eofuncompressed double precision)""" % table
execute_sql(dbname, sql)
def insert_row(context, row_values, table, dbname):
sql = """INSERT INTO %s values(%s)""" % (table, row_values)
execute_sql(dbname, sql)
def copy_file_to_all_db_hosts(context, filename):
hosts_set = set()
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for seg in gparray.getDbList():
if seg.isSegmentPrimary():
hosts_set.add(seg.getSegmentAddress())
hostfile = '/tmp/copy_host_file.behave'
with open(hostfile, 'w') as fd:
for h in hosts_set:
fd.write('%s\n' % h)
cmd = 'gpscp -f %s %s =:%s' % (hostfile, filename, filename)
run_command(context, cmd)
if context.exception:
raise Exception("FAIL: '%s' '%s'" % (cmd, context.exception.__str__()))
os.remove(hostfile)
def create_large_num_partitions(table_type, table_name, db_name, num_partitions=None):
if table_type == "ao":
condition = "with(appendonly=true)"
elif table_type == "co":
condition = "with(appendonly=true, orientation=column)"
else:
condition = ""
if num_partitions is None:
create_large_partitions_sql = """
create table %s (column1 int, column2 int) %s partition by range(column1) subpartition by range(column2) subpartition template(start(1) end(75) every(1)) (start(1) end(75) every(1))
""" % (table_name, condition)
else:
create_large_partitions_sql = """
create table %s (column1 int, column2 int) %s partition by range(column1) (start(1) end(%d) every(1))
""" % (table_name, condition, num_partitions)
execute_sql(db_name, create_large_partitions_sql)
if '.' in table_name:
schema, table = table_name.split('.')
verify_table_exists_sql = """select count(*) from pg_class c, pg_namespace n
where c.relname = E'%s' and n.nspname = E'%s' and c.relnamespace = n.oid;
""" % (table, schema)
else:
verify_table_exists_sql = """select count(*) from pg_class where relname = E'%s'""" % table_name
num_rows = getRows(db_name, verify_table_exists_sql)[0][0]
if num_rows != 1:
raise Exception('Creation of table "%s:%s" failed. Num rows in pg_class = %s' % (db_name, table_name, num_rows))
def validate_num_restored_tables(context, num_tables, dbname):
tbls = get_table_names(dbname)
count_query = """select count(*) from %s"""
num_validate_tables = 0
for t in tbls:
name = '%s.%s' % (t[0], t[1])
count = getRows(dbname, count_query % name)[0][0]
if count == 0:
continue
else:
validate_restore_data(context, name, dbname)
num_validate_tables += 1
if num_validate_tables != int(num_tables.strip()):
raise Exception('Invalid number of tables were restored. Expected "%s", Actual "%s"' % (num_tables, num_validate_tables))
def get_partition_list(partition_type, dbname):
if partition_type == 'ao':
sql = GET_ALL_AO_DATATABLES_SQL
elif partition_type == 'co':
sql = GET_ALL_CO_DATATABLES_SQL
partition_list = getRows(dbname, sql)
for line in partition_list:
if len(line) != 4:
raise Exception('Invalid results from query to get all AO tables: [%s]' % (','.join(line)))
return partition_list
def verify_stats(dbname, partition_info):
for (oid, schemaname, partition_name, tupletable) in partition_info:
tuple_count_sql = "select to_char(sum(tupcount::bigint), '999999999999999999999') from pg_aoseg.%s" % tupletable
tuple_count = getRows(dbname, tuple_count_sql)[0][0]
if tuple_count:
tuple_count = tuple_count.strip()
else:
tuple_count = '0'
validate_tuple_count(dbname, schemaname, partition_name, tuple_count)
def validate_tuple_count(dbname, schemaname, partition_name, tuple_count):
sql = 'select count(*) from %s.%s' % (schemaname, partition_name)
row_count = getRows(dbname, sql)[0][0]
if int(row_count) != int(tuple_count):
raise Exception('Stats for the table %s.%s does not match. Stat count "%s" does not match the actual tuple count "%s"' % (schemaname, partition_name, tuple_count, row_count))
def validate_aoco_stats(context, dbname, table, expected_tupcount):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
schema, table = table.split('.')
sql = "SELECT relname FROM pg_class \
WHERE oid in (SELECT segrelid FROM pg_appendonly \
WHERE relid in (SELECT oid FROM pg_class \
WHERE relname = '%s' AND relnamespace = (SELECT oid FROM pg_namespace \
WHERE nspname = '%s')))" % (table, schema)
tname = dbconn.execSQLForSingleton(conn, sql)
sql = "select sum(tupcount) from pg_aoseg.%s" % tname.strip()
rows = getRows(dbname, sql)
tupcount = int(rows[0][0])
if tupcount != int(expected_tupcount):
raise Exception("%s has stats of %d rows in %s table and should have %s" % (table, tupcount, tname, expected_tupcount))
def validate_no_aoco_stats(context, dbname, table):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
sql = "select relname from pg_class where oid in (select segrelid from pg_appendonly where relid in (select oid from pg_class where relname = '%s'))" % table
tname = dbconn.execSQLForSingleton(conn, sql)
sql = "select tupcount from pg_aoseg.%s" % tname.strip()
rows = getRows(dbname, sql)
if len(rows) != 0:
raise Exception("%s has stats of %d rows in %s table and should be 0" % (table, int(rows[0][0]), tname))
def get_all_hostnames_as_list(context, dbname):
hosts = []
segs = get_segment_hostnames(context, dbname)
for seg in segs:
hosts.append(seg[0].strip())
masters = get_master_hostname(dbname)
for master in masters:
hosts.append(master[0].strip())
return hosts
def get_pid_for_segment(seg_data_dir, seg_host):
cmd = Command(name='get list of postmaster processes',
cmdStr='ps -eaf | grep %s' % seg_data_dir,
ctxt=REMOTE,
remoteHost=seg_host)
cmd.run(validateAfter=True)
pid = None
results = cmd.get_results().stdout.strip().split('\n')
for res in results:
if 'grep' not in res:
pid = res.split()[1]
if pid is None:
return None
return int(pid)
def install_gppkg(context):
if 'GPPKG_PATH' not in os.environ:
raise Exception('GPPKG_PATH needs to be set in the environment to install gppkg')
if 'GPPKG_NAME' not in os.environ:
raise Exception('GPPKG_NAME needs to be set in the environment to install gppkg')
gppkg_path = os.environ['GPPKG_PATH']
gppkg_name = os.environ['GPPKG_NAME']
command = "gppkg --install %s/%s.gppkg" % (gppkg_path, gppkg_name)
run_command(context, command)
print "Install gppkg command: '%s', stdout: '%s', stderr: '%s'" % (command, context.stdout_message, context.error_message)
def enable_postgis_and_load_test_data_for_postgis_1(context):
if 'GPHOME' not in os.environ:
raise Exception('GPHOME needs to be set in the environment')
install_gppkg(context)
gphome = os.environ['GPHOME']
path = "%s/share/postgresql/contrib" % gphome
command = "psql -d opengeo -f %s/postgis.sql" % path
run_command(context, command)
command = "psql -d opengeo -f %s/spatial_ref_sys.sql" % path
run_command(context, command)
current_path = os.path.realpath(__file__)
current_dir = os.path.dirname(current_path)
postgis_data_dir = "%s/../behave/mgmt_utils/steps/data/postgis" % current_dir
command = "psql -d opengeo -f %s/nyc_census_blocks_1.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_neighborhoods_1.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_subway_stations_1.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_census_sociodata.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_streets_1.sql" % postgis_data_dir
run_command(context, command)
def enable_postgis_and_load_test_data(context):
if 'GPHOME' not in os.environ:
raise Exception('GPHOME needs to be set in the environment')
install_gppkg(context)
gphome = os.environ['GPHOME']
path = "%s/share/postgresql/contrib/postgis-2.0" % gphome
command = "psql -d opengeo -f %s/postgis.sql" % path
run_command(context, command)
command = "psql -d opengeo -f %s/spatial_ref_sys.sql" % path
run_command(context, command)
current_path = os.path.realpath(__file__)
current_dir = os.path.dirname(current_path)
postgis_data_dir = "%s/../behave/mgmt_utils/steps/data/postgis" % current_dir
command = "psql -d opengeo -f %s/nyc_census_blocks.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_neighborhoods.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_subway_stations.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_census_sociodata.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_streets.sql" % postgis_data_dir
run_command(context, command)
def kill_process(pid, host=None, sig=signal.SIGTERM):
if host is not None:
cmd = Command('kill process on a given host',
cmdStr='kill -%d %d' % (sig, pid),
ctxt=REMOTE,
remoteHost=host)
cmd.run(validateAfter=True)
else:
os.kill(pid, sig)
def get_num_segments(primary=True, mirror=True, master=True, standby=True):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segments = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
mirror_segments = [seg for seg in gparray.getDbList() if seg.isSegmentMirror()]
num_segments = 0
if primary:
num_segments += len(primary_segments)
if mirror:
num_segments += len(mirror_segments)
if master and gparray.master is not None:
num_segments += 1
if standby and gparray.standbyMaster is not None:
num_segments += 1
return num_segments
def check_user_permissions(file_name, access_mode):
st = os.stat(file_name)
if access_mode == 'write':
return bool(st.st_mode & stat.S_IWUSR)
elif access_mode == 'read':
return bool(st.st_mode & stat.S_IRUSR)
elif access_mode == 'execute':
return bool(st.st_mode & stat.S_IXUSR)
else:
raise Exception('Invalid mode specified, should be read, write or execute only')
def get_change_tracking_segment_info():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for seg in gparray.getDbList():
if seg.isSegmentModeInChangeLogging():
return seg.getSegmentPort(), seg.getSegmentHostName()
def are_segments_running():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.status != 'u':
return False
return True
def modify_sql_file(file, hostport):
if os.path.isfile(file):
for line in fileinput.FileInput(file,inplace=1):
if line.find("gpfdist")>=0:
line = re.sub('(\d+)\.(\d+)\.(\d+)\.(\d+)\:(\d+)',hostport, line)
print str(re.sub('\n','',line))
def create_gpfilespace_config(host, port, user,fs_name, config_file, working_dir='/tmp'):
mirror_hosts = []
primary_hosts = []
standby_host = ''
master_host = ''
fspath_master = working_dir + '/fs_master'
fspath_standby = working_dir + '/fs_standby'
fspath_primary = working_dir + '/fs_primary'
fspath_mirror = working_dir + '/fs_mirror'
get_master_filespace_entry = 'psql -t -h %s -p %s -U %s -d template1 -c \" select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'p\' and content=-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_master_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_master_filespace_entry)
else:
file = open(config_file,'w')
file.write('filespace:%s\n'%fs_name)
result = out.split('\n')
for line in result:
if line.strip():
row = line.split('|')
row = [col.strip() for col in row]
hostname = row[0]
master_host = hostname
dbid = row[1]
fs_loc = os.path.join(fspath_master,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
get_standby_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'m\' and content=-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_standby_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_standby_filespace_entry)
else:
result = out.split('\n')
file = open(config_file,'a')
for line in result:
if line.strip():
row = line.strip().split('|')
row = [col.strip() for col in row]
hostname = row[0]
standby_host= hostname
dbid = row[1]
fs_loc = os.path.join(fspath_standby,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
get_primary_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'p\' and content>-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_primary_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_primary_filespace_entry)
else:
result = out.split('\n')
file = open(config_file,'a')
for line in result:
if line.strip():
row = line.strip().split('|')
row = [col.strip() for col in row]
hostname = row[0]
primary_hosts.append(hostname)
dbid = row[1]
fs_loc = os.path.join(fspath_primary,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
get_mirror_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'m\' and content>-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_mirror_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_mirror_filespace_entry)
else:
result = out.split('\n')
file = open(config_file,'a')
for line in result:
if line.strip():
row = line.strip().split('|')
row = [col.strip() for col in row]
hostname = row[0]
mirror_hosts.append(hostname)
dbid = row[1]
fs_loc = os.path.join(fspath_mirror,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
for host in primary_hosts:
remove_dir(host,fspath_primary)
create_dir(host,fspath_primary)
for host in mirror_hosts:
remove_dir(host,fspath_mirror)
create_dir(host,fspath_mirror)
remove_dir(master_host,fspath_master)
remove_dir(standby_host,fspath_standby)
create_dir(master_host,fspath_master)
create_dir(standby_host,fspath_standby)
def remove_dir(host, directory):
cmd = 'gpssh -h %s -e \'rm -rf %s\''%(host, directory)
run_cmd(cmd)
def create_dir(host, directory):
cmd = 'gpssh -h %s -e \'mkdir -p %s\''%(host, directory)
run_cmd(cmd)
def wait_till_change_tracking_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
num_ct_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode =\'c\';"'%(host, port, user)
(rc, out, err) = run_cmd(num_ct_nodes)
if rc != 0:
raise Exception('Exception from executing psql query: %s'%num_ct_nodes)
else:
num_cl = int(out.strip())
count = 0
while(num_cl == 0):
time.sleep(30)
(rc, out, err) = run_cmd(num_ct_nodes)
num_cl = int(out.strip())
count = count + 1
if (count > 80):
raise Exception("Timed out: cluster not in change tracking")
return (True,num_cl)
def wait_till_insync_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
num_unsync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode <> \'s\' or status<> \'u\';"'%(host, port, user)
(rc, out, err) = run_cmd(num_unsync_nodes)
if rc != 0:
raise Exception('Exception from executing psql query: %s'%num_unsync_nodes)
else:
num_unsync = int(out.strip())
count = 0
while(num_unsync > 0):
time.sleep(30)
(rc, out, err) = run_cmd(num_unsync_nodes)
num_unsync = int(out.strip())
count = count + 1
if (count > 80):
raise Exception("Timed out: cluster not in sync transition")
return True
def wait_till_resync_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
num_resync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode =\'r\';"'%(host, port, user)
num_insync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode <>\'s\';"'%(host, port, user)
(rc1, out1, err1) = run_cmd(num_resync_nodes)
(rc2, out2, err2) = run_cmd(num_insync_nodes)
if rc1 !=0 or rc2 !=0:
        raise Exception('Exception from executing psql queries: %s ; %s' % (num_resync_nodes, num_insync_nodes))
else:
num_resync = int(out1.strip())
num_insync = int(out2.strip())
count = 0
while(num_resync != num_insync):
time.sleep(30)
(rc1, out1, err1) = run_cmd(num_resync_nodes)
(rc2, out2, err2) = run_cmd(num_insync_nodes)
num_resync = int(out1.strip())
num_insync = int(out2.strip())
count = count + 1
if (count > 80):
raise Exception("Timed out: cluster not in sync transition")
return True
def check_dump_dir_exists(context, dbname):
dir_map = get_backup_dirs_for_hosts(dbname)
cmd_str = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/db_dumps/\" ]; then echo \"$DIR EXISTS\"; else echo \"$DIR NOT FOUND\"; fi; done'"
for host in dir_map:
cmd = cmd_str % (host, " ".join(dir_map[host]))
run_command(context, cmd)
if context.exception:
raise context.exception
if 'EXISTS' in context.stdout_message:
raise Exception("db_dumps directory is present in master/segments.")
def verify_restored_table_is_analyzed(context, table_name, dbname):
ROW_COUNT_SQL = """SELECT count(*) FROM %s""" % table_name
if table_name.find('.') != -1:
schema_name,table_name = table_name.split(".")
else:
schema_name = 'public'
schema_name = pg.escape_string(schema_name)
table_name = pg.escape_string(table_name)
ROW_COUNT_PG_CLASS_SQL = """SELECT reltuples FROM pg_class WHERE relname = '%s'
AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '%s')""" % (table_name, schema_name)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, ROW_COUNT_SQL)
rows = curs.fetchall()
curs = dbconn.execSQL(conn, ROW_COUNT_PG_CLASS_SQL)
rows_from_pgclass = curs.fetchall()
if rows == rows_from_pgclass:
return True
else:
return False
def analyze_database(context, dbname):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, "analyze")
def delete_rows_from_table(context, dbname, table_name, column_name, info):
DELETE_SQL = """DELETE FROM %s WHERE %s = %s""" % (table_name, column_name, info)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, DELETE_SQL)
conn.commit()
def validate_parse_email_file(context, email_file_path):
if os.path.isfile(email_file_path) is False:
raise Exception("\'%s\' file does not exist." % email_file_path)
    if not email_file_path.endswith(".yaml"):
raise Exception("\'%s\' is not \'.yaml\' file. File containing email details should be \'.yaml\' file." % email_file_path)
if (os.path.getsize(email_file_path) > 0) is False:
raise Exception("\'%s\' file is empty." % email_file_path)
email_key_list = ["DBNAME","FROM", "SUBJECT"]
try:
with open(email_file_path, 'r') as f:
doc = yaml.load(f)
context.email_details = doc['EMAIL_DETAILS']
for email in context.email_details:
for key in email.keys():
if key not in email_key_list:
raise Exception(" %s not present" % key)
except Exception as e:
raise Exception("\'%s\' file is not formatted properly." % email_file_path)
def check_count_for_specific_query(dbname, query, nrows):
NUM_ROWS_QUERY = '%s' % query
# We want to bubble up the exception so that if table does not exist, the test fails
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
if result != nrows:
        raise Exception('%d rows returned by query "%s" on database %s, expected row count = %d' % (result, query, dbname, nrows))
| apache-2.0 | 1,936,188,500,938,947,300 | 43.489924 | 246 | 0.619453 | false |
nhazekam/cctools | chirp/src/bindings/python/chirp.binding.py | 1 | 22645 | ## @package ChirpPython
#
# Python Chirp bindings.
#
# The objects and methods provided by this package correspond to the native
# C API in @ref chirp_reli.h and chirp_swig_wrap.h
#
# The SWIG-based Python bindings provide a higher-level interface that
# revolves around:
#
# - @ref Chirp.Client
# - @ref Chirp.Stat
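#
# A minimal usage sketch (the host:port and paths below are hypothetical
# placeholders, not part of this module):
#
# @code
# >>> client = Client('chirp.example.org:9094', timeout=30)
# >>> print client.whoami()
# >>> for entry in client.ls('/'):
# ...     print entry.path, entry.size
# >>> client.put('data.txt', '/users/someone/data.txt')
# @endcode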
import os
import time
import json
import binascii
##
# Python Client object
#
# This class is used to create a chirp client
class Client(object):
##
# Create a new chirp client
#
# @param self Reference to the current task object.
# @param hostport The host:port of the server.
# @param timeout The time to wait for a server response on every request.
# @param authentication A list of prefered authentications. E.g., ['tickets', 'unix']
# @param debug Generate client debug output.
def __init__(self, hostport, timeout=60, authentication=None, tickets=None, debug=False):
self.hostport = hostport
self.timeout = timeout
if debug:
cctools_debug_config('chirp_python_client')
cctools_debug_flags_set('chirp')
if tickets and (authentication is None):
authentication = ['ticket']
self.__set_tickets(tickets)
if authentication is None:
auth_register_all()
else:
for auth in authentication:
auth_register_byname(auth)
self.identity = self.whoami()
        if self.identity == '':
raise AuthenticationFailure(authentication)
def __exit__(self):
chirp_reli_disconnect(self.hostport)
def __del__(self):
chirp_reli_disconnect(self.hostport)
def __stoptime(self, absolute_stop_time=None, timeout=None):
if timeout is None:
timeout = self.timeout
if absolute_stop_time is None:
absolute_stop_time = time.time() + timeout
return absolute_stop_time
def __set_tickets(self, tickets):
tickets_str = None
if tickets is None:
try:
tickets_str = os.environ['CHIRP_CLIENT_TICKETS']
except KeyError:
tickets_str = None
else:
tickets_str = ','.join(tickets)
if tickets_str is not None:
auth_ticket_load(tickets_str)
##
# Returns a string with identity of the client according to the server.
#
# @param self Reference to the current task object.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def whoami(self, absolute_stop_time=None, timeout=None):
return chirp_wrap_whoami(self.hostport, self.__stoptime(absolute_stop_time, timeout))
##
    # Returns a list with the ACL entries of the given directory.
# Throws an IOError on error (no such directory).
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def listacl(self, path='/', absolute_stop_time=None, timeout=None):
acls = chirp_wrap_listacl(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if acls is None:
raise IOError(path)
return acls.split('\n')
##
    # Grants the given rights to a subject on the target directory.
# Throws a GeneralError on error.
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param subject Target subject.
# @param rights Permissions to be granted.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def setacl(self, path, subject, rights, absolute_stop_time=None, timeout=None):
result = chirp_reli_setacl(self.hostport, path, subject, rights, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('setacl', result, [path, subject, rights])
return result
##
# Set the ACL for the given directory to be only for the rights to the calling user.
# Throws a GeneralError on error.
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param rights Permissions to be granted.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def resetacl(self, path, rights, absolute_stop_time=None, timeout=None):
result = chirp_wrap_resetacl(self.hostport, path, rights, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
            raise GeneralFailure('resetacl', result, [path, rights])
return result
##
    # Returns a list of Chirp.Stat objects for the files in the path.
# Throws an IOError on error (no such directory).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def ls(self, path, absolute_stop_time=None, timeout=None):
dr = chirp_reli_opendir(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
files = []
        if dr is None:
raise IOError(path)
while True:
d = chirp_reli_readdir(dr)
if d is None: break
files.append(Stat(d.name, d.info))
return files
##
# Returns a Chirp.Stat object with information on path.
# Throws an IOError on error (e.g., no such path or insufficient permissions).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def stat(self, path, absolute_stop_time=None, timeout=None):
info = chirp_wrap_stat(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if info is None:
raise IOError(path)
return Stat(path, info)
##
# Changes permissions on path.
# Throws a GeneralFailure on error (e.g., no such path or insufficient permissions).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param mode Desired permissions (e.g., 0755)
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def chmod(self, path, mode, absolute_stop_time=None, timeout=None):
result = chirp_reli_chmod(self.hostport, path, mode, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('chmod', result)
return result
##
# Copies local file/directory source to the chirp server as file/directory destination.
# If destination is not given, source name is used.
# Raises Chirp.TransferFailure on error.
#
# @param self Reference to the current task object.
# @param source A local file or directory.
# @param destination File or directory name to use in the server (defaults to source).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def put(self, source, destination=None, absolute_stop_time=None, timeout=None):
if destination is None:
destination = source
result = chirp_recursive_put(self.hostport,
source, destination,
self.__stoptime(absolute_stop_time, timeout))
if(result > -1):
return result
raise TransferFailure('put', result, source, destination)
##
# Copies server file/directory source to the local file/directory destination.
# If destination is not given, source name is used.
# Raises Chirp.TransferFailure on error.
#
# @param self Reference to the current task object.
# @param source A server file or directory.
# @param destination File or directory name to be used locally (defaults to source).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def get(self, source, destination=None, absolute_stop_time=None, timeout=None):
if destination is None:
destination = source
result = chirp_recursive_get(self.hostport,
source, destination,
self.__stoptime(absolute_stop_time, timeout))
if(result > -1):
return result
raise TransferFailure('get', result, source, destination)
##
# Removes the given file or directory from the server.
# Raises OSError on error.
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def rm(self, path, absolute_stop_time=None, timeout=None):
status = chirp_reli_rmall(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if status < 0:
raise OSError
##
# Recursively create the directories in path.
# Raises OSError on error.
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param mode Unix permissions for the created directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def mkdir(self, path, mode=493, absolute_stop_time=None, timeout=None):
result = chirp_reli_mkdir_recursive(self.hostport, path, mode, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise OSError
return result
##
# Computes the checksum of path.
# Raises IOError on error.
#
# @param self Reference to the current task object.
# @param path Target file.
# @param algorithm One of 'md5' or 'sha1' (default).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
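    #
    # @code
    # >>> # Illustrative only; the path is a hypothetical example.
    # >>> digest = client.hash('/users/someone/data.txt', algorithm='md5')
    # @endcode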
def hash(self, path, algorithm='sha1', absolute_stop_time=None, timeout=None):
hash_hex = chirp_wrap_hash(self.hostport, path, algorithm, self.__stoptime(absolute_stop_time, timeout))
if hash_hex is None:
raise IOError
return hash_hex
##
# Creates a chirp job. See http://ccl.cse.nd.edu/software/manuals/chirp.html for details.
#
# @param job_description A dictionary with a job chirp description.
#
# @code
# job_description = {
# 'executable': "/bin/tar",
# 'arguments': [ 'tar', '-cf', 'archive.tar', 'a', 'b' ],
# 'files': { 'task_path': 'a',
# 'serv_path': '/users/magrat/a.txt'
# 'type': 'INPUT' },
# { 'task_path': 'b',
# 'serv_path': '/users/magrat/b.txt'
# 'type': 'INPUT' },
# { 'task_path': 'archive.tar',
# 'serv_path': '/users/magrat/archive.tar'
# 'type': 'OUTPUT' }
# }
# job_id = client.job_create(job_description);
# @endcode
def job_create(self, job_description):
job_json = json.dumps(job_description)
job_id = chirp_wrap_job_create(self.hostport, job_json, self.__stoptime())
if job_id < 0:
raise ChirpJobError('create', job_id, job_json)
return job_id;
##
# Kills the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be killed.
#
def job_kill(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_kill(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('kill', result, ids_str)
return result;
##
# Commits (starts running) the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be committed.
#
def job_commit(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_commit(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('commit', result, ids_str)
return result;
##
# Reaps the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be reaped.
#
def job_reap(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_reap(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('reap', result, ids_str)
return result;
##
# Obtains the current status for each job id. The value returned is a
# list which contains a dictionary reference per job id.
#
# @param job_ids Job ids of the chirp jobs to be reaped.
#
def job_status(self, *job_ids):
ids_str = json.dumps(job_ids)
status = chirp_wrap_job_status(self.hostport, ids_str, self.__stoptime())
if status is None:
raise ChirpJobError('status', None, ids_str)
return json.loads(status);
##
# Waits waiting_time seconds for the job_id to terminate. Return value is
    # the same as job_status. If the call times out, an empty string is
    # returned. If job_id is missing, job_wait waits for any of the user's jobs.
#
# @param waiting_time maximum number of seconds to wait for a job to finish.
# @param job_id id of the job to wait.
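    #
    # @code
    # >>> # Illustrative only; job id 42 is a hypothetical example.
    # >>> state = client.job_wait(60, job_id = 42)
    # @endcode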
def job_wait(self, waiting_time, job_id = 0):
status = chirp_wrap_job_wait(self.hostport, job_id, waiting_time, self.__stoptime())
if status is None:
raise ChirpJobError('status', None, job_id)
return json.loads(status);
##
# Python Stat object
#
# This class is used to record stat information for files/directories of a chirp server.
class Stat(object):
def __init__(self, path, cstat):
self._path = path
self._info = cstat
##
# Target path.
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.path
# @endcode
@property
def path(self):
return self._path
##
# ID of device containing file.
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.device
# @endcode
@property
def device(self):
return self._info.cst_dev
##
# inode number
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.inode
# @endcode
@property
def inode(self):
return self._info.cst_ino
##
# file mode permissions
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.mode
# @endcode
@property
def mode(self):
return self._info.cst_mode
##
# number of hard links
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.nlink
# @endcode
@property
def nlink(self):
return self._info.cst_nlink
##
# user ID of owner
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.uid
# @endcode
@property
def uid(self):
return self._info.cst_uid
##
# group ID of owner
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.gid
# @endcode
@property
def gid(self):
return self._info.cst_gid
##
# device ID if special file
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.rdev
# @endcode
@property
def rdev(self):
return self._info.cst_rdev
##
# total size, in bytes
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.size
# @endcode
@property
def size(self):
return self._info.cst_size
##
# block size for file system I/O
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.block_size
# @endcode
@property
def block_size(self):
return self._info.cst_blksize
##
# number of 512B blocks allocated
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.blocks
# @endcode
@property
def blocks(self):
return self._info.cst_blocks
##
# number of seconds since epoch since last access
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.atime
# @endcode
@property
def atime(self):
return self._info.cst_atime
##
# number of seconds since epoch since last modification
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.mtime
# @endcode
@property
def mtime(self):
return self._info.cst_mtime
##
# number of seconds since epoch since last status change
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.ctime
# @endcode
@property
def ctime(self):
return self._info.cst_ctime
def __repr__(self):
return "%s uid:%d gid:%d size:%d" % (self.path, self.uid, self.gid, self.size)
class AuthenticationFailure(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class GeneralFailure(Exception):
def __init__(self, action, status, value):
self.action = action
self.status = status
self.value = value
def __str__(self):
return "%s(%s) %s" % (self.action, self.status, self.value)
class TransferFailure(Exception):
def __init__(self, action, status, source, dest):
self.action = action
self.status = status
self.source = source
self.dest = dest
def __str__(self):
return "Error with %s(%s) %s %s" % (self.action, self.status, self.source, self.dest)
class ChirpJobError(Exception):
def __init__(self, action, status, value):
self.action = action
self.status = status
self.value = value
def __str__(self):
return "%s(%s) %s" % (self.action, self.status, self.value)
# @endcode
| gpl-2.0 | -4,008,700,873,344,837,600 | 33.678407 | 118 | 0.573151 | false |
kralf/morsel | python/lib/morsel/nodes/ode/solids/mesh.py | 1 | 1092 | from morsel.panda import *
from morsel.nodes.node import Node
from morsel.nodes.ode.object import Object
from morsel.nodes.facade import Mesh as _Mesh
from morsel.nodes.ode.solid import Solid
#-------------------------------------------------------------------------------
class Mesh(Solid):
def __init__(self, **kargs):
super(Mesh, self).__init__(**kargs)
#-------------------------------------------------------------------------------
def getMesh(self):
if not self._mesh and self.object:
self._mesh = _Mesh(parent = self)
self._mesh.copyFrom(self.object.mesh.model, flatten = True)
return self._mesh
mesh = property(getMesh)
#-------------------------------------------------------------------------------
def fit(self, node):
Solid.fit(self, node)
mesh = _Mesh(position = self.globalPosition, orientation =
self.globalOrientation)
mesh.copyFrom(node.mesh, flatten = True)
data = panda.OdeTriMeshData(mesh)
mesh.detachNode()
self.geometry = panda.OdeTriMeshGeom(node.world.space, data)
| gpl-2.0 | -6,021,325,268,098,747,000 | 29.361111 | 80 | 0.519231 | false |
alexhilton/miscellaneous | python/pygrep.py | 1 | 3334 | #!/usr/bin/env python
"""A Python version of grep utility.
Search one or more named input files against one ore more given patterns.
Print the line containing the match, if there are any.
"""
from optparse import OptionParser;
import re;
import fileinput;
import os.path;
FILENAME = '\033[92m';
LINENO = '\033[94m';
MATCH = '\033[91m';
ENDC = '\033[0m';
class MultiMatcher(object):
"""A set of searchable Regular Expression Patterns
Accept one or more regular expression such that if any one of them
matches a line the first successful match is returned.
"""
def __init__(self, multipattern, ignore_case):
flags = 0;
if ignore_case:
flags = re.IGNORECASE;
self.multipattern = [re.compile(pattern, flags) for pattern in multipattern];
def search(self, line):
for pattern in self.multipattern:
m = pattern.search(line);
if m is not None:
return m;
def build_options():
parser = OptionParser(usage = "usage: %prog [options] -e PATTERN files", version = "%prog 1.0");
parser.add_option("-i", "--ignore-case", action = "store_true", dest = "ignore_case",
default = False, help = "ignore case of letters when matching");
parser.add_option("-r", "--recursive", action = "store_true", dest = "recursive",
default = False, help = "search for files in directory recursively");
parser.add_option("-n", "--negative", action = "store_true", dest = "negative",
default = False, help = "show the lines that does not match the pattern");
parser.add_option("-e", "--regexpr", action = "append", dest = "regexpr",
help = "specify pattern expression on which to match");
return parser;
def do_matching(filename, matcher):
for line in fileinput.input(filename):
line = line.rstrip();
match = matcher.search(line);
if options.negative:
if match is None:
print "%s%s:%s%d %s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), ENDC, line);
else:
if match is not None:
first_part = line[:match.start()];
the_match = line[match.start():match.end()];
second_part = line[match.end():];
print "%s%s:%s%d %s%s%s%s%s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), \
ENDC, first_part, MATCH, the_match, ENDC, second_part);
def main():
global options;
parser = build_options();
options, args = parser.parse_args();
if not options.regexpr:
parser.error("You must specify at least one PATTERN");
if not args:
parser.error("You must specify at least one input file or directory");
matcher = MultiMatcher(options.regexpr, options.ignore_case);
for filename in args:
if not os.path.exists(filename):
print "No such file or directory: ", filename;
continue;
if options.recursive and os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
[do_matching(os.path.join(root, entry), matcher) for entry in files];
elif os.path.isfile(filename):
do_matching(filename, matcher);
if __name__ == "__main__":
main();
| apache-2.0 | 1,519,449,779,832,885,800 | 37.321839 | 118 | 0.607379 | false |
tejal29/pants | src/python/pants/base/exceptions.py | 1 | 1226 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class TaskError(Exception):
"""Indicates a task has failed."""
def __init__(self, *args, **kwargs):
""":param int exit_code: an optional exit code (1, by default)"""
self._exit_code = kwargs.pop('exit_code', 1)
super(TaskError, self).__init__(*args, **kwargs)
@property
def exit_code(self):
return self._exit_code
class TargetDefinitionException(Exception):
"""Indicates an invalid target definition."""
def __init__(self, target, msg):
"""
:param target: the target in question
:param string msg: a description of the target misconfiguration
"""
    super(TargetDefinitionException, self).__init__('Invalid target %s: %s' % (target, msg))
class BuildConfigurationError(Exception):
"""Indicates an error in a pants installation's configuration."""
class BackendConfigurationError(BuildConfigurationError):
"""Indicates a plugin backend with a missing or malformed register module."""
| apache-2.0 | 1,081,033,649,883,924,500 | 31.263158 | 93 | 0.692496 | false |
etkirsch/legends-of-erukar | erukar/content/inventory/weapons/standard/Focus.py | 1 | 1029 | import numpy as np
from erukar.system.engine.inventory import ArcaneWeapon
class Focus(ArcaneWeapon):
Probability = 1
BaseName = "Focus"
EssentialPart = "devotion"
AttackRange = 3
RangePenalty = 3
BaseWeight = 1.0
# Damage
DamageRange = [2, 5]
DamageType = 'force'
DamageModifier = "sense"
DamageScalar = 2.4
ScalingRequirement = 6
EnergyCost = 5
# Distribution
Distribution = np.random.gamma
DistributionProperties = (2, 0.3)
BaseStatInfluences = {
'sense': {'requirement': 8, 'scaling_factor': 3.5, 'cutoff': 200},
'acuity': {'requirement': 0, 'scaling_factor': 1.2, 'cutoff': 100},
}
def failing_requirements(self, wielder):
if wielder.arcane_energy < self.EnergyCost:
return ['Not enough Arcane Energy to use {} -- need {}, have {}'.format(self.alias(), self.EnergyCost, wielder.arcane_energy)]
def on_calculate_attack(self, cmd):
cmd.args['player_lifeform'].arcane_energy -= self.EnergyCost
| agpl-3.0 | -8,065,563,989,550,132,000 | 27.583333 | 138 | 0.640428 | false |
agx/git-buildpackage | tests/component/deb/__init__.py | 1 | 1200 | # vim: set fileencoding=utf-8 :
#
# (C) 2012 Intel Corporation <[email protected]>
# (C) 2013 Guido Günther <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
import os
from tests.component import ComponentTestGitRepository
DEB_TEST_SUBMODULE = os.path.join('tests', 'component', 'deb', 'data')
DEB_TEST_DATA_DIR = os.path.abspath(DEB_TEST_SUBMODULE)
DEB_TEST_DOWNLOAD_URL = 'https://git.sigxcpu.org/cgit/gbp/deb-testdata/plain/'
def setup():
"""Test Module setup"""
ComponentTestGitRepository.check_testdata(DEB_TEST_SUBMODULE)
| gpl-2.0 | 5,091,041,567,779,918,000 | 37.677419 | 78 | 0.731443 | false |
fishroot/qdeep | lib/qdeep/objects/script/__init__.py | 1 | 6352 | # -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
import nemoa
import qdeep.objects.common
from PySide import QtGui, QtCore
class Editor(qdeep.objects.common.Editor):
objType = 'script'
def createCentralWidget(self):
self.textArea = QtGui.QTextEdit()
self.textArea.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.textArea.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.textArea.document().contentsChanged.connect(
self.documentWasModified)
font = QtGui.QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(10)
self.textArea.setFont(font)
self.textArea.setAcceptDrops(True)
self.highlighter = Highlighter(self.textArea.document())
self.setCentralWidget(self.textArea)
def createActions(self):
self.actRunScript = QtGui.QAction(
qdeep.common.getIcon('actions', 'debug-run.png'),
"Run Script", self,
shortcut = "F5",
statusTip = "Run python script",
triggered = self.runScript)
def createToolBars(self):
self.scriptToolBar = self.addToolBar("Script")
self.scriptToolBar.addAction(self.actRunScript)
def getModified(self):
return self.textArea.document().isModified()
def setModified(self, value = True):
self.textArea.document().setModified(value)
def loadFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot read file %s:\n%s." % (
fileName, file.errorString()))
return False
instr = QtCore.QTextStream(file)
self.textArea.setPlainText(instr.readAll())
self.textArea.document().contentsChanged.connect(
self.documentWasModified)
return True
def saveFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot write file %s:\n%s." % (fileName,
file.errorString()))
return False
outstr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstr << self.textArea.toPlainText()
QtGui.QApplication.restoreOverrideCursor()
self.setModified(False)
self.updateWindowTitle()
return True
def runScript(self):
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
nemoa.run(self.getName())
QtGui.QApplication.restoreOverrideCursor()
class Highlighter(QtGui.QSyntaxHighlighter):
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
keywordFormat = QtGui.QTextCharFormat()
keywordFormat.setForeground(QtCore.Qt.darkBlue)
keywordFormat.setFontWeight(QtGui.QFont.Bold)
keywordPatterns = ["\\bchar\\b", "\\bclass\\b", "\\bconst\\b",
"\\bdouble\\b", "\\benum\\b", "\\bexplicit\\b", "\\bfriend\\b",
"\\binline\\b", "\\bint\\b", "\\blong\\b", "\\bnamespace\\b",
"\\boperator\\b", "\\bprivate\\b", "\\bprotected\\b",
"\\bpublic\\b", "\\bshort\\b", "\\bsignals\\b", "\\bsigned\\b",
"\\bslots\\b", "\\bstatic\\b", "\\bstruct\\b",
"\\btemplate\\b", "\\btypedef\\b", "\\btypename\\b",
"\\bunion\\b", "\\bunsigned\\b", "\\bvirtual\\b", "\\bvoid\\b",
"\\bvolatile\\b", "\\bimport\\b", "\\bdef\\b",
"\\bTrue\\b", "\\bFalse\\b", "\\breturn\\b"]
self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)
for pattern in keywordPatterns]
classFormat = QtGui.QTextCharFormat()
classFormat.setFontWeight(QtGui.QFont.Bold)
classFormat.setForeground(QtCore.Qt.darkMagenta)
self.highlightingRules.append((QtCore.QRegExp("\\bQ[A-Za-z]+\\b"),
classFormat))
singleLineCommentFormat = QtGui.QTextCharFormat()
singleLineCommentFormat.setForeground(QtCore.Qt.red)
self.highlightingRules.append((QtCore.QRegExp("//[^\n]*"),
singleLineCommentFormat))
self.multiLineCommentFormat = QtGui.QTextCharFormat()
self.multiLineCommentFormat.setForeground(QtCore.Qt.red)
quotationFormat = QtGui.QTextCharFormat()
quotationFormat.setForeground(QtCore.Qt.darkGreen)
self.highlightingRules.append((QtCore.QRegExp("\".*\""),
quotationFormat))
self.highlightingRules.append((QtCore.QRegExp("'.*'"),
quotationFormat))
functionFormat = QtGui.QTextCharFormat()
functionFormat.setFontItalic(True)
functionFormat.setForeground(QtCore.Qt.blue)
self.highlightingRules.append((QtCore.QRegExp("\\b[A-Za-z0-9_]+(?=\\()"),
functionFormat))
self.commentStartExpression = QtCore.QRegExp("/\\*")
self.commentEndExpression = QtCore.QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, format in self.highlightingRules:
expression = QtCore.QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStartExpression.indexIn(text)
while startIndex >= 0:
endIndex = self.commentEndExpression.indexIn(text, startIndex)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLength = len(text) - startIndex
else:
commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()
self.setFormat(startIndex, commentLength,
self.multiLineCommentFormat)
startIndex = self.commentStartExpression.indexIn(text,
startIndex + commentLength)
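# Editorial note: highlightBlock() uses Qt's block-state mechanism for the multi-line
# /* ... */ rule: setCurrentBlockState(1) marks "this line ended inside a comment",
# so the next block resumes highlighting from column 0; state 0 means the comment was
# closed (or never opened) on this line.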
| gpl-3.0 | -1,445,960,790,410,959,600 | 37.035928 | 97 | 0.614137 | false |
google-research/language | language/conpono/reconstruct/model_builder.py | 1 | 9078 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the paragraph reconstruction model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from bert import modeling
import tensorflow.compat.v1 as tf
from tensorflow.contrib import seq2seq as contrib_seq2seq
class FixedSizeInferenceHelper(contrib_seq2seq.InferenceHelper):
"""Feeds in the output of the decoder at each step for fixed size."""
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for TrainingHelper."""
return (finished, sample_ids, state)
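    # Editorial note: `finished` is not defined in this trimmed snippet; since decoding
    # length is capped by maximum_iterations=5 further below, it would typically be an
    # all-False boolean vector, e.g. tf.tile([False], [tf.shape(sample_ids)[0]]),
    # computed before returning.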
def create_model(model,
labels,
decoder_inputs,
batch_size,
model_type="decode",
sep_positions=None):
"""Creates a classification model.
Args:
model: the BERT model from modeling.py
labels: ground truth paragraph order
decoder_inputs: the input to the decoder if used
batch_size: the batch size
model_type: one of decode, pooled, attn
sep_positions: (optional) for "pooled", indices of SEP tokens
Returns:
tuple of (loss, per_example_loss, logits, probabilities) for model
"""
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
tpu_batch_size = tf.shape(output_layer)[0]
num_labels = 5 # GOOGLE-INTERNAL TODO(daniter) this shouldn't be hardcoded
with tf.variable_scope("paragraph_reconstruct"):
if model_type == "decode":
lstm_cell = tf.nn.rnn_cell.LSTMCell(
num_units=hidden_size, use_peepholes=True, state_is_tuple=True)
def sample_fn(x):
return tf.to_float(tf.reshape(tf.argmax(x, axis=-1), (-1, 1)))
helper = FixedSizeInferenceHelper(
sample_fn=sample_fn,
sample_shape=[1],
sample_dtype=tf.float32,
start_inputs=decoder_inputs[:, 0],
end_fn=None)
# Decoder
project_layer = tf.layers.Dense(
num_labels, use_bias=False, name="output_projection")
my_decoder = contrib_seq2seq.BasicDecoder(
lstm_cell,
helper,
tf.nn.rnn_cell.LSTMStateTuple(output_layer, output_layer),
output_layer=project_layer)
# Dynamic decoding
outputs, _, _ = contrib_seq2seq.dynamic_decode(
my_decoder,
swap_memory=True,
scope="paragraph_reconstruct",
maximum_iterations=5)
logits = outputs.rnn_output
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
per_example_loss = cross_ent
loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
probabilities = tf.nn.softmax(logits, axis=-1)
# GOOGLE-INTERNAL: TODO(daniter) currently neither of these actually trains
elif model_type == "pooled":
token_embeddings = model.get_sequence_output()
# sep positions come out batch by batch so we need to add the batch index
# we do that explicitly here since we don't know the batch size in the
# record decoder
batch_idx = tf.range(tpu_batch_size)
batch_idx = tf.reshape(batch_idx, [tpu_batch_size, 1])
batch_idx = tf.tile(batch_idx, [1, 5]) # double check
batch_idx = tf.reshape(batch_idx, [tpu_batch_size, 5, 1])
# batch_idx = tf.Print(batch_idx, [batch_idx],
# message="batch_idx", summarize=999999)
sep_positions = tf.concat([batch_idx, sep_positions], axis=2)
# sep_positions = tf.Print(sep_positions, [sep_positions],
# message="sep_positions", summarize=999999)
sep_vecs = tf.gather_nd(token_embeddings, sep_positions)
sep_vecs = tf.reshape(sep_vecs, [tpu_batch_size, 5, hidden_size])
# sep_vecs = tf.Print(sep_vecs, [sep_vecs], message="sep_vecs",
# summarize=999999)
logits = tf.layers.dense(
inputs=sep_vecs, units=num_labels, name="output_projection")
# logits = tf.Print(logits, [logits], message="logits", summarize=999999)
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
per_example_loss = cross_ent
loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
probabilities = tf.nn.softmax(logits, axis=-1)
elif model_type == "attn":
# change size to match sequence embedding size
input_consts = tf.constant([0, 1, 2, 3, 4])
position_encoding = tf.broadcast_to(input_consts, [tpu_batch_size, 5])
# position_encoding = tf.to_float(
# tf.reshape(position_encoding, (-1, 5, 1)))
token_type_table = tf.get_variable(
name="attention_embedding",
shape=[5, 512], # don't hardcode
initializer=tf.truncated_normal_initializer(stddev=0.02))
# This vocab will be small so we always do one-hot here, since it is
# always faster for a small vocabulary.
flat_token_type_ids = tf.reshape(position_encoding, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=5)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[tpu_batch_size, 5, 512])
token_embeddings = model.get_sequence_output()
attn = modeling.attention_layer(token_type_embeddings, token_embeddings)
attn = tf.reshape(attn, (-1, 5, 512)) # head size
logits = tf.layers.dense(
inputs=attn, units=num_labels, name="output_projection")
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
per_example_loss = cross_ent
loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
probabilities = tf.nn.softmax(logits, axis=-1)
return (loss, per_example_loss, logits, probabilities)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
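# Shape note (editorial): with sequence_tensor of shape [batch, seq_length, width] and
# positions of shape [batch, num_positions], flat_offsets shifts each example's
# positions into the flattened [batch * seq_length, width] tensor, so output_tensor
# comes back as [batch * num_positions, width].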
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
| apache-2.0 | -950,236,350,593,891,800 | 39.346667 | 79 | 0.656753 | false |
quantosauros/cppyProject | cppy/cybosPlus/cpRqRp/StockOrderCash.py | 1 | 1827 | # coding=utf-8
'''
Created on 2016. 8. 14.
@author: Jay
'''
from cppy.adaptor import CpRqRpClass
import win32com.client
@CpRqRpClass('CpTrade.CpTd0311')
class StockOrderCash(object):
'''
Requests and receives order data (cash orders) for KOSPI-listed stocks, KOSDAQ stocks, and ELWs.
'''
def __init__(self):
self.instCpTdUtil = win32com.client.Dispatch("CpTrade.CpTdUtil")
class InputType(enumerate):
SellOrBuy = 0 # order type code (1: sell, 2: buy)
AccountNumber = 1 # account number
StockCode = 3 # stock code
OrderNumber = 4 # order quantity
OrderPrice = 5 # order unit price
class OutputType(enumerate):
AccountNumber = 1 # account number
StockCode = 3 # stock code
OrderNumber = 4 # order quantity
OrderPrice = 5 # order unit price
def setInputValue(self, inputTypes, inputValues):
self.inputTypes = inputTypes
self.inputValues = inputValues
def setOutputValue(self, outputTypes):
self.outputTypes = outputTypes
def request(self, com_obj):
self.instCpTdUtil.TradeInit()
for i in range(len(self.inputTypes)) :
com_obj.SetInputValue(self.inputTypes[i], self.inputValues[i])
# account number
accountNumber = self.instCpTdUtil.AccountNumber[0]
com_obj.SetInputValue(1, accountNumber)
com_obj.Request()
def response(self, com_obj):
result = ""
for j in range(0, len(self.outputTypes)) :
value = com_obj.GetHeaderValue(self.outputTypes[j])
result += str(value) + "; "
print (result)
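# Illustrative usage sketch (editorial, not in the original source; stock code and
# order values are placeholders, and any plumbing added by the CpRqRpClass decorator
# is ignored). Field codes follow the InputType enumeration above:
#   order = StockOrderCash()
#   order.setInputValue([StockOrderCash.InputType.SellOrBuy,
#                        StockOrderCash.InputType.StockCode,
#                        StockOrderCash.InputType.OrderNumber,
#                        StockOrderCash.InputType.OrderPrice],
#                       ["2", "A003540", 10, 15000])   # buy 10 shares at 15,000
#   order.setOutputValue([StockOrderCash.OutputType.StockCode,
#                         StockOrderCash.OutputType.OrderNumber])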
| mit | 1,099,986,482,034,040,200 | 26.525424 | 74 | 0.544319 | false |
jieter/f-engrave | application/settings.py | 1 | 9320 | import os
def cast_boolean(value):
if type(value) is bool:
return bool(value)
elif len(value) > 1:
return value == 'True'
else:
return bool(int(value))
def cast_string(value):
value = str(value).strip()
value = value.replace('\\n', '\n')
# unquote string
if value.startswith('"') and value.endswith('"'):
return value[1:-1].strip()
else:
return value
CAST_TYPES = {
'str': cast_string,
'bool': cast_boolean,
'int': int,
'float': float
}
# Old names to maintain backwards compatibility while reading
# config files. Only supported while loading values
# from config files.
OLD_SETTING_NAMES = {
'gpost': 'gcode_postamble',
'gpre': 'gcode_preamble',
'bmp_long': 'bmp_longcurve',
'bmp_optto': 'bmp_opttolerance',
'bmp_turnp': 'bmp_turnpol',
'bmp_turds': 'bmp_turdsize',
'bmp_alpha': 'bmp_alphamax',
'v_drv_crner': 'v_drv_corner',
'v_stp_crner': 'v_step_corner',
'FEED': 'feedrate',
'PLUNGE': 'plunge_rate',
'WSPACE': 'word_space',
'CSPACE': 'char_space',
'LSPACE': 'line_space',
'TANGLE': 'text_angle',
'TCODE': 'text_code',
'H_CALC': 'height_calculation',
'XSCALE': 'xscale',
'YSCALE': 'yscale',
'STHICK': 'line_thickness',
'TRADIUS': 'text_radius',
'ZSAFE': 'zsafe',
'ZCUT': 'zcut',
}
CONFIG_FILENAME = 'config.ngc'
CONFIG_MARKER = '(fengrave_set '
CONFIG_TEMPLATE = CONFIG_MARKER + '%20s %s )'
TEXT_CODE = 'text_code'
CUT_TYPE_ENGRAVE = 'engrave'
CUT_TYPE_VCARVE = 'v-carve'
HOME_DIR = os.path.expanduser("~")
NGC_FILE = (HOME_DIR + "/None")
# IMAGE_FILE = (HOME_DIR + "/None")
IMAGE_FILE = (HOME_DIR + "/Desktop/None") # TEST
class Settings(object):
"""
Default values for the application settings.
"""
_defaults = {
'HOME_DIR': HOME_DIR,
'NGC_FILE': NGC_FILE,
'IMAGE_FILE': IMAGE_FILE,
'config_filename': CONFIG_FILENAME,
'batch': False,
'show_axis': True,
'show_box': True,
'show_thick': True,
'flip': False,
'mirror': False,
# text plotted on a circle with radius
'text_radius': 0.0,
'outer': True, # outside circle
'upper': True, # on top of cirle
'fontdex': False,
'useIMGsize': False,
# flip normals (V-carve side)
'v_flop': False,
# ball carve (ball nose cutter)
'b_carve': False,
# TODO is "BALL" shape valid, or is this covered by b_carve?
# options: 'VBIT', 'FLAT', 'BALL'
'bit_shape': 'VBIT',
# plot during v-carve calculation [GUI]
'v_pplot': False,
'inlay': False,
'no_comments': True,
# arc fitting, options 'none', 'center', 'radius'
'arc_fit': 'none',
'ext_char': False,
# disable variables in gcode [GCODE]
'var_dis': True,
# cleanup cut directions
'clean_P': True,
'clean_X': True,
'clean_Y': False,
# V-Bit cut directions
'v_clean_P': False,
'v_clean_X': True,
'v_clean_Y': False,
'yscale': 50.8,
'xscale': 100.0,
'line_space': 1.2,
'char_space': 25,
'word_space': 100,
'text_angle': 0.0,
# safe height [GCODE]
'zsafe': 5.0,
# engraving depth [GCODE]
'zcut': -0.1,
# derived value
'max_cut': 0.0,
'line_thickness': 0.25,
'border_thickness': 0.5,
# options: 'Default',
# 'Top-Left', 'Top-Center', 'Top-Right',
# 'Mid-Left', 'Mid-Center', 'Mid-Right',
# 'Bot-Left', 'Bot-Center', 'Bot-Right'
'origin': 'Default',
# options: 'Left', 'Right', 'Center'
'justify': 'Left',
# options: 'in', 'mm'
'units': 'mm',
# options: 'in/min', 'mm/min'
'feed_units': 'mm/min',
# horizontal feedrate [GCODE]
'feedrate': 60.0,
# feedrate for plunging into stock [GCODE]
'plunge_rate': 10.0,
# which bounding boxes are used to calculate line height
# options: 'max_all', 'max_use'
'height_calculation': 'max_use',
# Add a box/circle around plot
'plotbox': False,
# Gap between box and engraving
'boxgap': 6.35,
# font location and name
'fontdir': 'fonts',
'fontfile': 'normal.cxf',
# options: 'engrave', 'v-carve'
'cut_type': CUT_TYPE_ENGRAVE,
# 'cut_type': CUT_TYPE_VCARVE,
# options: 'text', 'image'
'input_type': 'text',
# 'input_type': 'image',
# v-cutter parameters
# options: 'scorch', 'voronoi'
'v_strategy': 'scorch',
'v_bit_angle': 60,
'v_bit_dia': 3.0,
'v_depth_lim': 0.0,
'v_drv_corner': 135,
'v_step_corner': 200,
'v_step_len': 0.254,
# v-carve loop accuracy
'v_acc': 0.00254,
'allowance': 0.0,
# options: 'chr', 'all'
'v_check_all': 'all',
'v_rough_stk': 0.0,
'v_max_cut': 0.0,
# options: 'black', 'white', 'right', 'left', 'minority', 'majority', or 'random'
'bmp_turnpol': 'minority',
'bmp_turdsize': 2,
'bmp_alphamax': 1.0,
'bmp_opttolerance': 0.2,
'bmp_longcurve': True,
'xorigin': 0.0,
'yorigin': 0.0,
'segarc': 5.0,
'accuracy': 0.001,
# diameter of the cleanup bit
'clean_dia': 3.0,
# clean-up step-over as percentage of the clean-up bit diameter
'clean_step': 50,
# Width of the clean-up search area (obsolete before or since v1.65)
'clean_w': 50.8,
'clean_v': 1.27,
'clean_name': '_clean',
# G-Code Default Preamble
#
# G17 : sets XY plane
# G64 P0.003 : G64 P- (motion blending tolerance set to 0.003 (units))
# G64 without P option keeps the best speed possible, no matter how
# far away from the programmed point you end up.
# M3 S3000 : Spindle start at 3000
# M7 : Turn mist coolant on
'gcode_preamble': 'G17 G64 P0.003 M3 S3000 M7',
# G-Code Default Postamble
#
# M5 : Stop Spindle
# M9 : Turn all coolant off
# M2 : End Program
'gcode_postamble': 'M5 M9 M2',
'default_text': 'OOF-Engrave',
'text_code': '',
}
def __init__(self, filename=None, autoload=False):
self._settings = self._defaults.copy()
self._text_code = u''
if filename is not None:
self.from_configfile(filename)
elif autoload:
files_to_try = (
CONFIG_FILENAME,
os.path.expanduser('~') + os.path.sep + CONFIG_FILENAME,
os.path.expanduser('~') + os.path.sep + '.fengraverc'
)
available = [c for c in files_to_try if os.path.isfile(c)]
if len(available) > 0:
self.from_configfile(available[0])
def __iter__(self):
return self._settings.items()
def type(self, name):
return str(type(self._settings[name]))[7:-2]
def set(self, name, value):
if name == TEXT_CODE:
self._set_text_code(value)
else:
cast = CAST_TYPES[self.type(name)]
self._settings[name] = cast(value)
def get(self, name):
return self._settings[name]
# only for use in C-API calls
def get_dict(self):
return self._settings
def reset(self, name=None):
if name is None:
self._settings = self._defaults.copy()
else:
self.set(name, self._defaults[name])
def has_setting(self, name):
return name in self._settings
def get_fontfile(self):
return self.get('fontdir') + os.path.sep + self.get('fontfile')
def from_configfile(self, filename):
with open(filename, 'r') as config:
for line in config.readlines():
if not line.startswith(CONFIG_MARKER):
continue
line = line[len(CONFIG_MARKER):].strip()
name = line.split(' ')[0].strip()
setting = line[len(name):-1].strip()
if not self.has_setting(name) and name in OLD_SETTING_NAMES:
name = OLD_SETTING_NAMES[name]
try:
self.set(name, setting)
except KeyError:
print 'Setting not found:', name # TODO
def to_gcode(self):
gcode = [CONFIG_TEMPLATE % (key, str(value).replace('\n', '\\n'))
for key, value in self._settings.items()]
return gcode
def get_text_code(self):
return self._text_code
def _set_text_code(self, line):
text_code = u''
code_list = line.split()
for char in code_list:
try:
text_code += "%c" % unichr(int(char))
except:
text_code += "%c" % chr(int(char))
self._text_code = text_code
def __str__(self):
return 'Settings:\n' + ('\n'.join([', '.join(map(str, l)) for l in self._settings.items()]))
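# Illustrative usage sketch (editorial, not part of the original file):
#   s = Settings(autoload=True)      # picks up config.ngc or ~/.fengraverc if present
#   s.set('units', 'in')
#   s.set('xscale', '25.4')          # string values are cast through CAST_TYPES
#   gcode_lines = s.to_gcode()       # lines like '(fengrave_set   units in )'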
| gpl-3.0 | -4,150,245,928,453,565,400 | 25.704871 | 100 | 0.517275 | false |
ic-labs/django-icekit | icekit/api/images/serializers.py | 1 | 2783 | from django.apps import apps
from rest_framework import serializers
from rest_framework.settings import api_settings
from drf_queryfields import QueryFieldsMixin
from icekit.api.base_serializers import WritableSerializerHelperMixin, \
WritableRelatedFieldSettings
Image = apps.get_model('icekit_plugins_image.Image')
MediaCategory = apps.get_model('icekit.MediaCategory')
class MediaCategorySerializer(serializers.ModelSerializer):
# Redefine `name` field here to avoid `unique=True` constraint that will
# be unavoidably applied by DRF validators if we leave the field to be
# autogenerated based on the model.
name = serializers.CharField(
max_length=255,
read_only=False,
required=False,
)
class Meta:
model = MediaCategory
fields = ['id', 'name']
extra_kwargs = {
'id': {
'read_only': False,
'required': False,
},
}
class ImageSerializer(
WritableSerializerHelperMixin,
QueryFieldsMixin,
serializers.HyperlinkedModelSerializer
):
"""
A serializer for an ICEkit Image.
"""
categories = MediaCategorySerializer(
many=True,
)
class Meta:
model = Image
fields = [
api_settings.URL_FIELD_NAME,
'id',
'image',
'width',
'height',
'title',
'alt_text',
'caption',
'credit',
'source',
'external_ref',
'categories',
'license',
'notes',
'date_created',
'date_modified',
'is_ok_for_web',
'is_cropping_allowed',
]
extra_kwargs = {
'url': {
'lookup_field': 'pk',
'view_name': 'api:image-api-detail',
},
}
writable_related_fields = {
'categories': WritableRelatedFieldSettings(
lookup_field=['id', 'name'], can_create=True),
}
# TODO It is probably not a good idea to allow API user to set auto-gen ID
# field, but this is the only way I have found (so far) to allow ID to be
# passed through API to relate existing images.
class RelatedImageSerializer(ImageSerializer):
"""
A serializer for an ICEkit Image relationships that exposes the ID primary
key field to permit referring to existing images by ID, instead of needing
to upload an actual image file every time.
"""
class Meta(ImageSerializer.Meta):
extra_kwargs = {
'id': {
'read_only': False,
'required': False,
},
'image': {
'required': False,
}
}
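# Editorial sketch (assumption, not from the source): with the writable related
# `categories` field configured above (lookup by 'id' or 'name', can_create=True),
# a request body could reference categories either way, e.g.
#   {"title": "Some image", "categories": [{"id": 3}, {"name": "Press"}]}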
| mit | 2,230,048,733,898,857,700 | 26.83 | 78 | 0.564499 | false |
rmcauley/rainwave | rainwave/playlist_objects/artist.py | 1 | 5684 | from libs import db
from libs import config
from rainwave.playlist_objects.metadata import (
AssociatedMetadata,
MetadataUpdateError,
make_searchable_string,
)
class Artist(AssociatedMetadata):
select_by_name_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE lower(artist_name) = lower(%s)"
select_by_id_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE artist_id = %s"
select_by_song_id_query = 'SELECT r4_artists.artist_id AS id, r4_artists.artist_name AS name, r4_song_artist.artist_is_tag AS is_tag, artist_order AS "order" FROM r4_song_artist JOIN r4_artists USING (artist_id) WHERE song_id = %s ORDER BY artist_order'
disassociate_song_id_query = (
"DELETE FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
)
associate_song_id_query = "INSERT INTO r4_song_artist (song_id, artist_id, artist_is_tag, artist_order) VALUES (%s, %s, %s, %s)"
has_song_id_query = "SELECT COUNT(song_id) FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
check_self_size_query = "SELECT COUNT(song_id) FROM r4_song_artist JOIN r4_songs USING (song_id) WHERE artist_id = %s AND song_verified = TRUE"
delete_self_query = "DELETE FROM r4_artists WHERE artist_id = %s"
# needs to be specialized because of artist_order
def associate_song_id(self, song_id, is_tag=None, order=None):
if not order and not self.data.get("order"):
order = db.c.fetch_var(
"SELECT MAX(artist_order) FROM r4_song_artist WHERE song_id = %s",
(song_id,),
)
if not order:
order = -1
order += 1
elif not order:
order = self.data["order"]
self.data["order"] = order
if is_tag == None:
is_tag = self.is_tag
else:
self.is_tag = is_tag
if db.c.fetch_var(self.has_song_id_query, (song_id, self.id)) > 0:
pass
else:
if not db.c.update(
self.associate_song_id_query, (song_id, self.id, is_tag, order)
):
raise MetadataUpdateError(
"Cannot associate song ID %s with %s ID %s"
% (song_id, self.__class__.__name__, self.id)
)
def _insert_into_db(self):
self.id = db.c.get_next_id("r4_artists", "artist_id")
return db.c.update(
"INSERT INTO r4_artists (artist_id, artist_name, artist_name_searchable) VALUES (%s, %s, %s)",
(self.id, self.data["name"], make_searchable_string(self.data["name"])),
)
def _update_db(self):
return db.c.update(
"UPDATE r4_artists SET artist_name = %s, artist_name_searchable = %s WHERE artist_id = %s",
(self.data["name"], make_searchable_string(self.data["name"]), self.id),
)
def _start_cooldown_db(self, sid, cool_time):
# Artists don't have cooldowns on Rainwave.
pass
def _start_election_block_db(self, sid, num_elections):
# Artists don't block elections either (OR DO THEY) (they don't)
pass
def load_all_songs(self, sid, user_id=1):
all_songs = db.c.fetch_all(
"SELECT r4_song_artist.song_id AS id, "
"r4_songs.song_origin_sid AS sid, "
"song_title AS title, "
"CAST(ROUND(CAST(song_rating AS NUMERIC), 1) AS REAL) AS rating, "
"song_exists AS requestable, "
"song_length AS length, "
"song_cool AS cool, "
"song_cool_end AS cool_end, "
"song_url as url, song_link_text as link_text, "
"COALESCE(song_rating_user, 0) AS rating_user, "
"COALESCE(song_fave, FALSE) AS fave, "
"album_name, r4_albums.album_id "
"FROM r4_song_artist "
"JOIN r4_songs USING (song_id) "
"JOIN r4_albums USING (album_id) "
"LEFT JOIN r4_album_sid ON (r4_albums.album_id = r4_album_sid.album_id AND r4_album_sid.sid = %s) "
"LEFT JOIN r4_song_sid ON (r4_songs.song_id = r4_song_sid.song_id AND r4_song_sid.sid = %s) "
"LEFT JOIN r4_song_ratings ON (r4_song_artist.song_id = r4_song_ratings.song_id AND r4_song_ratings.user_id = %s) "
"WHERE r4_song_artist.artist_id = %s AND r4_songs.song_verified = TRUE "
"ORDER BY song_exists DESC, album_name, song_title",
(sid, sid, user_id, self.id),
)
# And of course, now we have to burn extra CPU cycles to make sure the right album name is used and that we present the data
# in the same format seen everywhere else on the API. Still, much faster then loading individual song objects.
self.data["all_songs"] = {}
for configured_sids in config.station_ids:
self.data["all_songs"][configured_sids] = {}
requestable = True if user_id > 1 else False
for song in all_songs:
if not song["sid"] in config.station_ids:
continue
song["requestable"] = requestable and song["requestable"]
if not song["album_id"] in self.data["all_songs"][song["sid"]]:
self.data["all_songs"][song["sid"]][song["album_id"]] = []
self.data["all_songs"][song["sid"]][song["album_id"]].append(song)
song["albums"] = [
{
"name": song.pop("album_name"),
"id": song.pop("album_id"),
}
]
def to_dict(self, user=None):
d = super(Artist, self).to_dict(user)
d["order"] = self.data["order"]
return d
| gpl-2.0 | 7,762,295,732,175,003,000 | 46.764706 | 257 | 0.574771 | false |
roryk/bipy | bipy/toolbox/cutadapt_tool.py | 1 | 5658 | """This module provides an interface to cutadapt with a set of commonly
used adapters for trimming
"""
from bipy.utils import flatten_options, append_stem, flatten, which
import subprocess
import os
from bcbio.utils import safe_makedir, file_exists
import sh
import yaml
import bcbio.provenance.do as do
# adapter sequences for various commonly used systems
ADAPTERS = {}
ADAPTERS["illumina"] = [
["ACACTCTTTCCCTACACGACGCTCTTCCGATCT", "-a", "ill_pe_adapter1"],
["TGTGAGAAAGGGATGTGCTGCGAGAAGGCTAG", "-a", "ill_pe_adapter1_rc"],
["GATCGGAAGAGCGGTTCAGCAGGAATGCCGAG", "-a", "ill_pe_adapter2"],
["TCTAGCCTTCTCGCCAAGTCGTCCTTACGGCTC", "-a", "ill_pe_adapter2_rc"]]
ADAPTERS["nextera"] = [
["AATGATACGGCGACCACCGAGATCTACACGCCTCCCTCGCGCCATCAG", "-a",
"nex_pe_adapter1"],
["CTGATGGCGCGAGGGAGGCGTGTAGATCTCGGTGGTCGCCGTATCATT", "-a",
"nex_pe_adapter1_rc"],
["CAAGCAGAAGACGGCATACGAGATCGGTCTGCCTTGCCAGCCCGCTCAG",
"-a", "nex_pe_adapter2_nobc"],
["CTGAGCGGGCTGGCAAGGCAGACCGATCTCGTATGCCGTCTTCTGCTTG",
"-a", "nex_pe_adapter2_nobc_rc"],
["CTGATGGCGCGAGGGAGGCGTGTAGATCTCGGTGGTCGCCGTATCATTCTGTCTCTTATACACATCT",
"-a", "nex_transposon_pe_adapter1_rc"],
["AGATGTGTATAAGAGACAGAATGATACGGCGACCACCGAGATCTACACGCCTCCCTCGCGCCATCAG",
"-a", "nex_transposon_pe_adapter1"],
["AGATGTGTATAAGAGACAGCAAGCAGAAGACGGCATACGAGATCGGTCTGCCTTGCCAGCCCGCTCAG",
"-a", "nex_tranposon_pe_adapter2"]]
ADAPTERS["polya"] = [
["AAAAAAAAAAAAAAAAAAAAAAAAAAA", "-a", "polyA tail"],
["TTTTTTTTTTTTTTTTTTTTTTTTTTT", "-a", "polyT tail"]]
ADAPTERS["iontorrent"] = [
["CCACTACGCCTCCGCTTTCCTCTCTATGGGCAGTCGGTGAT", "-a",
"ion_5_prime_adapter"],
["CTGAGTCGGAGACACGCAGGGATGAGATGG", "-a", "3_prime_adapter"],
["ATCACCGACTGCCCATAGAGAGGAAAGCGGAGGCGTAGTGG", "-a",
"5_prime_adapter_rc"],
["CCATCTCATCCCTGCGTGTCTCCGACTCAG", "-a", "3_prime_adapter_rc"]]
TRUSEQ_BARCODES = {"ATCACG": 1, "AGTCAA": 13, "ACTGAT": 25, "CGGAAT": 37,
"CGATGT": 2, "AGTTCC": 14, "ATGAGC": 26, "CTAGCT": 38,
"TTAGGC": 3, "ATGTCA": 15, "ATTCCT": 27, "CTATAC": 39,
"TGACCA": 4, "CCGTCC": 16, "CAAAAG": 28, "CTCAGA": 40,
"ACAGTG": 5, "GTAGAG": 17, "CAACTA": 29, "GACGAC": 41,
"GCCAAT": 6, "GTCCGC": 18, "CACCGG": 30, "TAATCG": 42,
"CAGATC": 7, "GTGAAA": 19, "CACGAT": 31, "TACAGC": 43,
"ACTTGA": 8, "GTGGCC": 20, "CACTCA": 32, "TATAAT": 44,
"GATCAG": 9, "GTTTCG": 21, "CAGGCG": 33, "TCATTC": 45,
"TAGCTT": 10, "CGTACG": 22, "CATGGC": 34, "TCCCGA": 46,
"GGCTAC": 11, "GAGTGG": 23, "CATTTT": 35, "TCGAAG": 47,
"CTTGTA": 12, "GGTAGC": 24, "CCAACA": 36, "TCGGCA": 48}
VALID_TRUSEQ_RNASEQ = {k: v for (k, v) in TRUSEQ_BARCODES.items() if v < 13}
TRUSEQ_PREFIX = "GATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
def truseq_barcode_lookup(barcode, small=False):
"""
looks up a truseq adapter sequence by inserting the barcode in the
correct sequence. throws an exception if the barcode does not match
known barcodes
"""
prefix = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
suffix = "ATCTCGTATGCCGTCTTCTGCTTG"
if small:
raise NotImplementedError("Small RNA barcodes not implemented. Need "
"to check to make sure the prefix and "
"suffix sequences are the same as the "
"RNA-seq barcodes.")
if barcode not in VALID_TRUSEQ_RNASEQ:
raise ValueError("Barcode not found in TruSeq barcodes. Might need "
"to implement v1 and v2 versions.")
return prefix + barcode + suffix
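# Worked example (consistent with the constants above): barcode "ATCACG" (TruSeq
# index 1) yields
#   "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC" + "ATCACG" + "ATCTCGTATGCCGTCTTCTGCTTG"
#   adapter = truseq_barcode_lookup("ATCACG")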
def _get_adapter(adapter):
return [adapter[1], adapter[0]]
def _get_platform_adapters(platform):
platform_adapters = ADAPTERS.get(platform, [])
adapters = map(_get_adapter, platform_adapters)
return adapters
def _parse(config):
# handle the adapters, defaulting to illumina and a poly-a trimmer
# if none are provided
adapters = []
adapters += flatten(map(_get_adapter,
config.get("adapters", [])))
# add built in platform if available
platform = config.get("platform", None)
if platform:
adapters += flatten(map(_get_platform_adapters,
[p for p in platform if p in ADAPTERS]))
# default to illumina and poly A
if not adapters:
adapters += flatten(map(_get_platform_adapters,
[p for p in ["illumina", "polya"]]))
arguments = []
arguments += adapters
# grab everything else
arguments += config.get("options", [])
return map(str, list(flatten(arguments)))
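# Illustrative stage_config sketch (editorial; hypothetical values): the keys consumed
# by _parse() are "adapters", "platform" and "options", e.g.
#   stage_config = {"program": "cutadapt",
#                   "platform": ["illumina", "polya"],
#                   "options": ["--minimum-length", "20"]}
#   _parse(stage_config)  # -> ['-a', 'ACACTCTT...', ..., '--minimum-length', '20']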
def run(in_file, stage_config, config):
arguments = [stage_config["program"]]
arguments += _parse(stage_config)
results_dir = config["dir"].get("results", None)
if results_dir:
out_dir = os.path.join(results_dir, "cutadapt")
safe_makedir(out_dir)
out_file = os.path.join(out_dir,
os.path.basename(append_stem(in_file,
"trimmed")))
else:
out_file = append_stem(in_file, "trimmed")
if file_exists(out_file):
return out_file
arguments.extend(["--output", out_file, in_file])
do.run(arguments, "Running cutadapt on %s." % (in_file),
None)
return out_file
def _common_prefix(first, second):
for i, (x, y) in enumerate(zip(first, second)):
if x != y:
break
return first[:i]
| mit | -3,929,628,771,837,746,700 | 39.705036 | 77 | 0.61011 | false |
dgjnpr/py-junos-eznc | lib/jnpr/junos/factory/view.py | 1 | 8729 | import warnings
from contextlib import contextmanager
from copy import deepcopy
from lxml import etree
from jnpr.junos.factory.viewfields import ViewFields
class View(object):
"""
View is the base-class that makes extracting values from XML
data appear as objects with attributes.
"""
ITEM_NAME_XPATH = 'name'
FIELDS = {}
GROUPS = None
# -------------------------------------------------------------------------
# CONSTRUCTOR
# -------------------------------------------------------------------------
def __init__(self, table, view_xml):
"""
:table:
instance of the RunstatTable
:view_xml:
this should be an lxml etree Element object. This
constructor also accepts a list with a single item/XML
"""
# if as_xml is passed as a list, make sure it only has
# a single item, common response from an xpath search
if isinstance(view_xml, list):
if 1 == len(view_xml):
view_xml = view_xml[0]
else:
raise ValueError("constructor only accepts a single item")
# now ensure that the thing provided is an lxml etree Element
if not isinstance(view_xml, etree._Element):
raise ValueError("constructor only accepts lxml.etree._Element")
self._table = table
self.ITEM_NAME_XPATH = table.ITEM_NAME_XPATH
self._init_xml(view_xml)
def _init_xml(self, given_xml):
self._xml = given_xml
if self.GROUPS is not None:
self._groups = {}
for xg_name, xg_xpath in self.GROUPS.items():
xg_xml = self._xml.xpath(xg_xpath)
# @@@ this is technically an error; need to trap it
if not len(xg_xml):
continue
self._groups[xg_name] = xg_xml[0]
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def T(self):
""" return the Table instance for the View """
return self._table
@property
def D(self):
""" return the Device instance for this View """
return self.T.D
@property
def name(self):
""" return the name of view item """
if self.ITEM_NAME_XPATH is None:
return self._table.D.hostname
if isinstance(self.ITEM_NAME_XPATH, str):
# simple key
return self._xml.findtext(self.ITEM_NAME_XPATH).strip()
else:
# composite key
# return tuple([self.xml.findtext(i).strip() for i in
# self.ITEM_NAME_XPATH])
return tuple([self.xml.xpath(i)[0].text.strip()
for i in self.ITEM_NAME_XPATH])
# ALIAS key <=> name
key = name
@property
def xml(self):
""" returns the XML associated to the item """
return self._xml
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def keys(self):
""" list of view keys, i.e. field names """
return self.FIELDS.keys()
def values(self):
""" list of view values """
return [getattr(self, field) for field in self.keys()]
def items(self):
""" list of tuple(key,value) """
return zip(self.keys(), self.values())
def _updater_instance(self, more):
""" called from extend """
if hasattr(more, 'fields'):
self.FIELDS = deepcopy(self.__class__.FIELDS)
self.FIELDS.update(more.fields.end)
if hasattr(more, 'groups'):
self.GROUPS = deepcopy(self.__class__.GROUPS)
self.GROUPS.update(more.groups)
def _updater_class(self, more):
""" called from extend """
if hasattr(more, 'fields'):
self.FIELDS.update(more.fields.end)
if hasattr(more, 'groups'):
self.GROUPS.update(more.groups)
@contextmanager
def updater(self, fields=True, groups=False, all=True, **kvargs):
"""
provide the ability for subclassing objects to extend the
definitions of the fields. this is implemented as a
context manager with the form called from the subclass
constructor:
with self.extend() as more:
more.fields = <dict>
more.groups = <dict> # optional
"""
# ---------------------------------------------------------------------
# create a new object class so we can attach stuff to it arbitrarily.
# then pass that object to the caller, yo!
# ---------------------------------------------------------------------
more = type('RunstatViewMore', (object,), {})()
if fields is True:
more.fields = RunstatMakerViewFields()
# ---------------------------------------------------------------------
# callback through context manager
# ---------------------------------------------------------------------
yield more
updater = self._updater_class if all is True else \
self._updater_instance
updater(more)
def asview(self, view_cls):
""" create a new View object for this item """
return view_cls(self._table, self._xml)
def refresh(self):
"""
~~~ EXPERIMENTAL ~~~
refresh the data from the Junos device. this only works if the table
provides an "args_key", does not update the original table, just this
specific view/item
"""
warnings.warn("Experimental method: refresh")
if self._table.can_refresh is not True:
raise RuntimeError("table does not support this feature")
# create a new table instance that gets only the specific named
# value of this view
tbl_xml = self._table._rpc_get(self.name)
new_xml = tbl_xml.xpath(self._table.ITEM_XPATH)[0]
self._init_xml(new_xml)
return self
# -------------------------------------------------------------------------
# OVERLOADS
# -------------------------------------------------------------------------
def __repr__(self):
""" returns the name of the View with the associate item name """
return "%s:%s" % (self.__class__.__name__, self.name)
def __getattr__(self, name):
"""
returns a view item value, called as :obj.name:
"""
item = self.FIELDS.get(name)
if item is None:
raise ValueError("Unknown field: '%s'" % name)
if 'table' in item:
# if this is a sub-table, then return that now
return item['table'](self.D, self._xml)
# otherwise, not a sub-table, and handle the field
astype = item.get('astype', str)
if 'group' in item:
found = self._groups[item['group']].xpath(item['xpath'])
else:
found = self._xml.xpath(item['xpath'])
len_found = len(found)
if astype is bool:
# handle the boolean flag case separately
return bool(len_found)
if not len_found:
# even for the case of numbers, do not set the value. we
# want to detect "does not exist" vs. defaulting to 0
# -- 2013-nov-19, JLS.
return None
try:
# added exception handler to catch malformed xpath expressions
# -- 2013-nov-19, JLS.
# added support to handle multiple xpath values, i.e. a list of
# things that have the same xpath expression (common in configs)
# -- 2013-dec-06, JLS
# added support to use the element tag if the text is empty
def _munch(x):
as_str = x if isinstance(x, str) else x.text
if as_str is not None:
as_str = as_str.strip()
if not as_str:
as_str = x.tag # use 'not' to test for empty
return astype(as_str)
if 1 == len_found:
return _munch(found[0])
return [_munch(this) for this in found]
except:
raise RuntimeError("Unable to handle field:'%s'" % name)
# and if we are here, then we didn't handle the field.
raise RuntimeError("Unable to handle field:'%s'" % name)
def __getitem__(self, name):
"""
allow the caller to extract field values using :obj['name']:
the same way they would do :obj.name:
"""
return getattr(self, name)
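# Editorial sketch (assumption, not from this file): concrete views typically declare
# FIELDS entries that __getattr__ above resolves through their xpath/astype settings,
# for example
#   FIELDS = {'oper_status': {'xpath': 'oper-status'},
#             'mtu': {'xpath': 'mtu', 'astype': int},
#             'is_present': {'xpath': 'present-flag', 'astype': bool}}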
| apache-2.0 | -8,300,979,280,519,996,000 | 33.366142 | 79 | 0.500745 | false |
2baOrNot2ba/AntPat | scripts/viewJonespat_dual.py | 1 | 2897 | #!/usr/bin/env python
"""A simple viewer for Jones patterns for dual-polarized representations.
"""
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid
from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM
from antpat.reps.hamaker import convLOFARcc2DPE
import antpat.io.filetypes as antfiles
def plotJonesCanonical(theta, phi, jones, dpelemname):
normalize = True
dbscale = True
polarplt = True
IXRTYPE = 'IXR_J' # Can be IXR_J or IXR_M
g, IXRJ = jones2gIXR(jones)
IXRM = IXRJ2IXRM(IXRJ)
if IXRTYPE == 'IXR_J':
IXR = IXRJ
elif IXRTYPE == 'IXR_M':
IXR = IXRM
else:
raise RuntimeError("""Error: IXR type {} unknown.
Known types are IXR_J, IXR_M.""".format(IXRTYPE))
fig = plt.figure()
fig.suptitle(dpelemname)
plt.subplot(121, polar=polarplt)
if normalize:
g_max = numpy.max(g)
g = g/g_max
if dbscale:
g = 20*numpy.log10(g)
# nrlvls = 5
# g_lvls = numpy.max(g) - 3.0*numpy.arange(nrlvls)
plt.pcolormesh(phi, numpy.rad2deg(theta), g)
# plt.contour( phi, numpy.rad2deg(theta), g_dress, levels = g_lvls)
plt.colorbar()
plt.title('Amp gain')
plt.subplot(122, polar=polarplt)
plt.pcolormesh(phi, numpy.rad2deg(theta), 10*numpy.log10(IXR))
plt.colorbar()
plt.title('IXR_J')
plt.show()
def plotFFpat():
from antpat.reps.sphgridfun import tvecfun
for polchan in [0, 1]:
E_th = jones[:, :, polchan, 0].squeeze()
E_ph = jones[:, :, polchan, 1].squeeze()
tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, args.freq,
vcoordlist=['Ludwig3'], projection='orthographic')
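# Editorial note: plotFFpat() reads THETA, PHI, jones and args from module scope;
# they are bound in the __main__ block below before the function is called.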
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("freq", type=float,
help="Frequency in Hertz")
parser.add_argument("filename", help="""
Filename of dual-polarization FF, Hamaker-Arts format,
or a single-polarization FF (p-channel)""")
parser.add_argument("filename_q", nargs='?',
help="""
Filename of second (q-channel) single-polarization FF.
""")
args = parser.parse_args()
if args.filename.endswith(antfiles.HamArtsuffix):
hp = convLOFARcc2DPE(args.filename, [args.freq])
elif args.filename.endswith(antfiles.FEKOsuffix):
hp = DualPolElem()
hp.load_ffes(args.filename, args.filename_q)
else:
raise RuntimeError("dual-pol pattern file type not known")
THETA, PHI = ZenHemisphGrid()
jones = hp.getJonesAlong([args.freq], (THETA, PHI))
plotFFpat()
# plotJonesCanonical(THETA, PHI, jones, os.path.basename(args.filename)
# + ' (' + str(args.freq/1e6) + ' MHz)')
| isc | 7,596,389,316,562,434,000 | 33.488095 | 78 | 0.613393 | false |
bgmerrell/desmod | tests/test_timescale.py | 1 | 1953 | import pytest
from desmod.timescale import parse_time, scale_time
@pytest.mark.parametrize('test_input, expected', [
('12 s', (12, 's')),
('12s', (12, 's')),
('+12s', (12, 's')),
('-12s', (-12, 's')),
('12.0 s', (12.0, 's')),
('12. s', (12.0, 's')),
('+12.0 s', (12.0, 's')),
('-12.0 s', (-12.0, 's')),
('12.000 s', (12.0, 's')),
('1.2e1 s', (12.0, 's')),
('1.2e+1 s', (12.0, 's')),
('1.2e-1 s', (0.12, 's')),
('-1.2e-1 s', (-0.12, 's')),
('12.s', (12.0, 's')),
('12.0s', (12.0, 's')),
('12.000s', (12.0, 's')),
('1.2e1s', (12.0, 's')),
('.12e+2s', (12.0, 's')),
('.12s', (0.12, 's')),
('12 fs', (12, 'fs')),
('12 ps', (12, 'ps')),
('12 ns', (12, 'ns')),
('12 us', (12, 'us')),
('12 ms', (12, 'ms')),
('12.0ms', (12.0, 'ms')),
('s', (1, 's')),
('fs', (1, 'fs')),
])
def test_parse_time(test_input, expected):
m, u = parse_time(test_input)
assert (m, u) == expected
assert isinstance(m, type(expected[0]))
@pytest.mark.parametrize('test_input', [
'',
'123 s',
'123',
'123.0',
'123 S',
'123 Ms',
'123e1.3 s',
'+-123 s',
'123 ks',
'. s',
'1-.1 s',
'1e1.2 s',
])
def test_parse_time_except(test_input):
with pytest.raises(ValueError) as exc_info:
parse_time(test_input)
assert 'float' not in str(exc_info.value)
def test_parse_time_default():
assert parse_time('123', default_unit='ms') == (123, 'ms')
@pytest.mark.parametrize('input_t, input_tscale, expected', [
((1, 'us'), (1, 'us'), 1),
((1, 'us'), (10, 'us'), 0.1),
((1000, 'us'), (1, 'ms'), 1),
((1, 'us'), (100, 'ms'), 1e-5),
((50, 'ms'), (1, 'ns'), 50000000),
((5.2, 'ms'), (1, 'us'), 5200),
])
def test_scale_time(input_t, input_tscale, expected):
scaled = scale_time(input_t, input_tscale)
assert expected == scaled
assert isinstance(scaled, type(expected))
| mit | -2,659,281,447,644,491,300 | 24.697368 | 62 | 0.453661 | false |
bourguet/operator_precedence_parsing | operator_precedence.py | 1 | 7999 | #! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
class SymbolDesc:
def __init__(self, symbol, lprio, rprio, evaluator):
self.symbol = symbol
self.lprio = lprio
self.rprio = rprio
self.evaluator = evaluator
self.value = None
def __repr__(self):
return '<Symbol {} {}/{}: {}>'.format(self.symbol, self.lprio, self.rprio, self.value)
def identity_evaluator(args):
if len(args) == 1 and type(args[0]) == SymbolDesc:
return Node(args[0].symbol)
else:
return CompositeNode('ID ERROR', args)
def binary_evaluator(args):
if len(args) != 3 or type(args[0]) == SymbolDesc or type(args[1]) != SymbolDesc or type(args[2]) == SymbolDesc:
return CompositeNode('BINARY ERROR', args)
return CompositeNode(args[1].symbol, [args[0], args[2]])
class Parser:
def __init__(self):
self.symbols = {}
self.symbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
self.symbols['$eoi$'] = SymbolDesc('$eoi$', 0, 0, None)
self.reset()
def register_symbol(self, oper, lprio, rprio, evaluator=None):
if evaluator is None:
evaluator = binary_evaluator
if type(oper) is str:
self.symbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
else:
for op in oper:
self.symbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
def reset(self):
self.stack = [self.symbols['$soi$']]
def id_symbol(self, id):
return SymbolDesc(id, 1000, 1000, identity_evaluator)
def evaluate(self):
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
curprio = self.stack[idx].lprio
while type(self.stack[idx-1]) != SymbolDesc or self.stack[idx-1].rprio == curprio:
idx -= 1
if type(self.stack[idx]) == SymbolDesc:
curprio = self.stack[idx].lprio
args = self.stack[idx:]
self.stack = self.stack[:idx]
for i in args:
if type(i) == SymbolDesc:
self.stack.append(i.evaluator(args))
return
raise RuntimeError('Internal error: no evaluator found in {}'.format(args))
def tos_symbol(self):
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
return self.stack[idx]
def shift(self, sym):
while self.tos_symbol().rprio > sym.lprio:
self.evaluate()
self.stack.append(sym)
def push_eoi(self):
self.shift(self.symbols['$eoi$'])
def parse(self, s):
self.reset()
for tk in lexer.tokenize(s):
if tk.lexem in self.symbols:
self.shift(self.symbols[tk.lexem])
elif tk.kind == 'ID':
self.shift(self.id_symbol(tk))
elif tk.kind == 'NUMBER':
self.shift(self.id_symbol(tk))
else:
raise RuntimeError('Unexpected symbol: {}'.format(tk))
self.push_eoi()
if len(self.stack) != 3:
raise RuntimeError('Internal error: bad state of stack at end')
return self.stack[1]
def dump(self):
print('Stack')
for oper in self.stack:
print(' {}'.format(oper))
def open_parenthesis_evaluator(args):
if (len(args) == 3
and type(args[0]) == SymbolDesc and args[0].symbol == '('
and type(args[1]) != SymbolDesc
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return args[1]
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return CompositeNode('call', [args[0]])
elif (len(args) == 4
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) != SymbolDesc
and type(args[3]) == SymbolDesc and args[3].symbol == ')'):
if args[2].token == ',':
callargs = args[2].children
else:
callargs = [args[2]]
callargs.insert(0, args[0])
return CompositeNode('call', callargs)
else:
return CompositeNode('( ERROR', args)
def close_parenthesis_evaluator(args):
return CompositeNode(') ERROR', args)
def open_bracket_evaluator(args):
return CompositeNode('get', [args[0], args[2]])
def close_bracket_evaluator(args):
return CompositeNode('] ERROR', args)
def coma_evaluator(args):
return CompositeNode(',', [x for x in args if type(x) != SymbolDesc])
def unary_evaluator(args):
if len(args) != 2:
return CompositeNode('UNARY ERROR', args)
if type(args[0]) == SymbolDesc and type(args[1]) != SymbolDesc:
return CompositeNode(args[0].symbol, [args[1]])
elif type(args[0]) != SymbolDesc and type(args[1]) == SymbolDesc:
return CompositeNode('post'+args[1].symbol, [args[0]])
else:
return CompositeNode('UNARY ERROR', args)
def unary_or_binary_evaluator(args):
if (len(args) == 2
and type(args[0]) == SymbolDesc
and type(args[1]) != SymbolDesc):
return CompositeNode(args[0].symbol, [args[1]])
elif (len(args) == 2
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc):
return CompositeNode('post'+args[1].symbol, [args[0]])
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc
and type(args[2]) != SymbolDesc):
return CompositeNode(args[1].symbol, [args[0], args[2]])
else:
return CompositeNode('1,2-ARY ERROR', args)
def question_evaluator(args):
if (len(args) != 5
or type(args[0]) == SymbolDesc
or type(args[1]) != SymbolDesc or args[1].symbol != '?'
or type(args[2]) == SymbolDesc
or type(args[3]) != SymbolDesc or args[3].symbol != ':'
or type(args[4]) == SymbolDesc):
return CompositeNode('? ERROR', args)
return CompositeNode('?', [args[0], args[2], args[4]])
def colon_evaluator(args):
return CompositeNode(': ERROR', args)
def cexp_parser():
parser = Parser()
parser.register_symbol(',', 2, 2, coma_evaluator)
parser.register_symbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
parser.register_symbol('?', 7, 1.5, question_evaluator)
parser.register_symbol(':', 1.5, 6, colon_evaluator)
parser.register_symbol('||', 8, 9)
parser.register_symbol('&&', 10, 11)
parser.register_symbol('|', 12, 13)
parser.register_symbol('^', 14, 15)
parser.register_symbol('&', 16, 17, unary_or_binary_evaluator)
parser.register_symbol(['==', '!='], 18, 19)
parser.register_symbol(['<', '>', '<=', '>='], 20, 21)
parser.register_symbol(['<<', '>>'], 22, 23)
parser.register_symbol(['+', '-'], 24, 25, unary_or_binary_evaluator)
parser.register_symbol(['/', '%'], 26, 27)
parser.register_symbol(['*'], 26, 27, unary_or_binary_evaluator)
parser.register_symbol('**', 29, 28)
parser.register_symbol(['++', '--', '~', '!'], 31, 30, unary_evaluator) # +, -, *, & should be here
parser.register_symbol(['.', '->'], 32, 33)
parser.register_symbol('(', 100, 1, open_parenthesis_evaluator)
parser.register_symbol(')', 1, 100, close_parenthesis_evaluator)
parser.register_symbol('[', 100, 1, open_bracket_evaluator)
parser.register_symbol(']', 1, 100, close_bracket_evaluator)
return parser
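# Illustrative usage sketch (editorial; the exact rendering depends on the Node and
# CompositeNode __repr__ defined in the tree module):
#   parser = cexp_parser()
#   tree = parser.parse('a + b * c')   # nests as (+ a (* b c)) given the priorities above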
def main(args):
parser = cexp_parser()
for s in args[1:]:
try:
exp = parser.parse(s)
print('{} -> {}'.format(s, exp))
except RuntimeError as run_error:
print('Unable to parse {}: {}'.format(s, run_error))
if __name__ == "__main__":
main(sys.argv)
| bsd-2-clause | -1,557,918,058,041,208,000 | 33.478448 | 115 | 0.56182 | false |
hernandito/SickRage | sickbeard/providers/generic.py | 1 | 26015 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import itertools
from random import shuffle
from base64 import b16encode, b32decode
import requests
from hachoir_parser import createParser
import sickbeard
from sickbeard import helpers, classes, logger, db
from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT
from sickbeard import tvcache
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.common import Quality
from sickbeard.common import user_agents
from sickrage.helper.common import sanitize_filename
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickrage.show.Show import Show
from sickbeard import show_name_helpers
class GenericProvider(object):
NZB = "nzb"
TORRENT = "torrent"
def __init__(self, name):
# these need to be set in the subclass
self.providerType = None
self.name = name
self.urls = {}
self.url = ''
self.public = False
self.show = None
self.supportsBacklog = True
self.supportsAbsoluteNumbering = False
self.anime_only = False
self.search_mode = None
self.search_fallback = False
self.enabled = False
self.enable_daily = False
self.enable_backlog = False
self.cache = tvcache.TVCache(self)
self.session = requests.Session()
shuffle(user_agents)
self.headers = {'User-Agent': user_agents[0]}
self.btCacheURLS = [
'http://torcache.net/torrent/{torrent_hash}.torrent',
'http://thetorrent.org/torrent/{torrent_hash}.torrent',
'http://btdig.com/torrent/{torrent_hash}.torrent',
# 'http://torrage.com/torrent/{torrent_hash}.torrent',
# 'http://itorrents.org/torrent/{torrent_hash}.torrent',
]
shuffle(self.btCacheURLS)
self.proper_strings = ['PROPER|REPACK|REAL']
def getID(self):
return GenericProvider.makeID(self.name)
@staticmethod
def makeID(name):
return re.sub(r"[^\w\d_]", "_", name.strip().lower())
def imageName(self):
return self.getID() + '.png'
# pylint: disable=no-self-use,unused-variable
# Method could be a function, Unused variable
def _checkAuth(self):
return True
def _doLogin(self):
return True
def isActive(self):
return False
def isEnabled(self):
return self.enabled
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
if self.providerType == GenericProvider.NZB:
result = classes.NZBSearchResult(episodes)
elif self.providerType == GenericProvider.TORRENT:
result = classes.TorrentSearchResult(episodes)
else:
result = classes.SearchResult(episodes)
result.provider = self
return result
def getURL(self, url, post_data=None, params=None, timeout=30, json=False, needBytes=False):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
session=self.session, json=json, needBytes=needBytes)
def _makeURL(self, result):
urls = []
filename = u''
if result.url.startswith('magnet'):
try:
torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()
try:
torrent_name = re.findall('dn=([^&]+)', result.url)[0]
except Exception:
torrent_name = 'NO_DOWNLOAD_NAME'
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).upper()
if not torrent_hash:
logger.log(u"Unable to extract torrent hash from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
except Exception:
logger.log(u"Unable to extract torrent hash or name from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
else:
urls = [result.url]
if self.providerType == GenericProvider.TORRENT:
filename = ek(os.path.join, sickbeard.TORRENT_DIR, sanitize_filename(result.name) + '.' + self.providerType)
elif self.providerType == GenericProvider.NZB:
filename = ek(os.path.join, sickbeard.NZB_DIR, sanitize_filename(result.name) + '.' + self.providerType)
return urls, filename
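        # Editorial note (hypothetical hash/name): a magnet result such as
        #   magnet:?xt=urn:btih:<40-hex-hash>&dn=Show.S01E01.720p
        # expands to the btCacheURLS templates above, e.g.
        #   http://torcache.net/torrent/<40-HEX-HASH>.torrent
        # while a plain http/nzb result passes through unchanged as [result.url].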
def downloadResult(self, result):
"""
Save the result to disk.
"""
# check for auth
if not self._doLogin():
return False
urls, filename = self._makeURL(result)
for url in urls:
if 'NO_DOWNLOAD_NAME' in url:
continue
if url.startswith('http'):
self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})
logger.log(u"Downloading a result from " + self.name + " at " + url)
# Support for Jackett/TorzNab
if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT
if helpers.download_file(url, filename, session=self.session, headers=self.headers):
if self._verify_download(filename):
logger.log(u"Saved result to " + filename, logger.INFO)
return True
else:
logger.log(u"Could not download %s" % url, logger.WARNING)
helpers.remove_file_failed(filename)
if len(urls):
logger.log(u"Failed to download any results", logger.WARNING)
return False
def _verify_download(self, file_name=None):
"""
Checks the saved file to see if it was actually valid, if not then consider the download a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if file_name.endswith(GenericProvider.TORRENT):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
logger.log(u"Result is not a valid torrent file", logger.DEBUG)
return False
return True
def searchRSS(self, episodes):
return self.cache.findNeededEpisodes(episodes)
def getQuality(self, item, anime=False):
"""
Figures out the quality of the given RSS item node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item)
quality = Quality.sceneQuality(title, anime)
return quality
# pylint: disable=no-self-use,unused-argument
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
return []
def _get_season_search_strings(self, episode):
return []
def _get_episode_search_strings(self, eb_obj, add_string=''):
return []
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('title', '')
if title:
title = u'' + title.replace(' ', '.')
url = item.get('link', '')
if url:
url = url.replace('&', '&').replace('%26tr%3D', '&tr=')
return title, url
def _get_size(self, item):
"""Gets the size from the item"""
logger.log(u"Provider type doesn't have _get_size() implemented yet", logger.ERROR)
return -1
def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
self._checkAuth()
self.show = show
results = {}
itemList = []
searched_scene_season = None
for epObj in episodes:
# search cache for episode result
cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
if cacheResult:
if epObj.episode not in results:
results[epObj.episode] = cacheResult
else:
results[epObj.episode].extend(cacheResult)
# found result, search next episode
continue
# skip if season already searched
if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
continue
# mark season searched for season pack searches so we can skip later on
searched_scene_season = epObj.scene_season
search_strings = []
if len(episodes) > 1 and search_mode == 'sponly':
# get season search results
search_strings = self._get_season_search_strings(epObj)
elif search_mode == 'eponly':
# get single episode search results
search_strings = self._get_episode_search_strings(epObj)
first = search_strings and isinstance(search_strings[0], dict) and 'rid' in search_strings[0]
if first:
logger.log(u'First search_string has rid', logger.DEBUG)
for curString in search_strings:
itemList += self._doSearch(curString, search_mode, len(episodes), epObj=epObj)
if first:
first = False
if itemList:
logger.log(u'First search_string had rid, and returned results, skipping query by string', logger.DEBUG)
break
else:
logger.log(u'First search_string had rid, but returned no results, searching with string query', logger.DEBUG)
# if we found what we needed already from cache then return results and exit
if len(results) == len(episodes):
return results
# sort list by quality
if len(itemList):
items = {}
itemsUnknown = []
for item in itemList:
quality = self.getQuality(item, anime=show.is_anime)
if quality == Quality.UNKNOWN:
itemsUnknown += [item]
else:
if quality not in items:
items[quality] = [item]
else:
items[quality].append(item)
itemList = list(itertools.chain(*[v for (k, v) in sorted(items.iteritems(), reverse=True)]))
itemList += itemsUnknown if itemsUnknown else []
# filter results
cl = []
for item in itemList:
(title, url) = self._get_title_and_url(item)
# parse the file name
try:
myParser = NameParser(parse_method=('normal', 'anime')[show.is_anime])
parse_result = myParser.parse(title)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
continue
except InvalidShowException:
logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
continue
showObj = parse_result.show
quality = parse_result.quality
release_group = parse_result.release_group
version = parse_result.version
addCacheEntry = False
if not (showObj.air_by_date or showObj.sports):
if search_mode == 'sponly':
if len(parse_result.episode_numbers):
logger.log(
u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
logger.DEBUG)
addCacheEntry = True
if len(parse_result.episode_numbers) and (parse_result.season_number not in set([ep.season for ep in episodes])
or not [ep for ep in episodes if ep.scene_episode in parse_result.episode_numbers]):
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
else:
if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
if not addCacheEntry:
# we just use the existing info for normal searches
actual_season = parse_result.season_number
actual_episodes = parse_result.episode_numbers
else:
sameDaySpecial = False
if not parse_result.is_air_by_date:
logger.log(
u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
logger.DEBUG)
addCacheEntry = True
else:
airdate = parse_result.air_date.toordinal()
myDB = db.DBConnection()
sql_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
[showObj.indexerid, airdate])
if len(sql_results) == 2:
if int(sql_results[0]['season']) == 0 and int(sql_results[1]['season']) != 0:
actual_season = int(sql_results[1]["season"])
actual_episodes = [int(sql_results[1]["episode"])]
sameDaySpecial = True
elif int(sql_results[1]['season']) == 0 and int(sql_results[0]['season']) != 0:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
sameDaySpecial = True
elif len(sql_results) != 1:
logger.log(
u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
logger.WARNING)
addCacheEntry = True
if not addCacheEntry and not sameDaySpecial:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
# add parsed result to cache for usage later on
if addCacheEntry:
logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
# pylint: disable=protected-access
# Access to a protected member of a client class
ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
if ci is not None:
cl.append(ci)
continue
# make sure we want the episode
wantEp = True
for epNo in actual_episodes:
if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
wantEp = False
break
if not wantEp:
logger.log(
u"Ignoring result " + title + " because we don't want an episode that is " +
Quality.qualityStrings[
quality], logger.INFO)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
# make a result object
epObj = []
for curEp in actual_episodes:
epObj.append(showObj.getEpisode(actual_season, curEp))
result = self.getResult(epObj)
result.show = showObj
result.url = url
result.name = title
result.quality = quality
result.release_group = release_group
result.version = version
result.content = None
result.size = self._get_size(item)
if len(epObj) == 1:
epNum = epObj[0].episode
logger.log(u"Single episode result.", logger.DEBUG)
elif len(epObj) > 1:
epNum = MULTI_EP_RESULT
logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
parse_result.episode_numbers), logger.DEBUG)
elif len(epObj) == 0:
epNum = SEASON_RESULT
logger.log(u"Separating full season result to check for later", logger.DEBUG)
if epNum not in results:
results[epNum] = [result]
else:
results[epNum].append(result)
# check if we have items to add to cache
if len(cl) > 0:
# pylint: disable=protected-access
# Access to a protected member of a client class
myDB = self.cache._getDB()
myDB.mass_action(cl)
return results
def findPropers(self, search_date=None):
results = self.cache.listPropers(search_date)
return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
results]
def seedRatio(self):
'''
Provider should override this value if custom seed ratio enabled
It should return the value of the provider seed ratio
'''
return ''
class NZBProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.NZB
def isActive(self):
return sickbeard.USE_NZBS and self.isEnabled()
def _get_size(self, item):
try:
size = item.get('links')[1].get('length', -1)
except IndexError:
size = -1
if not size:
logger.log(u"Size was not found in your provider response", logger.DEBUG)
return int(size)
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.TORRENT
def isActive(self):
return sickbeard.USE_TORRENTS and self.isEnabled()
def _get_title_and_url(self, item):
from feedparser.util import FeedParserDict
if isinstance(item, (dict, FeedParserDict)):
title = item.get('title', '')
download_url = item.get('url', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
title = item[0]
download_url = item[1]
# Temp global block `DIAMOND` releases
if title.endswith('DIAMOND'):
logger.log(u'Skipping DIAMOND release for mass fake releases.')
title = download_url = u'FAKERELEASE'
if title:
title = self._clean_title_from_provider(title)
if download_url:
download_url = download_url.replace('&', '&')
return (title, download_url)
def _get_size(self, item):
size = -1
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024*1024:
size = -1
return size
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) # 1) showName.SXX
search_string['Season'].append(ep_string.encode('utf-8').strip())
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name + ' '
if ep_obj.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', ' ')
elif ep_obj.show.sports:
ep_string += str(ep_obj.airdate).replace('-', ' ') + ('|', ' ')[len(self.proper_strings) > 1] + ep_obj.airdate.strftime('%b')
elif ep_obj.show.anime:
ep_string += "%02d" % int(ep_obj.scene_absolute_number)
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string = ep_string + ' %s' % add_string
search_string['Episode'].append(ep_string.encode('utf-8').strip())
return [search_string]
@staticmethod
def _clean_title_from_provider(title):
return (title or '').replace(' ', '.')
@property
def _custom_trackers(self):
return ('', '&tr=' + '&tr='.join(set([x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()])))[self.public] if sickbeard.TRACKERS_LIST else ''
    def findPropers(self, search_date=None):
        # Default the search date at call time; a datetime.datetime.today() default
        # argument would only be evaluated once, when the module is imported.
        search_date = search_date or datetime.datetime.today()
        results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST]) + ')'
)
for sqlshow in sqlResults or []:
show = Show.find(sickbeard.showList, int(sqlshow["showid"]))
if show:
curEp = show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
for term in self.proper_strings:
searchString = self._get_episode_search_strings(curEp, add_string=term)
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), show))
return results
| gpl-3.0 | -7,911,745,144,782,687,000 | 37.944611 | 189 | 0.554757 | false |
diefans/ferment | src/ferment/scripts.py | 1 | 2313 | import click
import docker
from wheezy.template.engine import Engine
from wheezy.template.ext.core import CoreExtension
from wheezy.template.ext.code import CodeExtension
from wheezy.template.loader import DictLoader
from . import templates
import logging
LOG = logging.getLogger(__name__)
LOG_LEVELS = {
"info": logging.INFO,
"warn": logging.WARN,
"debug": logging.DEBUG,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
class Context(dict):
def __init__(self, *args, **kwargs):
self.__dict__ = self
super(Context, self).__init__(*args, **kwargs)
class FermConfig(object):
def __init__(self, path):
self.path = path
template_dct = {
'docker': templates.docker,
}
engine = Engine(
loader=DictLoader(template_dct),
extensions=[
CoreExtension(),
CodeExtension()
]
)
self.templates = {
name: engine.get_template(name) for name in template_dct
}
def get_config(self, config):
return self.templates['docker'].render(config)
@click.group()
@click.option(
"--log-level",
type=click.Choice([k for k, v in sorted(LOG_LEVELS.items(), key=lambda x: x[1])]),
default="info",
help="Logging level.")
@click.pass_context
def run(ctx, log_level):
logging.basicConfig(level=LOG_LEVELS[log_level])
ctx.obj = Context()
@run.group("docker")
@click.option(
"api", "--docker", "-d",
type=click.Path(),
default="unix://var/run/docker.sock",
help="The docker api socket."
)
@click.option(
"--cidr", "-c", default="172.18.0.0/16",
help="Docker CIDR."
)
@click.option(
"--interface", "-i", default="docker0",
help="Docker interface."
)
@click.pass_context
def docker_grp(ctx, api, cidr, interface):
ctx.obj.client = docker.Client(base_url=api)
ctx.obj.cidr = cidr
ctx.obj.interface = interface
@docker_grp.command(name="config")
@click.pass_context
def docker_config(ctx):
ferm = FermConfig(None)
# get all containers
containers = ctx.obj.client.containers()
ctx.obj.containers = [
ctx.obj.client.inspect_container(container['Id'])
for container in containers
]
click.echo(ferm.get_config(ctx.obj))
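# Example invocation (a sketch; it assumes this module is exposed as a console
# script named ``ferment`` that points at ``run`` -- adjust to the real entry point):
#
#   ferment --log-level debug docker \
#       --docker unix://var/run/docker.sock \
#       --cidr 172.18.0.0/16 --interface docker0 \
#       config
#
# This prints the rendered ferm 'docker' template for all running containers.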
| apache-2.0 | -8,682,527,728,291,618,000 | 22.13 | 86 | 0.620406 | false |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/export_zOR_classif.py | 1 | 10068 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/18/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
def print_dict_to_file(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s\n" % (key, value[0], value[1]))
def print_dict_to_file2(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,region,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
def print_dict_to_file3(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write('season,state,mn_retro,mn_early\n')
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
##############################################
# SDI NATIONAL
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: nation-level peak-based retrospective classification
# regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: nation-level peak-based retrospective classification
# import same files as regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_state[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
print d_classifzOR
print d_classifzOR_reg
# fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_classifzOR, fn1)
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4) | mit | -8,794,785,964,571,561,000 | 58.934524 | 206 | 0.698153 | false |
RoboCupULaval/StrategyIA | ai/GameDomainObjects/ball.py | 1 | 1041 | # Under MIT License, see LICENSE.txt
from typing import Dict
from Util import Position
class Ball:
def __init__(self, position=Position()):
self._position = position
self._velocity = Position()
def update(self, new_dict: Dict):
self.position = new_dict['position']
self.velocity = new_dict['velocity']
def is_moving_fast(self, fast_speed = 600.0): # mm/s
return fast_speed < self.velocity.norm
def is_mobile(self, immobile_speed = 300.0): # mm/s
return immobile_speed < self.velocity.norm
def is_immobile(self):
return not self.is_mobile()
@property
def position(self) -> Position:
return self._position
@position.setter
def position(self, value):
assert isinstance(value, Position)
self._position = value
@property
def velocity(self) -> Position:
return self._velocity
@velocity.setter
def velocity(self, value):
assert isinstance(value, Position)
self._velocity = value
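# Minimal usage sketch (not part of the original module). It assumes the Util
# Position class accepts (x, y) constructor arguments and exposes a ``norm`` property.
if __name__ == '__main__':
    ball = Ball(Position(0, 0))
    ball.velocity = Position(350, 0)   # mm/s
    print(ball.is_mobile())            # True: 350 mm/s exceeds the 300 mm/s threshold
    print(ball.is_moving_fast())       # False: below the 600 mm/s threshold
    print(ball.is_immobile())          # False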
| mit | -1,920,990,554,748,314,000 | 23.209302 | 56 | 0.630163 | false |
RogerRueegg/lvw-young-talents | src/profiles/views.py | 1 | 2796 | from __future__ import unicode_literals
from django.views import generic
from django.shortcuts import get_object_or_404, redirect
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from . import forms
from . import models
import datetime
class ShowProfile(LoginRequiredMixin, generic.TemplateView):
template_name = "profiles/show_profile.html"
http_method_names = ['get']
def get(self, request, *args, **kwargs):
slug = self.kwargs.get('slug')
if slug:
profile = get_object_or_404(models.Profile, slug=slug)
user = profile.user
else:
user = self.request.user
if user == self.request.user:
kwargs["editable"] = True
kwargs["show_user"] = user
return super(ShowProfile, self).get(request, *args, **kwargs)
class EditProfile(LoginRequiredMixin, generic.TemplateView):
template_name = "profiles/edit_profile.html"
http_method_names = ['get', 'post']
def get(self, request, *args, **kwargs):
user = self.request.user
if "user_form" not in kwargs:
kwargs["user_form"] = forms.UserForm(instance=user)
if "profile_form" not in kwargs:
kwargs["profile_form"] = forms.ProfileForm(instance=user.profile)
return super(EditProfile, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
user = self.request.user
user_form = forms.UserForm(request.POST, instance=user)
profile_form = forms.ProfileForm(request.POST,
request.FILES,
instance=user.profile)
if not (user_form.is_valid() and profile_form.is_valid()):
message = ""
if profile_form.errors:
if 'phone_number' in profile_form.errors.keys():
message += "Bitte gibt Deine Natelnummer wie folgt ein: +41791234567. "
if 'bdate' in profile_form.errors.keys():
message += "Bitte gibt das Geburtsdatum wie folgt ein: 2002-01-15 für 15. Januar 2002"
messages.error(request, message)
user_form = forms.UserForm(instance=user)
profile_form = forms.ProfileForm(instance=user.profile)
return super(EditProfile, self).get(request,
user_form=user_form,
profile_form=profile_form)
# Both forms are fine. Time to save!
user_form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
messages.success(request, "Profile details saved!")
return redirect("profiles:show_self")
| mit | 7,446,359,648,485,783,000 | 41.348485 | 106 | 0.598927 | false |
Erotemic/ibeis | super_setup.py | 1 | 26677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Requirements:
pip install gitpython click ubelt
"""
import re
from os.path import exists
from os.path import join
from os.path import dirname
from os.path import abspath
import ubelt as ub
import functools
class ShellException(Exception):
"""
Raised when shell returns a non-zero error code
"""
class DirtyRepoError(Exception):
"""
If the repo is in an unexpected state, its very easy to break things using
automated scripts. To be safe, we don't do anything. We ensure this by
raising this error.
"""
def parse_version(package):
"""
Statically parse the version number from __init__.py
CommandLine:
        python -c "import super_setup; print(super_setup.parse_version('ibeis'))"
"""
from os.path import dirname, join
import ast
init_fpath = join(dirname(__file__), package, '__init__.py')
with open(init_fpath) as file_:
sourcecode = file_.read()
pt = ast.parse(sourcecode)
class VersionVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for target in node.targets:
if target.id == '__version__':
self.version = node.value.s
visitor = VersionVisitor()
visitor.visit(pt)
return visitor.version
class GitURL(object):
"""
Represent and transform git urls between protocols defined in [3]_.
The code in GitURL is largely derived from [1]_ and [2]_.
Credit to @coala and @FriendCode.
Note:
while this code aims to suport protocols defined in [3]_, it is only
tested for specific use cases and therefore might need to be improved.
References:
.. [1] https://github.com/coala/git-url-parse
.. [2] https://github.com/FriendCode/giturlparse.py
.. [3] https://git-scm.com/docs/git-clone#URLS
Example:
>>> self = GitURL('[email protected]:computer-vision/netharn.git')
>>> print(ub.repr2(self.parts()))
>>> print(self.format('ssh'))
>>> print(self.format('https'))
>>> self = GitURL('https://gitlab.kitware.com/computer-vision/netharn.git')
>>> print(ub.repr2(self.parts()))
>>> print(self.format('ssh'))
>>> print(self.format('https'))
"""
SYNTAX_PATTERNS = {
# git allows for a url style syntax
'url': re.compile(r'(?P<transport>\w+://)'
r'((?P<user>\w+[^@]*@))?'
r'(?P<host>[a-z0-9_.-]+)'
r'((?P<port>:[0-9]+))?'
r'/(?P<path>.*\.git)'),
# git allows for ssh style syntax
'ssh': re.compile(r'(?P<user>\w+[^@]*@)'
r'(?P<host>[a-z0-9_.-]+)'
r':(?P<path>.*\.git)'),
}
r"""
Ignore:
# Helper to build the parse pattern regexes
def named(key, regex):
return '(?P<{}>{})'.format(key, regex)
def optional(pat):
return '({})?'.format(pat)
parse_patterns = {}
# Standard url format
transport = named('transport', r'\w+://')
user = named('user', r'\w+[^@]*@')
host = named('host', r'[a-z0-9_.-]+')
port = named('port', r':[0-9]+')
path = named('path', r'.*\.git')
pat = ''.join([transport, optional(user), host, optional(port), '/', path])
parse_patterns['url'] = pat
pat = ''.join([user, host, ':', path])
parse_patterns['ssh'] = pat
print(ub.repr2(parse_patterns))
"""
def __init__(self, url):
self._url = url
self._parts = None
def parts(self):
"""
Parses a GIT URL and returns an info dict.
Returns:
dict: info about the url
Raises:
Exception : if parsing fails
"""
info = {
'syntax': '',
'host': '',
'user': '',
'port': '',
'path': None,
'transport': '',
}
for syntax, regex in self.SYNTAX_PATTERNS.items():
match = regex.search(self._url)
if match:
info['syntax'] = syntax
info.update(match.groupdict())
break
else:
raise Exception('Invalid URL {!r}'.format(self._url))
# change none to empty string
for k, v in info.items():
if v is None:
info[k] = ''
return info
def format(self, protocol):
"""
Change the protocol of the git URL
"""
parts = self.parts()
if protocol == 'ssh':
parts['user'] = 'git@'
url = ''.join([
parts['user'], parts['host'], ':', parts['path']
])
else:
parts['transport'] = protocol + '://'
parts['port'] = ''
parts['user'] = ''
url = ''.join([
parts['transport'], parts['user'], parts['host'],
parts['port'], '/', parts['path']
])
return url
class Repo(ub.NiceRepr):
"""
Abstraction that references a git repository, and is able to manipulate it.
A common use case is to define a `remote` and a `code_dpath`, which lets
you check and ensure that the repo is cloned and on a particular branch.
You can also query its status, and pull, and perform custom git commands.
Args:
*args: name, dpath, code_dpath, remotes, remote, branch
Attributes:
All names listed in args are attributse. In addition, the class also
exposes these derived attributes.
url (URI): where the primary location is
Example:
>>> # Here is a simple example referencing ubelt
>>> from super_setup import *
>>> import ubelt as ub
>>> repo = Repo(
>>> remote='https://github.com/Erotemic/ubelt.git',
>>> code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
>>> )
>>> print('repo = {}'.format(repo))
>>> repo.check()
>>> repo.ensure()
>>> repo.check()
>>> repo.status()
>>> repo._cmd('python setup.py build')
>>> repo._cmd('./run_doctests.sh')
repo = <Repo('ubelt')>
>>> # Here is a less simple example referencing ubelt
>>> from super_setup import *
>>> import ubelt as ub
>>> repo = Repo(
>>> name='ubelt-local',
>>> remote='github',
>>> branch='master',
>>> remotes={
>>> 'github': 'https://github.com/Erotemic/ubelt.git',
>>> 'fakemirror': 'https://gitlab.com/Erotemic/ubelt.git',
>>> },
>>> code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
>>> )
>>> print('repo = {}'.format(repo))
>>> repo.ensure()
>>> repo._cmd('python setup.py build')
>>> repo._cmd('./run_doctests.sh')
"""
def __init__(repo, **kwargs):
repo.name = kwargs.pop('name', None)
repo.dpath = kwargs.pop('dpath', None)
repo.code_dpath = kwargs.pop('code_dpath', None)
repo.remotes = kwargs.pop('remotes', None)
repo.remote = kwargs.pop('remote', None)
repo.branch = kwargs.pop('branch', 'master')
repo._logged_lines = []
repo._logged_cmds = []
if repo.remote is None:
if repo.remotes is None:
raise ValueError('must specify some remote')
else:
if len(repo.remotes) > 1:
raise ValueError('remotes are ambiguous, specify one')
else:
repo.remote = ub.peek(repo.remotes)
else:
if repo.remotes is None:
_default_remote = 'origin'
repo.remotes = {
_default_remote: repo.remote
}
repo.remote = _default_remote
repo.url = repo.remotes[repo.remote]
if repo.name is None:
suffix = repo.url.split('/')[-1]
repo.name = suffix.split('.git')[0]
if repo.dpath is None:
repo.dpath = join(repo.code_dpath, repo.name)
repo.pkg_dpath = join(repo.dpath, repo.name)
for path_attr in ['dpath', 'code_dpath']:
path = getattr(repo, path_attr)
if path is not None:
setattr(repo, path_attr, ub.expandpath(path))
repo.verbose = kwargs.pop('verbose', 3)
if kwargs:
raise ValueError('unknown kwargs = {}'.format(kwargs.keys()))
repo._pygit = None
def set_protocol(self, protocol):
"""
Changes the url protocol to either ssh or https
Args:
protocol (str): can be ssh or https
"""
gurl = GitURL(self.url)
self.url = gurl.format(protocol)
def info(repo, msg):
repo._logged_lines.append(('INFO', 'INFO: ' + msg))
if repo.verbose >= 1:
print(msg)
def debug(repo, msg):
repo._logged_lines.append(('DEBUG', 'DEBUG: ' + msg))
if repo.verbose >= 1:
print(msg)
def _getlogs(repo):
return '\n'.join([t[1] for t in repo._logged_lines])
def __nice__(repo):
return '{}, branch={}'.format(repo.name, repo.branch)
def _cmd(repo, command, cwd=ub.NoParam, verbose=ub.NoParam):
if verbose is ub.NoParam:
verbose = repo.verbose
if cwd is ub.NoParam:
cwd = repo.dpath
repo._logged_cmds.append((command, cwd))
repo.debug('Run {!r} in {!r}'.format(command, cwd))
info = ub.cmd(command, cwd=cwd, verbose=verbose)
if verbose:
if info['out'].strip():
repo.info(info['out'])
if info['err'].strip():
repo.debug(info['err'])
if info['ret'] != 0:
raise ShellException(ub.repr2(info))
return info
@property
# @ub.memoize_property
def pygit(repo):
""" pip install gitpython """
import git as gitpython
if repo._pygit is None:
repo._pygit = gitpython.Repo(repo.dpath)
return repo._pygit
def develop(repo):
devsetup_script_fpath = join(repo.dpath, 'run_developer_setup.sh')
if not exists(devsetup_script_fpath):
raise AssertionError('Assume we always have run_developer_setup.sh: repo={!r}'.format(repo))
repo._cmd(devsetup_script_fpath, cwd=repo.dpath)
def doctest(repo):
devsetup_script_fpath = join(repo.dpath, 'run_doctests.sh')
if not exists(devsetup_script_fpath):
raise AssertionError('Assume we always have run_doctests.sh: repo={!r}'.format(repo))
repo._cmd(devsetup_script_fpath, cwd=repo.dpath)
def clone(repo):
if exists(repo.dpath):
raise ValueError('cannot clone into non-empty directory')
args = '--recursive'
if repo.branch is not None:
args += ' -b {}'.format(repo.branch)
command = 'git clone {args} {url} {dpath}'.format(args=args, url=repo.url, dpath=repo.dpath)
repo._cmd(command, cwd=repo.code_dpath)
def _assert_clean(repo):
if repo.pygit.is_dirty():
raise DirtyRepoError('The repo={} is dirty'.format(repo))
def check(repo):
repo.ensure(dry=True)
def versions(repo):
"""
Print current version information
"""
fmtkw = {}
fmtkw['pkg'] = parse_version(repo.pkg_dpath) + ','
fmtkw['sha1'] = repo._cmd('git rev-parse HEAD', verbose=0)['out'].strip()
try:
fmtkw['tag'] = repo._cmd('git describe --tags', verbose=0)['out'].strip() + ','
except ShellException:
fmtkw['tag'] = '<None>,'
fmtkw['branch'] = repo.pygit.active_branch.name + ','
fmtkw['repo'] = repo.name + ','
repo.info('repo={repo:<14} pkg={pkg:<12} tag={tag:<18} branch={branch:<10} sha1={sha1}'.format(
**fmtkw))
def ensure_clone(repo):
if exists(repo.dpath):
repo.debug('No need to clone existing repo={}'.format(repo))
else:
repo.debug('Clone non-existing repo={}'.format(repo))
repo.clone()
def ensure(repo, dry=False):
"""
Ensure that the repo is checked out on your local machine, that the
correct branch is checked out, and the upstreams are targeting the
correct remotes.
"""
if repo.verbose > 0:
if dry:
repo.debug(ub.color_text('Checking {}'.format(repo), 'blue'))
else:
repo.debug(ub.color_text('Ensuring {}'.format(repo), 'blue'))
if not exists(repo.dpath):
repo.debug('NEED TO CLONE {}'.format(repo))
if dry:
return
repo.ensure_clone()
repo._assert_clean()
# Ensure all registered remotes exist
for remote_name, remote_url in repo.remotes.items():
try:
remote = repo.pygit.remotes[remote_name]
have_urls = list(remote.urls)
if remote_url not in have_urls:
print('WARNING: REMOTE NAME EXIST BUT URL IS NOT {}. '
'INSTEAD GOT: {}'.format(remote_url, have_urls))
except (IndexError):
try:
print('NEED TO ADD REMOTE {}->{} FOR {}'.format(
remote_name, remote_url, repo))
if not dry:
repo._cmd('git remote add {} {}'.format(remote_name, remote_url))
except ShellException:
if remote_name == repo.remote:
# Only error if the main remote is not available
raise
# Ensure we have the right remote
try:
remote = repo.pygit.remotes[repo.remote]
except IndexError:
if not dry:
raise AssertionError('Something went wrong')
else:
remote = None
if remote is not None:
try:
if not remote.exists():
raise IndexError
else:
repo.debug('The requested remote={} name exists'.format(remote))
except IndexError:
repo.debug('WARNING: remote={} does not exist'.format(remote))
else:
if remote.exists():
repo.debug('Requested remote does exists')
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
repo.info('Branch name not found in local remote. Attempting to fetch')
if dry:
repo.info('dry run, not fetching')
else:
repo._cmd('git fetch {}'.format(remote.name))
repo.info('Fetch was successful')
else:
repo.debug('Requested remote does NOT exist')
# Ensure the remote points to the right place
if repo.url not in list(remote.urls):
repo.debug('WARNING: The requested url={} disagrees with remote urls={}'.format(repo.url, list(remote.urls)))
if dry:
repo.info('Dry run, not updating remote url')
else:
repo.info('Updating remote url')
repo._cmd('git remote set-url {} {}'.format(repo.remote, repo.url))
# Ensure we are on the right branch
if repo.branch != repo.pygit.active_branch.name:
repo.debug('NEED TO SET BRANCH TO {} for {}'.format(repo.branch, repo))
try:
repo._cmd('git checkout {}'.format(repo.branch))
except ShellException:
repo.debug('Checkout failed. Branch name might be ambiguous. Trying again')
try:
repo._cmd('git checkout -b {} {}/{}'.format(repo.branch, repo.remote, repo.branch))
except ShellException:
raise Exception('does the branch exist on the remote?')
tracking_branch = repo.pygit.active_branch.tracking_branch()
if tracking_branch is None or tracking_branch.remote_name != repo.remote:
repo.debug('NEED TO SET UPSTREAM FOR FOR {}'.format(repo))
try:
remote = repo.pygit.remotes[repo.remote]
if not remote.exists():
raise IndexError
except IndexError:
repo.debug('WARNING: remote={} does not exist'.format(remote))
else:
if remote.exists():
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
if dry:
repo.info('Branch name not found in local remote. Dry run, use ensure to attempt to fetch')
else:
repo.info('Branch name not found in local remote. Attempting to fetch')
repo._cmd('git fetch {}'.format(repo.remote))
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
raise Exception('Branch name still does not exist')
if not dry:
repo._cmd('git branch --set-upstream-to={remote}/{branch} {branch}'.format(
remote=repo.remote, branch=repo.branch
))
else:
repo.info('Would attempt to set upstream')
# Print some status
repo.debug(' * branch = {} -> {}'.format(
repo.pygit.active_branch.name,
repo.pygit.active_branch.tracking_branch(),
))
def pull(repo):
repo._assert_clean()
repo._cmd('git pull')
def status(repo):
repo._cmd('git status')
def worker(repo, funcname, kwargs):
repo.verbose = 0
func = getattr(repo, funcname)
func(**kwargs)
return repo
class RepoRegistry(ub.NiceRepr):
def __init__(registery, repos):
registery.repos = repos
def __nice__(registery):
return ub.repr2(registery.repos, si=1, nl=1)
def apply(registery, funcname, num_workers=0, **kwargs):
print(ub.color_text('--- APPLY {} ---'.format(funcname), 'white'))
print(' * num_workers = {!r}'.format(num_workers))
if num_workers == 0:
processed_repos = []
for repo in registery.repos:
print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
try:
getattr(repo, funcname)(**kwargs)
except DirtyRepoError:
print(ub.color_text('Ignoring dirty repo={}'.format(repo), 'red'))
processed_repos.append(repo)
else:
from concurrent import futures
# with futures.ThreadPoolExecutor(max_workers=num_workers) as pool:
with futures.ProcessPoolExecutor(max_workers=num_workers) as pool:
tasks = []
for i, repo in enumerate(registery.repos):
future = pool.submit(worker, repo, funcname, kwargs)
future.repo = repo
tasks.append(future)
processed_repos = []
for future in futures.as_completed(tasks):
repo = future.repo
print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
try:
repo = future.result()
except DirtyRepoError:
print(ub.color_text('Ignoring dirty repo={}'.format(repo), 'red'))
else:
print(repo._getlogs())
processed_repos.append(repo)
print(ub.color_text('--- FINISHED APPLY {} ---'.format(funcname), 'white'))
SHOW_CMDLOG = 1
if SHOW_CMDLOG:
print('LOGGED COMMANDS')
import os
ORIG_CWD = MY_CWD = os.getcwd()
for repo in processed_repos:
print('# --- For repo = {!r} --- '.format(repo))
for t in repo._logged_cmds:
cmd, cwd = t
if cwd is None:
                        cwd = os.getcwd()
if cwd != MY_CWD:
print('cd ' + ub.shrinkuser(cwd))
MY_CWD = cwd
print(cmd)
print('cd ' + ub.shrinkuser(ORIG_CWD))
def determine_code_dpath():
"""
Returns a good place to put the code for the internal dependencies.
Returns:
PathLike: the directory where you want to store your code
In order, the methods used for determing this are:
* the `--codedpath` command line flag (may be undocumented in the CLI)
* the `--codedir` command line flag (may be undocumented in the CLI)
* the CODE_DPATH environment variable
* the CODE_DIR environment variable
* the directory above this script (e.g. if this is in ~/code/repo/super_setup.py then code dir resolves to ~/code)
* the user's ~/code directory.
"""
import os
candidates = [
ub.argval('--codedir', default=''),
ub.argval('--codedpath', default=''),
os.environ.get('CODE_DPATH', ''),
os.environ.get('CODE_DIR', ''),
]
valid = [c for c in candidates if c != '']
if len(valid) > 0:
code_dpath = valid[0]
else:
try:
# This file should be in the top level of a repo, the directory from
# this file should be the code directory.
this_fpath = abspath(__file__)
code_dpath = abspath(dirname(dirname(this_fpath)))
except NameError:
code_dpath = ub.expandpath('~/code')
if not exists(code_dpath):
code_dpath = ub.expandpath(code_dpath)
# if CODE_DIR and not exists(CODE_DIR):
# import warnings
# warnings.warn('environment variable CODE_DIR={!r} was defined, but does not exist'.format(CODE_DIR))
if not exists(code_dpath):
raise Exception(ub.codeblock(
'''
Please specify a correct code_dir using the CLI or ENV.
code_dpath={!r} does not exist.
'''.format(code_dpath)))
return code_dpath
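# Two equivalent ways to point super_setup at a custom code directory
# (illustrative paths only):
#
#   CODE_DPATH=~/src python super_setup.py ensure
#   python super_setup.py ensure --codedir=~/src
#
# Without either, the parent directory of this repository (or ~/code) is used.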
def make_netharn_registry():
code_dpath = determine_code_dpath()
CommonRepo = functools.partial(Repo, code_dpath=code_dpath)
repos = [
# The util libs
CommonRepo(
name='utool', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/utool.git'},
),
CommonRepo(
name='vtool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/vtool_ibeis.git'},
),
CommonRepo(
name='dtool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/dtool_ibeis.git'},
),
CommonRepo(
name='plottool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/plottool_ibeis.git'},
),
CommonRepo(
name='guitool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/guitool_ibeis.git'},
),
CommonRepo(
name='ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': '[email protected]:Erotemic/ibeis.git'},
),
]
registery = RepoRegistry(repos)
return registery
def main():
import click
registery = make_netharn_registry()
only = ub.argval('--only', default=None)
if only is not None:
only = only.split(',')
registery.repos = [repo for repo in registery.repos if repo.name in only]
num_workers = int(ub.argval('--workers', default=8))
if ub.argflag('--serial'):
num_workers = 0
protocol = ub.argval('--protocol', None)
if ub.argflag('--https'):
protocol = 'https'
if ub.argflag('--http'):
protocol = 'http'
if ub.argflag('--ssh'):
protocol = 'ssh'
if protocol is not None:
for repo in registery.repos:
repo.set_protocol(protocol)
default_context_settings = {
'help_option_names': ['-h', '--help'],
'allow_extra_args': True,
'ignore_unknown_options': True}
@click.group(context_settings=default_context_settings)
def cli_group():
pass
@cli_group.add_command
@click.command('pull', context_settings=default_context_settings)
def pull():
registery.apply('pull', num_workers=num_workers)
@cli_group.add_command
@click.command('ensure', context_settings=default_context_settings)
def ensure():
"""
Ensure is the live run of "check".
"""
registery.apply('ensure', num_workers=num_workers)
@cli_group.add_command
@click.command('ensure_clone', context_settings=default_context_settings)
def ensure_clone():
registery.apply('ensure_clone', num_workers=num_workers)
@cli_group.add_command
@click.command('check', context_settings=default_context_settings)
def check():
"""
Check is just a dry run of "ensure".
"""
registery.apply('check', num_workers=num_workers)
@cli_group.add_command
@click.command('status', context_settings=default_context_settings)
def status():
registery.apply('status', num_workers=num_workers)
@cli_group.add_command
@click.command('develop', context_settings=default_context_settings)
def develop():
registery.apply('develop', num_workers=0)
@cli_group.add_command
@click.command('doctest', context_settings=default_context_settings)
def doctest():
registery.apply('doctest')
@cli_group.add_command
@click.command('versions', context_settings=default_context_settings)
def versions():
registery.apply('versions')
cli_group()
if __name__ == '__main__':
main()
| apache-2.0 | -6,734,773,674,303,073,000 | 34.009186 | 125 | 0.52922 | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/toon/GroupPanel.py | 1 | 18189 | from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.nametag import NametagGlobals
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from toontown.toon import ToonAvatarPanel
from toontown.toontowngui import TTDialog
class GroupPanel(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('GroupPanel')
def __init__(self, boardingParty):
self.boardingParty = boardingParty
self.leaderId = self.boardingParty.getGroupLeader(localAvatar.doId)
self.elevatorIdList = self.boardingParty.getElevatorIdList()
self.frame = None
self.confirmQuitDialog = None
self.goButton = None
self.destScrollList = None
self.destFrame = None
self.goingToLabel = None
self.destIndexSelected = 0
self.__load()
self.ignore('stickerBookEntered')
self.accept('stickerBookEntered', self.__forceHide)
self.ignore('stickerBookExited')
self.accept('stickerBookExited', self.__forceShow)
return
def cleanup(self):
base.setCellsAvailable(base.leftCells, 1)
self.quitButton.destroy()
self.hideButton.destroy()
self.showButton.destroy()
self.scrollList.destroy()
if self.goButton:
self.goButton.destroy()
self.goButton = None
if self.destScrollList:
self.destScrollList.destroy()
self.destScrollList = None
if self.destFrame:
self.destFrame.destroy()
self.destFrame = None
if self.goingToLabel:
self.goingToLabel.destroy()
self.goingToLabel = None
if self.frame:
self.frame.destroy()
self.frame = None
self.leaveButton = None
self.boardingParty = None
self.ignoreAll()
return
def __load(self):
self.guiBg = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_groupListBg')
self.__defineConstants()
if self.boardingParty.maxSize == 4:
bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop_half')
bgImageZPos = 0.14
frameZPos = -0.121442
quitButtonZPos = -0.019958
else:
bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop')
bgImageZPos = 0
frameZPos = 0.0278943
quitButtonZPos = -0.30366
guiButtons = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_status')
self.frame = DirectFrame(parent=base.a2dLeftCenter, relief=None, image=bgImage, image_scale=(0.5, 1, 0.5), image_pos=(0, 0, bgImageZPos), textMayChange=1, pos=(0.32, 0, 0))
self.frameBounds = self.frame.getBounds()
leaveButtonGui = loader.loadModel('phase_3.5/models/gui/tt_m_gui_brd_leaveBtn')
leaveImageList = (leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveDown'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveHover'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'))
self.leaderButtonImage = guiButtons.find('**/tt_t_gui_brd_statusLeader')
self.availableButtonImage = guiButtons.find('**/tt_t_gui_brd_statusOn')
self.battleButtonImage = guiButtons.find('**/tt_t_gui_brd_statusBattle')
if localAvatar.doId == self.leaderId:
quitText = TTLocalizer.QuitBoardingPartyLeader
else:
quitText = TTLocalizer.QuitBoardingPartyNonLeader
self.disabledOrangeColor = Vec4(1, 0.5, 0.25, 0.9)
self.quitButton = DirectButton(parent=self.frame, relief=None, image=leaveImageList, image_scale=0.065, command=self.__handleLeaveButton, text=('',
quitText,
quitText,
''), text_scale=0.06, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0.045, 0.0), text_align=TextNode.ALeft, pos=(0.223, 0, quitButtonZPos), image3_color=self.disabledOrangeColor)
arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_arrow')
hideImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover'))
showImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover'))
self.hideButton = DirectButton(parent=base.a2dLeftCenter, relief=None, text_pos=(0, 0.15), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), text_shadow=Vec4(1, 1, 1, 1), image=hideImageList, image_scale=(-0.35, 1, 0.5), pos=(0.04, 0, 0.03), scale=1.05, command=self.hide)
self.showButton = DirectButton(parent=base.a2dLeftCenter, relief=None, text=('', TTLocalizer.BoardingGroupShow, TTLocalizer.BoardingGroupShow), text_pos=(0.03, 0), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), image=showImageList, image_scale=(0.35, 1, 0.5), pos=(0.04, 0, 0.03), scale=1.05, command=self.show)
self.showButton.hide()
self.frame.show()
self.__makeAvatarNameScrolledList()
if localAvatar.doId == self.leaderId:
self.__makeDestinationScrolledList()
else:
self.__makeDestinationFrame()
self.__makeGoingToLabel()
self.accept('updateGroupStatus', self.__checkGroupStatus)
self.accept('ToonBattleIdUpdate', self.__possibleGroupUpdate)
base.setCellsAvailable([base.leftCells[1], base.leftCells[2]], 0)
if self.boardingParty.isGroupLeader(localAvatar.doId):
base.setCellsAvailable([base.leftCells[0]], 0)
self.__addTestNames(self.boardingParty.maxSize)
self.guiBg.removeNode()
guiButtons.removeNode()
leaveButtonGui.removeNode()
arrowGui.removeNode()
return
def __defineConstants(self):
self.forcedHidden = False
self.textFgcolor = Vec4(0.0, 0.6, 0.2, 1.0)
self.textBgRolloverColor = Vec4(1, 1, 0, 1)
self.textBgDownColor = Vec4(0.5, 0.9, 1, 1)
self.textBgDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
def __handleLeaveButton(self):
messenger.send('wakeup')
if not base.cr.playGame.getPlace().getState() == 'elevator':
self.confirmQuitDialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.QuitBoardingPartyConfirm, command=self.__confirmQuitCallback)
self.confirmQuitDialog.show()
def __confirmQuitCallback(self, value):
if self.confirmQuitDialog:
self.confirmQuitDialog.destroy()
self.confirmQuitDialog = None
if value > 0:
if self.boardingParty:
self.boardingParty.requestLeave()
return
def __handleGoButton(self):
offset = self.destScrollList.getSelectedIndex()
elevatorId = self.elevatorIdList[offset]
self.boardingParty.requestGoToFirstTime(elevatorId)
def __handleCancelGoButton(self):
self.boardingParty.cancelGoToElvatorDest()
def __checkGroupStatus(self):
if not self.boardingParty:
return
self.notify.debug('__checkGroupStatus %s' % self.boardingParty.getGroupMemberList(localAvatar.doId))
myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId)
self.scrollList.removeAndDestroyAllItems(refresh=0)
if myMemberList:
for avId in myMemberList:
avatarButton = self.__getAvatarButton(avId)
if avatarButton:
self.scrollList.addItem(avatarButton, refresh=0)
self.scrollList.refresh()
def __possibleGroupUpdate(self, avId):
self.notify.debug('GroupPanel __possibleGroupUpdate')
if not self.boardingParty:
return
myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId)
if avId in myMemberList:
self.__checkGroupStatus()
def __makeAvatarNameScrolledList(self):
friendsListGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
self.scrollList = DirectScrolledList(parent=self.frame, relief=None, incButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'),
friendsListGui.find('**/FndsLst_ScrollDN'),
friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'),
friendsListGui.find('**/FndsLst_ScrollUp')), incButton_pos=(0.0, 0.0, -0.35), incButton_image1_color=Vec4(1.0, 0.9, 0.4, 0), incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), incButton_scale=(1.0, 1.0, -1.0), incButton_relief=None, decButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'),
friendsListGui.find('**/FndsLst_ScrollDN'),
friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'),
friendsListGui.find('**/FndsLst_ScrollUp')), decButton_pos=(0.0, 0.0, 0.1), decButton_image1_color=Vec4(1.0, 1.0, 0.6, 0), decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), decButton_relief=None, itemFrame_pos=(-0.195, 0.0, 0.185), itemFrame_borderWidth=(0.1, 0.1), numItemsVisible=8, itemFrame_scale=1.0, forceHeight=0.07, items=[], pos=(0, 0, 0.075))
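        # Descriptive note (sketch of what the next lines do): a clipping plane with its
        # normal along -X, placed at x=0.235, is attached to the scrolled list so any list
        # geometry (e.g. long toon names) extending past the right edge of the frame is hidden.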
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.235, 0, 0)))
clipNP = self.scrollList.attachNewNode(clipper)
self.scrollList.setClipPlane(clipNP)
friendsListGui.removeNode()
return
def __makeDestinationScrolledList(self):
arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoArrow')
incrementImageList = (arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoDown'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoHover'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'))
if self.boardingParty.maxSize == 4:
zPos = -0.177083
else:
zPos = -0.463843
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_leader')
self.destScrollList = DirectScrolledList(
parent=self.frame,
relief=None,
image=bottomImage,
image_scale=(0.5, 1, 0.5),
incButton_image=incrementImageList,
incButton_pos=(0.217302, 0, 0.07),
incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5),
incButton_scale=(-0.5, 1, 0.5),
incButton_relief=None,
incButtonCallback=self.__informDestChange,
decButton_image=incrementImageList,
decButton_pos=(-0.217302, 0, 0.07),
decButton_scale=(0.5, 1, 0.5),
decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5),
decButton_relief=None,
decButtonCallback=self.__informDestChange,
itemFrame_pos=(0, 0, 0.06),
itemFrame_borderWidth=(0.1, 0.1),
numItemsVisible=1,
itemFrame_scale=TTLocalizer.GPdestScrollList,
forceHeight=0.07,
items=[],
pos=(0, 0, zPos),
scrollSpeed=0.1)
arrowGui.removeNode()
self.__addDestNames()
self.__makeGoButton()
return
def __addDestNames(self):
for i in xrange(len(self.elevatorIdList)):
destName = self.__getDestName(i)
self.destScrollList.addItem(destName, refresh=0)
self.destScrollList.refresh()
def __getDestName(self, offset):
elevatorId = self.elevatorIdList[offset]
elevator = base.cr.doId2do.get(elevatorId)
if elevator:
destName = elevator.getDestName()
return destName
def __makeDestinationFrame(self):
destName = self.__getDestName(self.destIndexSelected)
if self.boardingParty.maxSize == 4:
zPos = -0.12
else:
zPos = -0.404267
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_nonLeader')
self.destFrame = DirectFrame(parent=self.frame, relief=None, image=bottomImage, image_scale=(0.5, 1, 0.5), text=destName, text_align=TextNode.ACenter, text_scale=TTLocalizer.GPdestFrame, pos=(0, 0, zPos))
return
def __makeGoButton(self):
goGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoBtn')
self.goImageList = (goGui.find('**/tt_t_gui_brd_gotoUp'),
goGui.find('**/tt_t_gui_brd_gotoDown'),
goGui.find('**/tt_t_gui_brd_gotoHover'),
goGui.find('**/tt_t_gui_brd_gotoUp'))
self.cancelGoImageList = (goGui.find('**/tt_t_gui_brd_cancelGotoUp'),
goGui.find('**/tt_t_gui_brd_cancelGotoDown'),
goGui.find('**/tt_t_gui_brd_cancelGotoHover'),
goGui.find('**/tt_t_gui_brd_cancelGotoUp'))
if self.boardingParty.maxSize == 4:
            zPos = -0.0360483
else:
zPos = -0.0353787
self.goButton = DirectButton(parent=self.destScrollList, relief=None, image=self.goImageList, image_scale=(0.48, 1, 0.48), command=self.__handleGoButton, text=('',
TTLocalizer.BoardingGo,
TTLocalizer.BoardingGo,
''), text_scale=TTLocalizer.GPgoButton, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.12), pos=(-0.003, 0, zPos))
goGui.removeNode()
return
def __getAvatarButton(self, avId):
toon = base.cr.doId2do.get(avId)
if not toon:
return None
toonName = toon.getName()
inBattle = 0
buttonImage = self.availableButtonImage
if toon.battleId:
inBattle = 1
buttonImage = self.battleButtonImage
if avId == localAvatar.doId:
self.__forceHide()
else:
if avId == self.leaderId:
buttonImage = self.leaderButtonImage
if avId == localAvatar.doId:
self.__forceShow()
return DirectButton(parent=self.frame, relief=None, image=buttonImage, image_scale=(0.06, 1.0, 0.06), text=toonName, text_align=TextNode.ALeft, text_wordwrap=16, text_scale=0.04, text_pos=(0.05, -0.015), text_fg=self.textFgcolor, text1_bg=self.textBgDownColor, text2_bg=self.textBgRolloverColor, text3_fg=self.textBgDisabledColor, pos=(0, 0, 0.2), command=self.__openToonAvatarPanel, extraArgs=[toon, avId])
def __openToonAvatarPanel(self, avatar, avId):
if avId != localAvatar.doId and avatar:
messenger.send('clickedNametag', [avatar])
def __addTestNames(self, num):
for i in xrange(num):
avatarButton = self.__getAvatarButton(localAvatar.doId)
self.scrollList.addItem(avatarButton, refresh=0)
self.scrollList.refresh()
def __isForcedHidden(self):
if self.forcedHidden and self.frame.isHidden():
return True
else:
return False
def hide(self):
self.frame.hide()
self.hideButton.hide()
self.showButton.show()
def show(self):
self.frame.show()
self.forcedHidden = False
self.showButton.hide()
self.hideButton.show()
def __forceHide(self):
if not self.frame.isHidden():
self.forcedHidden = True
self.hide()
def __forceShow(self):
if self.__isForcedHidden():
self.show()
def __informDestChange(self):
self.boardingParty.informDestChange(self.destScrollList.getSelectedIndex())
def changeDestination(self, offset):
if localAvatar.doId != self.leaderId:
self.destIndexSelected = offset
if self.destFrame:
self.destFrame['text'] = self.__getDestName(self.destIndexSelected)
def scrollToDestination(self, offset):
if localAvatar.doId == self.leaderId:
if self.destScrollList:
self.destIndexSelected = offset
self.destScrollList.scrollTo(offset)
def __makeGoingToLabel(self):
if self.boardingParty.maxSize == 4:
zPos = -0.0466546
else:
zPos = -0.331731
self.goingToLabel = DirectLabel(parent=self.frame, relief=None, text=TTLocalizer.BoardingGoingTo, text_scale=0.045, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), pos=(-0.1966, 0, zPos))
return
def disableQuitButton(self):
if self.quitButton and not self.quitButton.isEmpty():
self.quitButton['state'] = DGG.DISABLED
def enableQuitButton(self):
if self.quitButton and not self.quitButton.isEmpty():
self.quitButton['state'] = DGG.NORMAL
def disableGoButton(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['state'] = DGG.DISABLED
self.goButton['image_color'] = Vec4(1, 1, 1, 0.4)
def enableGoButton(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['state'] = DGG.NORMAL
self.goButton['image_color'] = Vec4(1, 1, 1, 1)
def disableDestinationScrolledList(self):
if self.destScrollList and not self.destScrollList.isEmpty():
self.destScrollList.incButton['state'] = DGG.DISABLED
self.destScrollList.decButton['state'] = DGG.DISABLED
def enableDestinationScrolledList(self):
if self.destScrollList and not self.destScrollList.isEmpty():
self.destScrollList.incButton['state'] = DGG.NORMAL
self.destScrollList.decButton['state'] = DGG.NORMAL
def changeGoToCancel(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['image'] = self.cancelGoImageList
self.goButton['text'] = (TTLocalizer.BoardingCancelGo,
TTLocalizer.BoardingCancelGo,
TTLocalizer.BoardingCancelGo,
'')
self.goButton['command'] = self.__handleCancelGoButton
def changeCancelToGo(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['image'] = self.goImageList
self.goButton['text'] = ('',
TTLocalizer.BoardingGo,
TTLocalizer.BoardingGo,
'')
self.goButton['command'] = self.__handleGoButton
| mit | 558,389,024,408,593,340 | 45.164975 | 415 | 0.630601 | false |
nephomaniac/nephoria | nephoria/testcases/boto/ec2/network/net_tests_classic.py | 1 | 95035 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author:
__author__ = '[email protected]'
'''
Test case class to test points of network security groups
See individual test descriptions for test objectives.
test1:
Definition:
Create test instances within each zone within security group1. This security group is authorized for
ssh access from 0.0.0.0/0.
This test attempts the following:
-To run an instance in each zone and confirm it reaches 'running' state.
-Confirm the instance is ping-able from the cc within a given timeout
-Establish and verify an ssh session directly from the local machine running this test.
-Place ssh key on instance for later use
-Add instance to global 'group1_instances'
test2:
Definition:
        This test attempts to create an instance in each zone within security group2 which should not
be authorized for any remote access (outside of the CC).
The test attempts the following:
-To run an instance in each zone and confirm it reaches 'running' state.
-Confirm the instance is ping-able from the cc within a given timeout
-Establish and verify an ssh session using the cc as a proxy.
-Place ssh key on instance for later use
-Add instance to global 'group2_instances'
test3:
Definition:
This test attempts to set up security group rules between group1 and group2 to authorize group2 access
        from group1. If use_cidr is True security groups will be set up using cidr notation ip/mask for each instance in
        group1, otherwise the entire source group1 will be authorized.
Test attempts to:
-Authorize security groups for inter group private ip access.
-Iterate through each zone and attempt to ssh from an instance in group1 to an instance in group2 over their
private ips.
test4:
Definition:
Test attempts to verify that the local machine cannot ssh to the instances within group2 which is not authorized
for ssh access from this source.
test5 (Multi-zone/cluster env):
Definition:
This test attempts to check connectivity for instances in the same security group, but in different zones.
Note: This test requires the CC have tunnelling enabled, or the CCs in each zone be on same
layer 2 network segment.
Test attempts to:
-Iterate through each zone and attempt to ssh from an instance in group1 to an instance in a separate zone
but same security group1 over their private ips.
test 6 (Multi-zone/cluster env):
Definition:
This test attempts to set up security group rules between group1 and group2 to authorize group2 access
from group1 across different zones.
        If no_cidr is True security groups will be set up using cidr notation ip/mask for each instance in
        group1, otherwise the entire source group1 will be authorized.
Note: This test requires the CC have tunnelling enabled, or the CCs in each zone be on same
layer 2 network segment.
Test attempts to:
-Authorize security groups for inter group private ip access.
-Iterate through each zone and attempt to ssh from an instance in group1 to an instance in group2 over their
private ips.
'''
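# Example invocation (hypothetical values; the actual flag names come from CliTestRunner's
# argument definitions in this repo and may differ on this branch):
#   python net_tests_classic.py --clc 10.111.1.1 --password mypass --test-account testacct \
#       --test-user testuser --zone one,two --emi emi-abcd1234 --log-level DEBUG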
#todo: Make use of CC optional so test can be run with only creds and non-sys_admin user.
# CC only provides additional point of debug so can be removed from test for non-euca testing
#todo: Allow test to run with an sys_admin and non-sys_admin account, so debug can be provided through sys_admin and test can
# be run under non-sys_admin if desired.
from boto.ec2.instance import Instance
from paramiko import SSHException
from nephoria.aws.ec2.ec2ops import EC2ops
from nephoria.testcase_utils.cli_test_runner import CliTestRunner, SkipTestException
from nephoria.testcase_utils import wait_for_result, WaitForResultException
from nephoria.testcontroller import TestController
from nephoria.aws.ec2.euinstance import EuInstance
from cloud_utils.net_utils.sshconnection import SshConnection
from cloud_utils.net_utils.sshconnection import CommandExitCodeException, CommandTimeoutException
from cloud_utils.log_utils import red
from cloud_admin.backends.network.midget import Midget
from boto.exception import EC2ResponseError
from cloud_utils.net_utils import test_port_status
from cloud_utils.log_utils import get_traceback
import copy
import socket
import time
import os
import re
import sys
class TestZone():
def __init__(self, zonename):
self.name = zonename
self.zone = zonename
self.test_instance_group1 = None
self.test_instance_group2 = None
class MidoError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class NetTestsClassic(CliTestRunner):
    '''
    Tests instance-to-instance connectivity (ssh/ping) across security groups and
    availability zones in classic/EDGE networking modes.
    '''
    # self._vpc_backend = None
@property
def subnet_id(self):
if hasattr(self.args, 'subnet_id'):
return self.args.subnet_id
return None
@property
def test_controller(self):
tc = getattr(self, '_test_controller', None)
if not tc:
clc_ip = self.args.clc
clc_pw = self.args.password
test_account = self.args.test_account
test_user = self.args.test_user
log_level = getattr(self.args, 'log_level', 'DEBUG')
tc = TestController(hostname=clc_ip, password=clc_pw, log_level=log_level,
clouduser_name=test_user, clouduser_account=test_account)
setattr(self, '_test_controller', tc)
return tc
@test_controller.setter
def test_controller(self, value):
if value is None or isinstance(value, TestController):
setattr(self, '_test_controller', value)
else:
raise ValueError('Can only set testcontroller to type TestController or None, '
'got:"{0}/{1}"'.format(value, type(value)))
@property
def user(self):
return self.test_controller.user
@property
def admin(self):
return self.test_controller.admin
@property
def sysadmin(self):
return self.test_controller.sysadmin
@property
def keypair(self):
kp = getattr(self, '_keypair', None)
if not kp:
try:
keys = self.user.ec2.get_all_current_local_keys()
if keys:
kp = keys[0]
else:
kp = self.user.ec2.create_keypair_and_localcert(
"{0}_key_{1}".format(self.name, time.time()))
setattr(self, '_keypair', kp)
except Exception, ke:
raise Exception("Failed to find/create a keypair, error:" + str(ke))
return kp
@property
def zones(self):
zones = getattr(self, '_zones', None)
if not zones:
### Create local zone list to run nephoria_unit_tests in
if self.args.zone:
zones = str(self.args.zone).replace(',',' ')
zones = zones.split()
else:
zones = self.user.ec2.get_zones()
if not zones:
raise RuntimeError('No zones found to run this test?')
self.log.debug('Running test against zones:' + ",".join(zones))
setattr(self, '_zones', zones)
return zones
def setup_test_security_groups(self):
### Add and authorize security groups
group1 = self.group1
group2 = self.group2
if self.group1:
try:
self.group1 = self.user.ec2.get_security_group(id=self.group1.id)
except EC2ResponseError as ER:
if ER.status == 400:
self.group1 = None
else:
raise ER
if self.group2:
try:
self.group2 = self.user.ec2.get_security_group(id=self.group2.id)
except EC2ResponseError as ER:
if ER.status == 400:
self.group2 = None
else:
raise ER
if not self.group1:
self.log.debug("Creating group1..")
self.group1 = self.user.ec2.add_group(str(self.name) + "_group1_" + str(time.time()))
self.log.debug("Authorize ssh for group1 from '0.0.0.0/0'")
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp',
cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, protocol='icmp',port='-1',
cidr_ip='0.0.0.0/0')
if not self.group2:
self.log.debug("Creating group2, will authorize later from rules within test methods..")
self.group2 = self.user.ec2.add_group(str(self.name) + "_group2_" + str(time.time()))
self.user.ec2.authorize_group(self.group2, protocol='icmp', port='-1',
cidr_ip='0.0.0.0/0')
@property
def group1(self):
g1 = getattr(self, '_group1', None)
if not g1:
### Add and authorize securtiy groups
self.log.debug("Creating group1...")
g1 = self.user.ec2.add_group(str(self.name) + "_group1_" + str(time.time()))
self.log.debug("Authorize ssh for group1 from '0.0.0.0/0'")
self.user.ec2.authorize_group(g1, port=22, protocol='tcp',
cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(g1, port=-1, protocol='icmp',
cidr_ip='0.0.0.0/0')
setattr(self, '_group1', g1)
return g1
@group1.setter
def group1(self, value):
setattr(self, '_group1', value)
@property
def group2(self):
g2 = getattr(self, '_group2', None)
if not g2:
self.log.debug("Creating group2, will authorize later from rules "
"within test methods...")
g2 = self.user.ec2.add_group(str(self.name) + "_group2_" + str(time.time()))
self.user.ec2.authorize_group(g2, port=-1, protocol='icmp',
cidr_ip='0.0.0.0/0')
setattr(self, '_group2', g2)
return g2
@group2.setter
def group2(self, value):
setattr(self, '_group2', value)
@property
def group1_instances(self):
gi = getattr(self, '_group1_instances', None)
if gi is None:
gi = []
self._group1_instances = gi
return gi
@group1_instances.setter
def group1_instances(self, value):
setattr(self, '_group1_instances', value)
@property
def group2_instances(self):
gi = getattr(self, '_group2_instances', None)
if gi is None:
gi = []
self._group2_instances = gi
return gi
@group2_instances.setter
def group2_instances(self, value):
setattr(self, '_group2_instances', value)
@property
def image(self):
image = getattr(self, '_image', None)
if not image:
### Get an image to work with
if self.args.emi:
image = self.user.ec2.get_emi(emi=str(self.args.emi))
else:
image = self.user.ec2.get_emi(root_device_type="instance-store", basic_image=True)
if not image:
raise RuntimeError('couldnt find instance store image')
setattr(self, '_image', image)
return image
@property
def vpc_backend(self):
if not self.is_vpc_mode():
return None
if not hasattr(self, '_vpc_backend'):
self._vpc_backend = None
if not self._vpc_backend:
vpc_backend_host = self.sysadmin.clc_machine.hostname
try:
self._vpc_backend = Midget(vpc_backend_host, systemconnection=self.sysadmin)
except ImportError as IE:
self._vpc_backend = None
self.errormsg('Not Creating VPC backend DEBUG interface, err:"{0}"'.format(str(IE)))
except Exception as VBE:
self._vpc_backend = None
self.errormsg('FYI... Failed to create vpc backend interface, err:\n{0}'
'\nUnable to get VPC backend debug. Ignoring Error:"{1}"'
                              .format(get_traceback(), str(VBE)))
return None
return self._vpc_backend
def errormsg(self, msg):
return self.log.error(red(msg))
def authorize_group_for_instance_list(self, group, instances):
for instance in instances:
assert isinstance(instance, EuInstance)
try:
self.user.ec2.authorize_group(group, protocol='tcp', port=22,
cidr_ip=instance.private_ip_address + "/32")
self.user.ec2.authorize_group(group, protocol='icmp', port='-1',
cidr_ip=instance.private_ip_address + "/32")
except:
self.user.ec2.show_instance(instance)
self.user.ec2.show_security_group(group)
self.errormsg('Failed to authorize group:{0} to allow private ip for '
'instance:"{1}/{2}"'.format(group,
instance.id,
instance.private_ip_address))
raise
def revoke_group_for_instance_list(self, group, instances):
for instance in instances:
assert isinstance(instance, EuInstance)
self.user.ec2.revoke_security_group(group, from_port='22', protocol='tcp',
cidr_ip=instance.private_ip_address + "/32")
self.user.ec2.revoke_security_group(group, from_port='-1', protocol='icmp',
cidr_ip=instance.private_ip_address + "/32")
def clean_method(self):
if self.args.no_clean:
self.status('No clean flag set, not cleaning test resources')
else:
errors = []
ins = self.group1_instances
ins.extend(self.group2_instances)
try:
self.user.ec2.terminate_instances(ins)
except EC2ResponseError as ER:
if ER.status == 400:
pass
else:
raise
except Exception as E:
errors.append(E)
self.log.error("{0}\n{1}".format(get_traceback(), E))
try:
self.user.ec2.delete_group(self.group1)
except EC2ResponseError as ER:
if ER.status == 400:
pass
else:
raise
except Exception as E:
errors.append(E)
self.log.error("{0}\n{1}".format(get_traceback(), E))
try:
self.user.ec2.delete_group(self.group2)
except EC2ResponseError as ER:
if ER.status == 400:
pass
else:
raise
except Exception as E:
errors.append(E)
self.log.error("{0}\n{1}".format(get_traceback(), E))
if errors:
raise RuntimeError("Error in cleanup:{0}"
.format(", ".join(str(e) for e in errors)))
def is_vpc_mode(self):
return 'VPC' in self.user.ec2.get_supported_platforms()
def get_proxy_machine(self, instance, use_mido_gw=False):
if self.is_vpc_mode():
if use_mido_gw:
gw_hosts = self.user.ec2.get_backend_vpc_gateways()
if not gw_hosts:
raise ValueError('No backend VPC gateways were found?')
# pick single gw host and ip for lookup purposes
gw_host_ip = self.user.ec2.clc.ssh.get_ipv4_lookup(gw_hosts[0])
if not gw_host_ip:
raise RuntimeError('Failed to lookup ipv4 address for host:"{0}"'
.format(gw_hosts[0]))
gw_host_ip = gw_host_ip[0]
gw_machine = self.sysadmin.get_host_by_hostname(gw_host_ip)
else:
gw_machine = self.sysadmin.clc_machine
return gw_machine
prop = self.sysadmin.get_property('{0}.cluster.networkmode'.format(instance.placement))
if prop.value.lower() == "edge":
proxy_machine = self.get_active_nc_for_instance(instance)
else:
proxy_machine = self.get_active_cc_for_instance(instance)
self.log.debug("Instance is running on: " + proxy_machine.hostname)
return proxy_machine
def get_vpc_proxy_ssh_connection(self, instance):
"""
Provides a means to communicate to instances within a VPC on their private interfaces
from the VPC namespace (for now this is the CLC). This will act as a sudo proxy interface
to the instances on their private network(s).
:param instance: an instance object to connect to
"""
gw_machine = self.get_proxy_machine(instance=instance)
self.log.debug('Using "{0}" as the internal proxy machine for instance:{1}'
.format(gw_machine.hostname, instance))
if gw_machine:
vpc_proxy_ssh = gw_machine.ssh
else:
raise ValueError('Could not find eutester machine for ip: "{0}"'
.format(gw_machine.hostname))
if instance.keypath:
keyname= '{0}_{1}'.format(instance.id, os.path.basename(instance.keypath))
try:
vpc_proxy_ssh.sys('ls {0}'.format(keyname), code=0)
except CommandExitCodeException:
vpc_proxy_ssh.sftp_put(instance.keypath, keyname)
if not hasattr(vpc_proxy_ssh, 'orig_cmd_method'):
vpc_proxy_ssh.orig_cmd_method = vpc_proxy_ssh.cmd
def newcmd(cmd, **kwargs):
                ssh_cmd = ('ip netns exec {0} ssh -o StrictHostKeyChecking=no -n -i {1} {2}@{3} "{4}"'
                           .format(instance.vpc_id, keyname, instance.username,
                                   instance.private_ip_address,
                                   cmd))
                # Run the wrapped ssh command (not the raw cmd) through the proxy host
                return vpc_proxy_ssh.orig_cmd_method(ssh_cmd, **kwargs)
vpc_proxy_ssh.cmd = newcmd
return vpc_proxy_ssh
def create_proxy_ssh_connection_to_instance(self, instance, retry=10):
if self.is_vpc_mode():
return self.get_vpc_proxy_ssh_connection(instance=instance)
proxy_machine = self.get_proxy_machine(instance)
ssh = None
attempts = 0
elapsed = 0
next_retry_time = 10
start = time.time()
proxy_keypath=proxy_machine.ssh.keypath or None
while not ssh and attempts < retry:
attempts += 1
elapsed = int(time.time()-start)
self.log.debug('Attempting to ssh to instances private ip:' + str(instance.private_ip_address) +
'through the cc ip:' + str(proxy_machine.hostname) + ', attempts:' +str(attempts) + "/" + str(retry) +
", elapsed:" + str(elapsed))
try:
ssh = SshConnection(host=instance.private_ip_address,
keypath=instance.keypath,
proxy=proxy_machine.hostname,
proxy_username=proxy_machine.ssh.username,
proxy_password=proxy_machine.ssh.password,
proxy_keypath=proxy_keypath)
except Exception, ce:
tb = get_traceback()
if attempts >= retry:
self.log.debug("\n" + tb,linebyline=False)
self.log.debug('Failed to connect error:' + str(ce))
if attempts < retry:
time.sleep(next_retry_time)
if not ssh:
raise Exception('Could not ssh to instances private ip:' + str(instance.private_ip_address) +
' through the cc ip:' + str(proxy_machine.hostname) + ', attempts:' +str(attempts) + "/" + str(retry) +
", elapsed:" + str(elapsed))
return ssh
def get_active_cc_for_instance(self,instance,refresh_active_cc=30):
elapsed = time.time()-self.cc_last_checked
self.cc_last_checked = time.time()
if elapsed > refresh_active_cc:
use_cached_list = False
else:
use_cached_list = True
cc = self.sysadmin.get_hosts_for_cluster_controllers(partition=instance.placement)[0]
return cc
def get_active_nc_for_instance(self,instance):
nc = self.sysadmin.get_hosts_for_node_controllers(instanceid=instance.id)[0]
return nc.machine
def ping_instance_private_ip_from_euca_internal(self, instance, ping_timeout=120):
assert isinstance(instance, EuInstance)
proxy_machine = self.get_proxy_machine(instance)
net_namespace = None
if self.is_vpc_mode():
net_namespace = instance.vpc_id
vpc_backend_retries = 0
max_retries = 1
while vpc_backend_retries <= max_retries:
if not self.vpc_backend:
vpc_backend_retries = max_retries + 1
try:
wait_for_result(self._ping_instance_private_ip_from_euca_internal,
result=True,
timeout=ping_timeout,
instance=instance,
proxy_machine=proxy_machine,
net_namespace=net_namespace)
except WaitForResultException:
self.errormsg('Failed to ping instance: {0}, private ip:{1} from internal host: {2}'
.format(instance.id,
instance.private_ip_address,
proxy_machine.hostname))
self.errormsg('Ping failure. Fetching network debug info from internal host...')
proxy_machine.dump_netfail_info(ip=instance.private_ip_address,
net_namespace=net_namespace)
self.errormsg('Done dumping network debug info from the "internal euca proxy host" @ '
'{0} '
'used in attempting to ping instance {1}, private ip: {2}'
.format(proxy_machine.hostname,
instance.id,
instance.private_ip_address))
if self.vpc_backend:
self.dump_vpc_backend_info_for_instance(instance)
raise
else:
raise
vpc_backend_retries += 1
self.log.debug('Successfully pinged instance: {0}, private ip:{1} from internal host: {2}'
.format(instance.id,
instance.private_ip_address,
proxy_machine.hostname))
def dump_vpc_backend_info_for_instance(self, instance):
if self.vpc_backend:
try:
self.vpc_backend.show_instance_network_summary(instance)
except Exception, ME:
self.log.debug('{0}\nCould not dump vpc backend debug, err:{1}'
.format(ME, get_traceback()))
def _ping_instance_private_ip_from_euca_internal(self,
instance,
proxy_machine,
net_namespace=None):
assert isinstance(instance, EuInstance)
try:
proxy_machine.ping_check(instance.private_ip_address,
net_namespace=net_namespace)
return True
except Exception, PE:
self.log.debug('Ping Exception:{0}'.format(PE))
self.log.debug('Failed to ping instance: {0}, private ip:{1} from internal host: {2}'
.format(instance.id,
instance.private_ip_address,
proxy_machine.hostname))
return False
def is_port_in_use_on_instance(self, instance, port, tcp=True, ipv4=True):
args = '-ln'
if tcp:
args += 't'
else:
args += 'u'
if ipv4:
args += 'A inet'
else:
args += 'A inet6'
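        # The assembled remote command looks roughly like (tcp/ipv4, port 8080 as an example):
        #   netstat -lntA inet | awk '$6 == "LISTEN" && $4 ~ ".8080"' | grep LISTEN
        # i.e. list listening sockets and match the port suffix in the local-address column.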
use = instance.sys("netstat " + str(args) + " | awk '$6 ==" +
' "LISTEN" && $4 ~ ".' + str(port) +
'"' + "' | grep LISTEN")
if use:
            self.log.debug('Port {0} IS in use on instance: {1}'
                           .format(port, instance.id))
return True
else:
            self.log.debug('Port {0} IS NOT in use on instance: {1}'
                           .format(port, instance.id))
            return False
def is_port_range_in_use_on_instance(self, instance, start, end,
tcp=True, ipv4=True):
for x in xrange(start, end):
if self.is_port_in_use_on_instance(instance=instance,
port=x,
tcp=tcp,
ipv4=ipv4):
return True
return False
def show_instance_security_groups(self, instance):
assert isinstance(instance, Instance)
self.status('Showing security groups for instance: {0}'.format(instance.id))
for group in instance.groups:
self.user.ec2.show_security_group(group)
################################################################
# Test Methods
################################################################
def test1_create_instance_in_zones_for_security_group1(self, ping_timeout=180, zones=None):
'''
Definition:
Create test instances within each zone within security group1. This security group is authorized for
ssh access from 0.0.0.0/0.
This test attempts the following:
-To run an instance in each zone and confirm it reaches 'running' state.
-Confirm the instance is ping-able from the cc within a given timeout
-Establish and verify an ssh session directly from the local machine running this test.
-Place ssh key on instance for later use
-Add instance to global 'group1_instances'
'''
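        # Note: instances are first started with monitor_to_running=False so all zones launch
        # in parallel, then monitored to 'running' as a batch before any network checks begin.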
if zones and not isinstance(zones, list):
zones = [zones]
zones = zones or self.zones
for zone in zones:
#Create an instance, monitor it's state but disable the auto network/connect checks till afterward
instance = self.user.ec2.run_image(image=self.image,
keypair=self.keypair,
group=self.group1,
zone=zone,
auto_connect=False,
subnet_id=self.subnet_id,
monitor_to_running=False)[0]
self.group1_instances.append(instance)
self.user.ec2.monitor_euinstances_to_running(self.group1_instances)
#Now run the network portion.
for instance in self.group1_instances:
self.status('Checking connectivity to:'
+ str(instance.id) + ":" + str(instance.private_ip_address)
+ ", zone:" + str(instance.placement) )
assert isinstance(instance, EuInstance)
self.log.debug('Attempting to ping instances private ip from cc...')
self.ping_instance_private_ip_from_euca_internal(instance=instance, ping_timeout=ping_timeout)
self.log.debug('Attempting to ssh to instance from local test machine...')
self.log.debug('Check some debug information re this data connection in this security group first...')
self.show_instance_security_groups(instance)
self.user.ec2.does_instance_sec_group_allow(instance=instance,
src_addr=None,
protocol='tcp',
port=22)
try:
start = time.time()
instance.connect_to_instance(timeout=120)
except Exception, ConnectErr:
if self.vpc_backend:
self.errormsg('{0}\n{1}\nFailed to connect to instance:"{2}", dumping info '
.format(ConnectErr, get_traceback(), instance.id))
self.dump_vpc_backend_info_for_instance(instance)
raise ConnectErr
if instance.ssh:
self.status('SSH connection to instance:' + str(instance.id) +
' successful to public ip:' + str(instance.ip_address) +
', zone:' + str(instance.placement))
else:
raise RuntimeError('intance:{0} ssh is none, failed to connect after {1} seconds?'
.format(instance.id, int(time.time()-start)))
instance.sys('uname -a', code=0)
instance.ssh.sftp_put(instance.keypath, os.path.basename(instance.keypath))
instance.sys('chmod 0600 ' + os.path.basename(instance.keypath), code=0 )
def test2_create_instance_in_zones_for_security_group2(self, ping_timeout=180,
auto_connect=False, zones=None):
'''
Definition:
This test attempts to create an instance in each zone within security group2 which should not
be authorized for any remote access (outside of the CC).
The test attempts the following:
-To run an instance in each zone and confirm it reaches 'running' state.
-Confirm the instance is ping-able from the cc within a given timeout
-Establish and verify an ssh session using the cc as a proxy.
-Place ssh key on instance for later use
-Add instance to global 'group2_instances'
:params ping_timeout: Int Time to wait for ping for successful ping to instance(s)
:params auto_connect: Boolean. If True will auto ssh to instance(s), if False will
use cc/nc as ssh proxy
:params zones: List of names of Availability zone(s) to create instances in
'''
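        # When auto_connect is False these instances are reached through an internal euca
        # host (CC/NC, or the CLC network namespace in VPC mode) via a proxied ssh session
        # rather than directly from the machine running this test.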
if zones and not isinstance(zones, list):
zones = [zones]
zones = zones or self.zones
for zone in self.zones:
instance = self.user.ec2.run_image(image=self.image,
keypair=self.keypair,
group=self.group2,
zone=zone,
subnet_id = self.subnet_id,
auto_connect=auto_connect,
monitor_to_running=False)[0]
self.group2_instances.append(instance)
self.user.ec2.monitor_euinstances_to_running(self.group2_instances)
for instance in self.group2_instances:
self.status('Checking connectivity to:' + str(instance.id) + ":" + str(instance.private_ip_address)+
", zone:" + str(instance.placement) )
assert isinstance(instance, EuInstance)
self.show_instance_security_groups(instance)
self.ping_instance_private_ip_from_euca_internal(instance=instance,
ping_timeout=ping_timeout)
if not auto_connect:
self.status('Make sure ssh is working through an internal euca path before '
'trying between instances...')
instance.proxy_ssh = self.create_proxy_ssh_connection_to_instance(instance)
self.status('SSH connection to instance:' + str(instance.id) +
' successful to private ip:' + str(instance.private_ip_address) +
', zone:' + str(instance.placement))
else:
instance.proxy_ssh = instance.ssh
instance.proxy_ssh.sys('uname -a', code=0)
self.status('Uploading keypair to instance in group2...')
instance.proxy_ssh.sftp_put(instance.keypath, os.path.basename(instance.keypath))
instance.proxy_ssh.sys('chmod 0600 ' + os.path.basename(instance.keypath), code=0 )
self.status('Done with create instance security group2:' + str(instance.id))
def test3_test_ssh_between_instances_in_diff_sec_groups_same_zone(self):
'''
Definition:
This test attempts to set up security group rules between group1 and group2 to authorize group2 access
from group1. If no_cidr is True security groups will be setup using cidr notation ip/mask for each instance in
group1, otherwise the entire source group 1 will be authorized.
Test attempts to:
-Authorize security groups for inter group private ip access.
-Iterate through each zone and attempt to ssh from an instance in group1 to an instance in group2 over their
private ips.
- Run same 2 tests from above by authorizing a SecurityGroup
'''
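        # Two authorization styles are exercised below: first each group1 instance's private
        # IP is added to group2 as a /32 cidr rule, then those rules are revoked and a single
        # rule naming group1 itself as the source security group is used instead.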
def check_instance_connectivity():
max_retries = 1
vpc_backend_retries = 0
while vpc_backend_retries <= max_retries:
if not self.vpc_backend:
vpc_backend_retries = max_retries + 1
try:
for zone in self.zones:
instance1 = None
instance2 = None
for instance in self.group1_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instance1 = instance
break
if not instance1:
raise Exception('Could not find instance in group1 for zone:' +
str(zone))
for instance in self.group2_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instance2 = instance
break
if not instance2:
raise Exception('Could not find instance in group2 for zone:'
+ str(zone))
self.status(
'Attempting to run ssh command "uname -a" between instances across '
'security groups:\n'
+ str(instance1.id) + '/sec grps(' + str(instance1.security_groups) +
") --> "
+ str(instance2.id) + '/sec grps(' + str(instance2.security_groups) + ")\n"
+ "Current test run in zone: " + str(zone))
self.log.debug('Check some debug information re this data connection in this '
'security group first...')
self.show_instance_security_groups(instance2)
self.user.ec2.does_instance_sec_group_allow(instance=instance2,
src_addr=instance1.private_ip_address,
protocol='tcp',
port=22)
self.log.debug('Reset ssh connection to instance:{0} first...'
.format(instance1.id))
instance1.connect_to_instance()
self.status('Now Running the ssh command which checks connectivity from '
'instance1 to instance2...')
instance1.sys("ssh -o StrictHostKeyChecking=no -i "
+ str(os.path.basename(instance1.keypath))
+ " root@" + str(instance2.private_ip_address)
+ " 'uname -a'", code=0)
self.status('"{0}" to "{1}" connectivity test succeeded'.format(instance1.id,
instance2.id))
except Exception, ConnectivityErr:
if vpc_backend_retries:
if self.vpc_backend:
self.errormsg('Retry still failed connectivity test after restarting '
'vpc backend')
raise ConnectivityErr
elif self.vpc_backend:
self.dump_vpc_backend_info_for_instance(instance1)
self.dump_vpc_backend_info_for_instance(instance2)
self.errormsg('Could not connect to instance:"{0}"'
.format(instance.id))
raise ConnectivityErr
else:
raise ConnectivityErr
else:
if self.vpc_backend and vpc_backend_retries:
self.log.debug('MidoRetries:{0}'.format(vpc_backend_retries))
raise MidoError('Connectivity test passed, but only after '
'restarting Midolman.')
else:
self.status('Ssh between instances passed')
break
self.status('Authorizing access from group1 individual instance IPs to group2, '
'then checking connectivity...')
self.authorize_group_for_instance_list(self.group2, self.group1_instances)
self.status('group2 should now allow access from each individual instance IP from '
'group1...')
self.user.ec2.show_security_group(self.group2)
check_instance_connectivity()
self.status('Revoking auth for group1 instances from group2, then re-add using '
'the using the group id instead of invididual instance IPs...')
self.revoke_group_for_instance_list(self.group2, self.group1_instances)
self.status('group2 should no longer have authorization from the individual instance IPs'
'from group1...')
self.user.ec2.show_security_group(self.group2)
self.status('Auth group1 access to group2...')
self.user.ec2.authorize_group(self.group2, cidr_ip=None, port=22,
protocol='tcp', src_security_group=self.group1)
self.user.ec2.authorize_group(self.group2, cidr_ip=None, port=None,
protocol='icmp', src_security_group=self.group1)
self.status('Group2 should now allow access from source group1 on tcp/22 and icmp...')
self.user.ec2.show_security_group(self.group2)
check_instance_connectivity()
def test4_attempt_unauthorized_ssh_from_test_machine_to_group2(self):
'''
Description:
Test attempts to verify that the local machine cannot ssh to the instances within group2 which is not authorized
for ssh access from this source.
'''
for instance in self.group2_instances:
assert isinstance(instance, EuInstance)
#Provide some debug information re this data connection in this security group
self.status('Attempting to ssh from local test machine to instance: {0}, '
'this should not be allowed...'.format(instance.id))
self.show_instance_security_groups(instance)
self.user.ec2.does_instance_sec_group_allow(instance=instance, src_addr=None, protocol='tcp',port=22)
try:
instance.reset_ssh_connection(timeout=5)
if self.vpc_backend:
try:
self.vpc_backend.show_instance_network_summary(instance)
except Exception, ME:
self.log.debug('{0}\nCould not dump Mido debug, err:{1}'
.format(ME, get_traceback()))
raise Exception('Was able to connect to instance: ' + str(instance.id) + ' in security group:'
+ str(self.group2.name))
except:
self.log.debug('Success: Was not able to ssh from the local machine to instance in unauthorized sec group')
def test5_test_ssh_between_instances_in_same_sec_groups_different_zone(self):
'''
Definition:
This test attempts to check connectivity for instances in the same security group, but in different zones.
Note: This test requires the CC have tunnelling enabled, or the CCs in each zone be on same
layer 2 network segment.
Test attempts to:
-Iterate through each zone and attempt to ssh from an instance in group1 to an instance in a separate zone
but same security group1 over their private ips.
'''
zones = []
if len(self.zones) < 2:
raise SkipTestException('Skipping test5, only a single zone found or provided')
for zone in self.zones:
zones.append(TestZone(zone))
#Grab a single instance from each zone within security group1
for zone in zones:
instance = None
for instance in self.group1_instances:
if instance.placement == zone.zone:
assert isinstance(instance, EuInstance)
zone.test_instance_group1 = instance
break
instance = None
if not zone.test_instance_group1:
raise Exception('Could not find an instance in group1 for zone:' + str(zone.zone))
self.log.debug('Iterating through zones, attempting ssh between zones within same security group...')
for zone in zones:
instance1 = zone.test_instance_group1
for zone2 in zones:
if zone.zone != zone2.zone:
instance2 = zone2.test_instance_group1
if not instance1 or not instance2:
raise Exception('Security group: ' + str(self.group1.name) + ", missing instances in a Zone:"
+ str(zone.zone) + " = instance:" + str(instance1) +
", Zone:" + str(zone2.zone) + " = instance:" + str(instance2))
self.log.debug('Attempting to run ssh command "uname -a" between instances across zones and security groups:\n'
+ str(instance1.id) + '/sec grps(' + str(instance1.security_groups)+") --> "
+ str(instance2.id) + '/sec grps(' + str(instance2.security_groups)+")\n"
+ "Current test run in zones: " + str(instance1.placement) + "-->" + str(instance2.placement),
linebyline=False )
self.log.debug('Check some debug information re this data connection in this security group first...')
self.user.ec2.does_instance_sec_group_allow(instance=instance2,
src_addr=instance1.private_ip_address,
protocol='tcp',
port=22)
self.log.debug('Now Running the ssh command...')
try:
instance1.sys("ssh -o StrictHostKeyChecking=no -i "
+ str(os.path.basename(instance1.keypath))
+ " root@" + str(instance2.private_ip_address)
+ " ' uname -a'", code=0)
self.log.debug('Ssh between instances passed')
except Exception, ME:
if self.vpc_backend:
try:
self.vpc_backend.show_instance_network_summary(instance)
except Exception, ME:
self.log.debug('{0}\nCould not dump Mido debug, err:{1}'
.format(ME, get_traceback()))
raise
def test6_test_ssh_between_instances_in_diff_sec_groups_different_zone(self):
'''
Definition:
This test attempts to set up security group rules between group1 and group2 to authorize group2 access
from group1 across different zones.
If no_cidr is True security groups will be setup using cidr notication ip/mask for each instance in
group1, otherwise the entire source group 1 will authorized.
the group will be
Note: This test requires the CC have tunnelling enabled, or the CCs in each zone be on same
layer 2 network segment.
Test attempts to:
-Authorize security groups for inter group private ip access.
-Iterate through each zone and attempt to ssh from an instance in group1 to an instance in group2 over their
private ips.
'''
zones = []
if len(self.zones) < 2:
            raise SkipTestException('Skipping test6, only a single zone found or provided')
self.status('Authorizing group2:' + str(self.group2.name) + ' for access from group1:' + str(self.group1.name))
self.user.ec2.authorize_group(self.group2, cidr_ip=None, port=None,
src_security_group=self.group1)
for zone in self.zones:
zones.append(TestZone(zone))
self.log.debug('Grabbing a single instance from each zone and from each test security group to use in this test...')
for zone in zones:
instance = None
for instance in self.group1_instances:
if instance.placement == zone.zone:
assert isinstance(instance, EuInstance)
zone.test_instance_group1 = instance
break
instance = None
if not zone.test_instance_group1:
raise Exception('Could not find an instance in group1 for zone:' + str(zone.zone))
instance = None
for instance in self.group2_instances:
if instance.placement == zone.zone:
assert isinstance(instance, EuInstance)
zone.test_instance_group2 = instance
break
if not zone.test_instance_group2:
raise Exception('Could not find instance in group2 for zone:' + str(zone.zone))
instance = None
self.status('Checking connectivity for instances in each zone, in separate but authorized security groups...')
for zone in zones:
instance1 = zone.test_instance_group1
if not instance1:
raise Exception('Missing instance in Security group: ' + str(self.group1.name) + ', Zone:' +
str(zone) + " = instance:" + str(instance1) )
for zone2 in zones:
if zone.zone != zone2.zone:
instance2 = zone2.test_instance_group2
if not instance2:
raise Exception('Missing instance in Security group: ' + str(self.group2.name) + ', Zone:' +
str(zone2.zone) + " = instance:" + str(instance2) )
self.log.debug('Attempting to run ssh command "uname -a" between instances across zones and security groups:\n'
+ str(instance1.id) + '/sec grps(' + str(instance1.security_groups)+") --> "
+ str(instance2.id) + '/sec grps(' + str(instance2.security_groups)+")\n"
+ "Current test run in zones: " + str(instance1.placement) + "-->" + str(instance2.placement),
linebyline=False )
self.log.debug('Check some debug information re this data connection in this security group first...')
self.user.ec2.does_instance_sec_group_allow(instance=instance2,
src_addr=instance1.private_ip_address,
protocol='tcp',
port=22)
self.log.debug('Now Running the ssh command...')
instance1.sys("ssh -o StrictHostKeyChecking=no -i "
+ str(os.path.basename(instance1.keypath))
+ " root@" + str(instance2.private_ip_address)
+ " ' uname -a'", code=0)
self.log.debug('Ssh between instances passed')
def test7_add_and_revoke_tcp_port_range(self,
start=None,
src_cidr_ip='0.0.0.0/0',
count=10,
instances=None,
retry_interval=15):
'''
Definition:
Attempts to add a range of ports to a security group and test
the ports from the local machine to make sure they are available.
Next the test revokes the ports and verifies they are no longer
available.
:param start: starting port of range to scan
:param src_cidr_ip: cidr ip for src authorization. If None the test
                             will attempt to discover the cidr ip of the
machine running this test to use for src auth ip.
:param count: number of consecutive ports from 'start' to test
:param tcp: boolean tcp if true, udp if false
'''
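        # Rough flow for each port in the authorized range (sketch):
        #   1. start a listener on the instance:  nohup nc -k -l <port> > eutester_port_test.txt &
        #   2. connect from this machine via test_port_status() and send a unique string
        #   3. grep the file on the instance to prove the connection terminated on the VM
        #      (not on an intermediate device such as the CC), then repeat after revoking.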
if instances:
if not isinstance(instances, list):
instances = [instances]
for instance in instances:
assert isinstance(instance, EuInstance)
else:
instances = self.group1_instances
if not instances:
raise ValueError('Could not find instance in group1')
# Iterate through all instances and test...
for instance1 in instances:
# Make sure we can ssh to this instance (note this may need to be
# adjusted for windows access
# 'does_instance_sec_group_allow' will set user.ec2.local_machine_source_ip to the
# ip the local machine uses to communicate with the instance.
instance1.netcat_name = 'netcat'
if src_cidr_ip is None:
if not self.user.ec2.does_instance_sec_group_allow(instance=instance1,
protocol='tcp',
port=22):
src_cidr_ip = str(self.user.ec2.local_machine_source_ip) + '/32'
self.user.ec2.authorize_group(self.group1,
protocol='tcp',
cidr_ip=src_cidr_ip,
port=22)
else:
self.user.ec2.authorize_group(self.group1,
protocol='tcp',
cidr_ip=src_cidr_ip,
port=22)
try:
instance1.sys('which {0}'.format(instance1.netcat_name), code=0)
except CommandExitCodeException:
got_it = False
for pkg in ['nc', 'netcat']:
try:
instance1.sys('apt-get install {0} -y'.format(pkg), code=0)
got_it = True
break
except CommandExitCodeException:
try:
instance1.sys('yum install {0} -y'.format(pkg), code=0)
got_it = True
break
except CommandExitCodeException:
                            self.log.debug('could not install "{0}" on this instance'.format(pkg))
if not got_it:
raise RuntimeError('Could not install netcat on: {0}'.format(instance1))
instance1.netcat_name = pkg
#make sure we have an open port range to play with...
if start is None:
for x in xrange(2000,65000):
if self.is_port_range_in_use_on_instance(instance=instance1,
start=x,
end=x+count,
tcp=True):
x=x+count
else:
start=x
break
if not start:
raise RuntimeError('Free consecutive port range of count:{0} '
'not found on instance:{1}'
.format(count, instance1.id))
# authorize entire port range...
self.user.ec2.authorize_group(self.group1,
protocol='tcp',
cidr_ip=src_cidr_ip,
port=start,
end_port=start+count)
auth_starttime = time.time()
# test entire port range is accessible from this machine
test_file = 'eutester_port_test.txt'
#Allow some delay for the rule to be applied in the network...
time.sleep(10)
for x in xrange(start, start+count):
# Set up socket listener with netcat, to make sure we're not
# connecting to the CC or other device write port to file and
# verify file contents as well.
test_string = '{0} last port tested[{1}]'.format(time.time(), x)
self.log.debug("Gathering debug information as to whether the "
"tester's src ip is authorized for this port test...")
if not self.user.ec2.does_instance_sec_group_allow(
instance=instance1,
src_addr=src_cidr_ip.split('/')[0],
protocol='tcp',
port=x):
raise ValueError('Group:{0} did not have {1}:{2} authorized'
.format(self.group1.name,
src_cidr_ip.split('/')[0],
x))
# start up netcat, sleep to allow nohup to work before quiting
# the shell...
instance1.sys('killall -9 {0} 2> /dev/null'.format(instance1.netcat_name),
timeout=5)
instance1.sys('{' + ' ( nohup {0} -k -l {1} > {2} ) & sleep 1; '
.format(instance1.netcat_name, x, test_file) + '}',
code=0, timeout=5)
# attempt to connect socket at instance/port and send the
# test_string...
time.sleep(2) #Allow listener to setup...
done = False
attempt =0
while not done:
try:
attempt += 1
test_port_status(ip=instance1.ip_address,
port=x,
tcp=True,
send_buf=test_string,
verbose=True)
done = True
except socket.error as SE:
self.log.debug('Failed to poll port status on attempt {0}, elapsed since auth '
'request:"{1}"'
.format(attempt, int(time.time()-auth_starttime)))
try:
self.log.debug('Failed to connect to "{0}":IP:"{1}":'
'PORT:"{2}"'.format(instance1.id,
instance1.ip_address,
x))
                            self.user.ec2.show_security_group(self.group1)
try:
self.log.debug('Getting netcat info from instance...')
instance1.sys('ps aux | grep {0}'.format(instance1.netcat_name),
timeout=10)
except CommandExitCodeException:
pass
self.log.debug('Iptables info from Euca network component '
'responsible for this instance/security '
'group...')
proxy_machine = self.get_proxy_machine(instance1)
proxy_machine.sys('iptables-save', timeout=10)
except:
self.log.debug('Error when fetching debug output for '
'failure, ignoring:' +
str(get_traceback()))
if attempt >= 2:
raise SE
self.log.debug('Sleeping {0} seconds before next attempt:({1}/{2})'
.format(retry_interval, attempt, '2'))
time.sleep(retry_interval)
# Since no socket errors were encountered assume we connected,
# check file on instance to make sure we didn't connect somewhere
# else like the CC...
instance1.sys('grep "{0}" {1}; echo "" > {1}'
.format(test_string, test_file),
code=0)
self.status('Port "{0}" successfully tested on instance:{1}/{2}'
.format(x, instance1.id, instance1.ip_address))
self.status('Authorizing port range {0}-{1} passed'
.format(start, start+count))
self.status('Now testing revoking by removing the same port'
'range...')
time.sleep(3)
self.user.ec2.revoke_security_group(group=self.group1, from_port=start,
to_port=start + count, protocol='tcp',
cidr_ip=src_cidr_ip)
#Allow some delay for the rule to be applied in the network...
time.sleep(10)
for x in xrange(start, start+count):
# Set up socket listener with netcat, to make sure we're not
# connecting to the CC or other device write port to file and
# verify file contents as well.
# This portion of the test expects that the connection will fail.
test_string = '{0} last port tested[{1}]'.format(time.time(), x)
self.log.debug("Gathering debug information as to whether the "
"tester's src ip is authorized for this port test...")
if self.user.ec2.does_instance_sec_group_allow(
instance=instance1,
src_addr=src_cidr_ip.split('/')[0],
protocol='tcp',
port=x):
raise ValueError('Group:{0} has {1}:{2} authorized after revoke'
.format(self.group1.name,
src_cidr_ip,
x))
try:
instance1.sys('killall -9 {0} 2> /dev/null'.format(instance1.netcat_name),
timeout=5)
instance1.sys('{' + ' ( nohup {0} -k -l {1} > {2} ) & sleep 1; '
.format(instance1.netcat_name, x, test_file) + '}',
code=0, timeout=5)
test_port_status(ip=instance1.ip_address,
port=x,
tcp=True,
send_buf=test_string,
verbose=True)
#We may still need to test the file content for the UDP case...
# Since no socket errors were encountered assume we connected,
# check file on instance to make sure we didn't connect somewhere
# else like the CC. Dont' error here cuz it's already a bug...
instance1.sys('grep "{0}" {1}; echo "" > {1}'
.format(test_string, test_file))
except (socket.error, CommandExitCodeException) as OK:
self.status('Port "{0}" successfully revoked on '
'instance:{1}/{2}'
.format(x, instance1.id, instance1.ip_address))
self.status('Add and revoke ports test passed')
def test8_verify_deleting_of_auth_source_group2(self):
"""
Definition:
Attempts to delete a security group which has been authorized by another security group.
-Authorizes group1 access from group2
-Validates connectivity for instances in group1 can be accessed from group2
-Deletes group2, validates group1 still allows traffic from other authorized sources
"""
zones = []
for zone in self.zones:
zones.append(TestZone(zone))
# Make sure the groups are created.
self.status('Checking and/or create test security groups, and at least one instance'
'running in them per zone...')
self.setup_test_security_groups()
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group2, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
for zone in self.zones:
instances_group1 = []
instances_group2 = []
for instance in self.group1_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instances_group1.append(instance)
if len(instances_group1) < 1:
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
for instance in self.group2_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instances_group2.append(instance)
if len(instances_group2) < 1:
self.test2_create_instance_in_zones_for_security_group2(zones=[zone])
self.status('Clean out any existing rules in group1 to start with a clean group...')
self.user.ec2.revoke_all_rules(self.group1)
self.user.ec2.show_security_group(self.group1)
instance1 = self.group1_instances[0]
#Add back ssh
assert not self.user.ec2.does_instance_sec_group_allow(instance=instance1,
protocol='tcp',
port=22), \
            'Instance: {0}, security group still allows access after ' \
            'revoking all rules'.format(instance1.id)
self.status('Authorize group1 access from group testing machine ssh (tcp/22)...')
self.user.ec2.authorize_group(self.group1,
# cidr_ip=str(user.ec2.local_machine_source_ip) + '/32',
cidr_ip='0.0.0.0/0', # open to 0/0 to avoid nat issues
protocol='tcp',
port=22)
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
self.user.ec2.show_security_group(self.group1)
self.status('Test ssh access from this testing machine to each instance in group1...')
for instance in self.group1_instances:
try:
instance.printself()
self.user.ec2.does_instance_sec_group_allow(instance=instance, protocol='tcp', port=22)
except:
pass
instance.connect_to_instance()
instance.sys('echo "reset ssh worked"', code=0)
self.status('Authorizing group2 access to group1...')
self.user.ec2.authorize_group(self.group1,
cidr_ip=None,
port=-1,
protocol='icmp',
src_security_group=self.group2)
# For debug purposes allow ssh from anywhere here...
self.user.ec2.authorize_group(self.group1,
cidr_ip=None,
port=22,
protocol='tcp')
self.status('Sleeping for 10 seconds to allow rule/network'
' to set...')
time.sleep(10)
self.user.ec2.show_security_group(self.group1)
self.status('Checking auth from group2 to group1 instances...')
self.log.debug('Check some debug information re this data connection in this security '
'group first...')
for zone in zones:
for instance in self.group1_instances:
if instance.placement == zone.name:
zone.test_instance_group1 = instance
if not zone.test_instance_group1.ssh:
self.status('Instance in group1 did not have an ssh connection, '
'trying to setup ssh now...')
self.user.ec2.show_security_groups_for_instance(zone.test_instance_group1)
self.log.debug('ssh connect using instance:"{0}", keypath:"{1}"'
.format(zone.test_instance_group1,
zone.test_instance_group1.keypath))
zone.test_instance_group1.connect_to_instance()
break
for instance in self.group2_instances:
if instance.placement == zone.name:
zone.test_instance_group2 = instance
if not zone.test_instance_group2.ssh:
                        self.status('Instance in group2 did not have an ssh connection, '
'trying to setup ssh now...')
self.user.ec2.show_security_groups_for_instance(zone.test_instance_group2)
self.log.debug('ssh connect using instance:"{0}", keypath:"{1}"'
.format(zone.test_instance_group2,
zone.test_instance_group2.keypath))
zone.test_instance_group2.connect_to_instance()
break
if not zone.test_instance_group1:
raise ValueError('Could not find instances in sec group1'
'group for zone:' + str(zone.name))
if not zone.test_instance_group2:
raise ValueError('Could not find instances in sec group2'
'group for zone:' + str(zone.name))
assert isinstance(zone.test_instance_group1, EuInstance)
assert isinstance(zone.test_instance_group2, EuInstance)
for zone in zones:
#Make sure the instance in group1 has allowed icmp access from group2
allowed = False
if self.user.ec2.does_instance_sec_group_allow(
instance=zone.test_instance_group1,
src_group=self.group2,
protocol='icmp',
port='-1'):
allowed = True
if not allowed:
raise ValueError('Group2 instance not allowed in group1'
' after authorizing group2')
self.status('Attempting to ping group1 instance from group2 '
'instance using their private IPs')
try:
zone.test_instance_group2.ssh.verbose = True
zone.test_instance_group2.sys(
'ping -c 1 {0}'
.format(zone.test_instance_group1.private_ip_address),
code=0,verbose=True)
except:
self.errormsg('Failed to ping from group2 to group1 instance '
'after authorizing the source group2')
raise
self.status('Terminating all instances in group2 in order to delete '
'security group2')
self.user.ec2.terminate_instances(self.group2_instances)
self.group2_instances = []
self.user.ec2.delete_group(self.group2)
self.status('Now confirm that ssh still works for all instances in group1')
for instance in self.group1_instances:
self.user.ec2.show_security_groups_for_instance(instance)
self.log.debug('Attempting to connect to instance from source IP: "{0}"'
.format(self.user.ec2.local_machine_source_ip))
instance.connect_to_instance(timeout=300)
instance.sys('echo "Getting hostname from {0}"; hostname'
.format(instance.id), code=0)
self.status('Passed. Group1 ssh working after deleting src group which '
'was authorized to group1')
def test9_ssh_between_instances_same_group_same_zone_public(self):
"""
Definition:
For each zone this test will attempt to test ssh between two instances in the same
security group using the public ips of the instances.
-Authorize group for ssh access
-Re-use or create 2 instances within the same security group, same zone
-For each zone, attempt to ssh to a vm in the same security group same zone
"""
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
for zone in self.zones:
instances =[]
for instance in self.group1_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instances.append(instance)
if len(instances) < 2:
for x in xrange(len(instances), 2):
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
for zone in self.zones:
zone_instances = []
for instance in self.group1_instances:
if instance.placement == zone:
zone_instances.append(instance)
instance1 = zone_instances[0]
instance2 = zone_instances[1]
instance1.ssh.sftp_put(instance1.keypath, 'testkey.pem')
instance1.sys('chmod 0600 testkey.pem')
testphrase = "pubsamezone_test_from_instance1_{0}".format(instance1.id)
testfile = 'testfile.txt'
retry = 2
for x in xrange(0, retry):
try:
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.ip_address, testphrase, testfile),
code=0, timeout=20)
break
except (CommandTimeoutException, CommandExitCodeException) as CE:
self.status('Attempt #{0} to connect between instances failed:"{1}'
.format(x, str(CE)))
if x:
raise
instance2.sys('hostname; ifconfig; pwd; ls; cat {0} | grep {1}'
.format(testfile, testphrase), code=0)
def test10_ssh_between_instances_same_group_public_different_zone(self):
"""
Definition:
If multiple zones are detected, this test will attempt to test ssh between
        two instances in the same security group and across each zone using the public ips
of the instances
-Authorize group for ssh access
-Re-use or create 2 instances within the same security group, different zone(s)
-For each zone, attempt to ssh to a vm in the same security group different zone(s)
"""
if len(self.zones) < 2:
raise SkipTestException('Skipping multi-zone test, '
'only a single zone found or provided')
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
zone_instances = {}
for zone in self.zones:
instances =[]
for instance in self.group1_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instances.append(instance)
if len(instances) < 1:
for x in xrange(len(instances), 1):
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
zone_instances[zone] = instances
for zone1 in self.zones:
instance1 = zone_instances[zone1][0]
instance1.ssh.sftp_put(instance1.keypath, 'testkey.pem')
instance1.sys('chmod 0600 testkey.pem')
for zone2 in self.zones:
                if zone1 != zone2:
instance2 = zone_instances[zone2][0]
testphrase = "diffpubzone_test_from_instance1_{0}".format(instance1.id)
testfile = 'testfile.txt'
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.ip_address, testphrase, testfile),
code=0,
timeout=10)
instance2.sys('cat {0} | grep {1}'.format(testfile, testphrase), code=0)
def test11_ssh_between_instances_same_group_same_zone_private(self):
"""
Definition:
For each zone this test will attempt to test ssh between two instances in the same
security group using the private ips of the instances.
-Authorize group for ssh access
-Re-use or create 2 instances within the same security group, same zone
-For each zone, attempt to ssh to a vm in the same security group same zone
"""
# Remove all rules from the group and add back the minimum amount of rules to run
# this test...
self.user.ec2.revoke_all_rules(self.group1)
time.sleep(1)
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
self.status('Using 2 instances from each zone within the following security group to'
'test private ip connectivity:"{0}"'.format(self.group1))
self.user.ec2.show_security_group(self.group1)
for zone in self.zones:
instances =[]
for instance in self.group1_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instances.append(instance)
if len(instances) < 2:
for x in xrange(len(instances), 2):
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
for zone in self.zones:
zone_instances = []
zone_name = getattr(zone, 'name', None) or zone
for instance in self.group1_instances:
if instance.placement == zone_name:
zone_instances.append(instance)
instance1 = zone_instances[0]
instance2 = zone_instances[1]
instance1.ssh.sftp_put(instance1.keypath, 'testkey.pem')
instance1.sys('chmod 0600 testkey.pem')
testphrase = "hello_from_instance1_{0}".format(instance1.id)
testfile = 'testfile.txt'
self.status("Attempting to ssh from instance:{0} to instance:{1}'s private ip:{2}"
.format(instance1.id, instance2.id, instance2.private_ip_address))
try:
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.private_ip_address, testphrase, testfile),
code=0,
timeout=10)
            except Exception as se:
self.status('First attempt to ssh between instances failed, err: ' + str(se) +
'\nIncreasing command timeout to 20 seconds, and trying again. ')
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.private_ip_address, testphrase, testfile),
code=0,
timeout=20)
self.status('Cat the test file create from the ssh cmd {0} ran on on {1}...'
.format(instance1, instance2))
instance2.sys('cat {0} | grep {1}'.format(testfile, testphrase), code=0)
def test12_ssh_between_instances_same_group_private_different_zone(self):
"""
Definition:
If multiple zones are detected, this test will attempt to test ssh between
two instances in the same security group and across each zone using the instances'
private ip addresses.
-Authorize group for ssh access
-Re-use or create 2 instances within the same security group, different zone(s)
-For each zone, attempt to ssh to a vm in the same security group different zone(s)
"""
if len(self.zones) < 2:
raise SkipTestException('Skipping multi-zone test, '
'only a single zone found or provided')
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
for zone in self.zones:
instances =[]
for instance in self.group1_instances:
if instance.placement == zone:
assert isinstance(instance, EuInstance)
instances.append(instance)
if len(instances) < 1:
for x in xrange(len(instances), 1):
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
for zone1 in self.zones:
zone_instances = []
for instance in self.group1_instances:
if instance.placement == zone1:
zone_instances.append(instance)
instance1 = zone_instances[0]
instance1.ssh.sftp_put(instance1.keypath, 'testkey.pem')
instance1.sys('chmod 0600 testkey.pem')
for zone2 in self.zones:
if zone1 != zone2:
zone2_instances = []
for instance in self.group1_instances:
if instance.placement == zone2:
zone2_instances.append(instance)
                    instance2 = zone2_instances[0]
testphrase = "diffprivzone_test_from_instance1_{0}".format(instance1.id)
testfile = 'testfile.txt'
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.ip_address, testphrase, testfile),
code=0,
timeout=10)
instance2.sys('cat {0} | grep {1}'.format(testfile, testphrase), code=0)
def test13_ssh_between_instances_diff_group_private_different_zone(self):
"""
Definition:
        If multiple zones are detected, this test will attempt to test ssh between
        two instances in different security groups and across each zone using the instances'
        private ip addresses.
        -Authorize both groups for ssh access
        -Re-use or create instances in two different security groups, different zone(s)
        -For each zone, attempt to ssh to a vm in a different security group and different zone(s)
"""
if len(self.zones) < 2:
raise SkipTestException('Skipping multi-zone test, '
'only a single zone found or provided')
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
# In case a previous test has deleted group2...
self.group2 = self.user.ec2.add_group(self.group2.name)
self.user.ec2.authorize_group(self.group2, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group2, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
for zone in self.zones:
instance1 = None
instances =[]
for instance in self.group1_instances:
if instance.placement == zone:
instance1 = instance
if not instance1:
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
for instance in self.group1_instances:
if instance.placement == zone:
instance1 = instance
instance1.ssh.sftp_put(instance1.keypath, 'testkey.pem')
instance1.sys('chmod 0600 testkey.pem')
for zone2 in self.zones:
instance2 = None
if zone2 != zone:
for instance in self.group2_instances:
if instance.placement == zone2:
instance2 = instance
if not instance2:
self.test2_create_instance_in_zones_for_security_group2(zones=[zone2],
auto_connect=True)
for instance in self.group2_instances:
if instance.placement == zone2:
instance2 = instance
testphrase = "diffprivzone_test_from_instance1_{0}".format(instance1.id)
testfile = 'testfile.txt'
self.status('Testing instance:{0} zone:{1} --ssh--> instance:{2} zone:{3} '
'-- private ip'.format(instance1.id, zone,instance2.id, zone2))
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.private_ip_address, testphrase, testfile),
code=0,
timeout=10)
instance2.sys('cat {0} | grep {1}'.format(testfile, testphrase), code=0)
def test14_ssh_between_instances_diff_group_public_different_zone(self):
"""
Definition:
        If multiple zones are detected, this test will attempt to test ssh between
        two instances in different security groups and across each zone using the instances'
        public ip addresses.
        -Authorize both groups for ssh access
        -Re-use or create instances in two different security groups, different zone(s)
        -For each zone, attempt to ssh to a vm in a different security group and different zone(s)
"""
if len(self.zones) < 2:
raise SkipTestException('Skipping multi-zone test, '
'only a single zone found or provided')
self.user.ec2.authorize_group(self.group1, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group1, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
# In case a previous test has deleted group2...
self.group2 = self.user.ec2.add_group(self.group2.name)
self.user.ec2.authorize_group(self.group2, port=22, protocol='tcp', cidr_ip='0.0.0.0/0')
self.user.ec2.authorize_group(self.group2, port=-1, protocol='icmp', cidr_ip='0.0.0.0/0')
for zone in self.zones:
instance1 = None
instances =[]
for instance in self.group1_instances:
if instance.placement == zone:
instance1 = instance
if not instance1:
self.test1_create_instance_in_zones_for_security_group1(zones=[zone])
for instance in self.group1_instances:
if instance.placement == zone:
instance1 = instance
instance1.ssh.sftp_put(instance1.keypath, 'testkey.pem')
instance1.sys('chmod 0600 testkey.pem')
for zone2 in self.zones:
instance2 = None
if zone2 != zone:
for instance in self.group2_instances:
if instance.placement == zone2:
instance2 = instance
if not instance2:
self.test2_create_instance_in_zones_for_security_group2(zones=[zone2],
auto_connect=True)
for instance in self.group2_instances:
if instance.placement == zone2:
instance2 = instance
testphrase = "diffprivzone_test_from_instance1_{0}".format(instance1.id)
testfile = 'testfile.txt'
self.status('Testing instance:{0} zone:{1} --ssh--> instance:{2} zone:{3} '
                                '-- public ip'.format(instance1.id, zone,instance2.id, zone2))
instance1.sys("ssh -o StrictHostKeyChecking=no -i testkey.pem root@{0} "
"\'echo {1} > {2}; hostname; ifconfig; pwd; ls\'"
.format(instance2.ip_address, testphrase, testfile),
code=0,
timeout=10)
instance2.sys('cat {0} | grep {1}'.format(testfile, testphrase), code=0)
# add revoke may be covered above...?
def test_revoke_rules(self):
revoke_group = self.user.ec2.add_group("revoke-group-" + str(int(time.time())))
self.user.ec2.authorize_group(revoke_group, protocol='tcp', port=22)
for zone in self.zones:
instance = self.user.ec2.run_image(image=self.image,
keypair=self.keypair,
subnet_id = self.subnet_id,
group=revoke_group,
zone=zone)[0]
self.user.ec2.revoke_security_group(revoke_group, from_port=22, protocol='tcp')
self.log.debug('Sleeping for 60 seconds before retrying group')
time.sleep(60)
try:
instance.reset_ssh_connection(timeout=30)
self.user.ec2.delete_group(revoke_group)
raise Exception("Was able to SSH without authorized rule")
            except SSHException as e:
self.log.debug("SSH was properly blocked to the instance")
self.user.ec2.authorize_group(revoke_group, protocol='tcp', port=22)
instance.reset_ssh_connection()
self.user.ec2.terminate_instances(instance)
self.user.ec2.delete_group(revoke_group)
def _run_suite(self, testlist=None, basic_only=False, exclude=None):
# The first tests will have the End On Failure flag set to true. If these tests fail
# the remaining tests will not be attempted.
unit_list = []
testlist = testlist or []
exclude = exclude or []
if exclude:
exclude = re.sub('[",]', " ", str(exclude)).split()
if testlist:
if not isinstance(testlist, list):
                testlist = testlist.replace(',', ' ')
                testlist = testlist.split()
for test in testlist:
unit_list.append(nettests.create_testunit_by_name(test))
else:
unit_list =[
self.create_testunit_by_name('test1_create_instance_in_zones_for_security_group1',
eof=True),
self.create_testunit_by_name('test2_create_instance_in_zones_for_security_group2',
eof=True),
self.create_testunit_by_name(
'test3_test_ssh_between_instances_in_diff_sec_groups_same_zone', eof=True)]
if basic_only:
testlist = []
else:
# Then add the rest of the tests...
testlist = [ 'test4_attempt_unauthorized_ssh_from_test_machine_to_group2',
'test5_test_ssh_between_instances_in_same_sec_groups_different_zone',
'test7_add_and_revoke_tcp_port_range',
'test8_verify_deleting_of_auth_source_group2',
'test9_ssh_between_instances_same_group_same_zone_public',
'test10_ssh_between_instances_same_group_public_different_zone',
'test11_ssh_between_instances_same_group_same_zone_private',
'test12_ssh_between_instances_same_group_private_different_zone',
'test13_ssh_between_instances_diff_group_private_different_zone',
'test14_ssh_between_instances_diff_group_public_different_zone']
for test in exclude:
if test in testlist:
testlist.remove(test)
for test in testlist:
unit_list.append(self.create_testunit_by_name(test))
        self.status('Running the following list of tests:' + str(testlist))
### Run the EutesterUnitTest objects
result = self.run(unit_list,eof=False,clean_on_exit=True)
self.status('Test finished with status:"{0}"'.format(result))
return result
if __name__ == "__main__":
nettests = NetTestsClassic()
exit(nettests._run_suite(testlist=nettests.args.test_list))
| bsd-2-clause | 1,554,131,369,962,313,500 | 51.073973 | 131 | 0.52729 | false |
endlessm/chromium-browser | third_party/chromite/scripts/cros_oobe_autoconfig_unittest.py | 1 | 6578 | # -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for cros_oobe_autoconfig.py"""
from __future__ import print_function
import json
import os
import pwd
import sys
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import image_lib
from chromite.lib import osutils
from chromite.scripts import cros_oobe_autoconfig
pytestmark = [cros_test_lib.pytestmark_inside_only,
cros_test_lib.pytestmark_skip('https://crbug.com/1000761')]
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
_TEST_DOMAIN = 'test.com'
_TEST_CLI_PARAMETERS = (
'image.bin', '--x-demo-mode', '--x-network-onc', '{}',
'--x-network-auto-connect', '--x-eula-send-statistics',
'--x-eula-auto-accept', '--x-update-skip', '--x-wizard-auto-enroll',
'--enrollment-domain', _TEST_DOMAIN)
_TEST_CONFIG_JSON = {
'demo-mode': True,
'network-onc': '{}',
'network-auto-connect': True,
'eula-send-statistics': True,
'eula-auto-accept': True,
'update-skip': True,
'wizard-auto-enroll': True
}
_IMAGE_SIZE = 4 * 1024 * 1024
_BLOCK_SIZE = 4096
_SECTOR_SIZE = 512
_STATEFUL_SIZE = _IMAGE_SIZE // 2
_STATEFUL_OFFSET = 120 * _SECTOR_SIZE
class SanitizeDomainTests(cros_test_lib.TestCase):
"""Tests for SanitizeDomain()"""
def testASCII(self):
"""Tests that ASCII-only domains are not mangled."""
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain('FoO.cOm'), 'foo.com')
def testUnicodeCase(self):
"""Tests that ASCII-only domains are not mangled."""
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(u'föo.com'),
'xn--fo-fka.com')
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(u'fÖo.com'),
'xn--fo-fka.com')
def testHomographs(self):
"""Tests that a Unicode domain is punycoded."""
# "tеѕt.com" looks like "test.com" but isn't!
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(
u't\u0435\u0455t.com'), 'xn--tt-nlc2k.com')
class PrepareImageTests(cros_test_lib.MockTempDirTestCase):
"""Tests for PrepareImage()"""
def setUp(self):
"""Create a small test disk image for testing."""
self.image = os.path.join(self.tempdir, 'image.bin')
state = os.path.join(self.tempdir, 'state.bin')
# Allocate space for the disk image and stateful partition.
osutils.AllocateFile(self.image, _IMAGE_SIZE)
osutils.AllocateFile(state, _STATEFUL_SIZE)
commands = (
# Format the stateful image as ext4.
['/sbin/mkfs.ext4', state],
# Create the GPT headers and entry for the stateful partition.
['cgpt', 'create', self.image],
['cgpt', 'boot', '-p', self.image],
['cgpt', 'add', self.image, '-t', 'data',
'-l', str(constants.CROS_PART_STATEFUL),
'-b', str(_STATEFUL_OFFSET // _SECTOR_SIZE),
'-s', str(_STATEFUL_SIZE // _SECTOR_SIZE), '-i', '1'],
# Copy the stateful partition into the GPT image.
['dd', 'if=%s' % state, 'of=%s' % self.image, 'conv=notrunc', 'bs=4K',
'seek=%d' % (_STATEFUL_OFFSET // _BLOCK_SIZE),
'count=%s' % (_STATEFUL_SIZE // _BLOCK_SIZE)],
['sync'])
for cmd in commands:
cros_build_lib.run(cmd, quiet=True)
# Run the preparation script on the image.
cros_oobe_autoconfig.main([self.image] + list(_TEST_CLI_PARAMETERS)[1:])
# Mount the image's stateful partition for inspection.
self.mount_tmp = os.path.join(self.tempdir, 'mount')
osutils.SafeMakedirs(self.mount_tmp)
self.mount_ctx = image_lib.LoopbackPartitions(self.image, self.mount_tmp)
self.mount = os.path.join(self.mount_tmp,
'dir-%s' % constants.CROS_PART_STATEFUL)
self.oobe_autoconf_path = os.path.join(self.mount, 'unencrypted',
'oobe_auto_config')
self.config_path = os.path.join(self.oobe_autoconf_path, 'config.json')
self.domain_path = os.path.join(self.oobe_autoconf_path,
'enrollment_domain')
def testChronosOwned(self):
"""Test that the OOBE autoconfig directory is owned by chronos."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
chronos_uid = pwd.getpwnam('chronos').pw_uid
self.assertExists(self.oobe_autoconf_path)
self.assertEqual(os.stat(self.config_path).st_uid, chronos_uid)
def testConfigContents(self):
"""Test that the config JSON matches the correct data."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
with open(self.config_path) as fp:
data = json.load(fp)
self.assertEqual(data, _TEST_CONFIG_JSON)
def testDomainContents(self):
"""Test that the domain file matches the correct data."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
self.assertEqual(osutils.ReadFile(self.domain_path), _TEST_DOMAIN)
class GetConfigContentTests(cros_test_lib.MockTestCase):
"""Tests for GetConfigContent()"""
def testBasic(self):
"""Test that config is generated correctly with all options."""
opts = cros_oobe_autoconfig.ParseArguments(_TEST_CLI_PARAMETERS)
conf = cros_oobe_autoconfig.GetConfigContent(opts)
self.assertEqual(json.loads(conf), _TEST_CONFIG_JSON)
def testUnspecified(self):
"""Test that config is generated correctly with some options missing."""
cli = list(_TEST_CLI_PARAMETERS)
cli.remove('--x-update-skip')
expected = dict(_TEST_CONFIG_JSON)
expected['update-skip'] = False
opts = cros_oobe_autoconfig.ParseArguments(cli)
conf = cros_oobe_autoconfig.GetConfigContent(opts)
self.assertEqual(json.loads(conf), expected)
class MainTests(cros_test_lib.MockTestCase):
"""Tests for main()"""
def setUp(self):
self.PatchObject(cros_oobe_autoconfig, 'PrepareImage')
def testBasic(self):
"""Simple smoke test"""
cros_oobe_autoconfig.main(_TEST_CLI_PARAMETERS)
| bsd-3-clause | 1,724,904,975,207,223,000 | 36.352273 | 79 | 0.65896 | false |
jonberliner/keras | keras/optimizers.py | 1 | 7022 | from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .utils.theano_utils import shared_zeros, shared_scalar
from six.moves import zip
def clip_norm(g, c, n):
if c > 0:
g = T.switch(T.ge(n, c), g * c / n, g)
return g
def kl_divergence(p, p_hat):
return p_hat - p + p * T.log(p / p_hat)
class Optimizer(object):
def get_updates(self, params, constraints, loss):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = T.grad(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
return grads
def get_config(self):
return {"name": self.__class__.__name__}
class SGD(Optimizer):
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
updates = [(self.iterations, self.iterations + 1.)]
for p, g, c in zip(params, grads, constraints):
m = shared_zeros(p.get_value().shape) # momentum
v = self.momentum * m - lr * g # velocity
updates.append((m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"momentum": self.momentum,
"decay": self.decay,
"nesterov": self.nesterov}
class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"rho": self.rho,
"epsilon": self.epsilon}
class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = a + g ** 2 # update accumulator
updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"epsilon": self.epsilon}
class Adadelta(Optimizer):
'''
Reference: http://arxiv.org/abs/1212.5701
'''
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, d_a, c in zip(params, grads, accumulators, delta_accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon)
new_p = p - self.lr * update
updates.append((p, c(new_p))) # apply constraints
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2
updates.append((d_a, new_d_a))
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"rho": self.rho,
"epsilon": self.epsilon}
class Adam(Optimizer):
'''
Reference: http://arxiv.org/abs/1412.6980
Default parameters follow those provided in the original paper
lambda is renamed kappa.
'''
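    # Hedged usage sketch (not part of this file; assumes the usual Keras model
    # API of this fork, where an optimizer instance or its string alias is
    # passed to compile()):
    #   from keras.optimizers import Adam
    #   model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001))
    #   # or resolved by name through keras.optimizers.get:
    #   model.compile(loss='categorical_crossentropy', optimizer='adam')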
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, kappa=1-1e-8, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
updates = [(self.iterations, self.iterations+1.)]
i = self.iterations
beta_1_t = self.beta_1 * (self.kappa**i)
# the update below seems missing from the paper, but is obviously required
beta_2_t = self.beta_2 * (self.kappa**i)
for p, g, c in zip(params, grads, constraints):
m = theano.shared(p.get_value() * 0.) # zero init of moment
v = theano.shared(p.get_value() * 0.) # zero init of velocity
m_t = (beta_1_t * m) + (1 - beta_1_t) * g
v_t = (beta_2_t * v) + (1 - beta_2_t) * (g**2)
m_b_t = m_t / (1 - beta_1_t)
v_b_t = v_t / (1 - beta_2_t)
p_t = p - self.lr * m_b_t / (T.sqrt(v_b_t) + self.epsilon)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, c(p_t))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"kappa": self.kappa}
# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
from .utils.generic_utils import get_from_module
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'optimizer', instantiate=True, kwargs=kwargs)
| mit | 6,855,293,725,898,082,000 | 32.122642 | 104 | 0.548277 | false |
errikos/amtt | amtt/exporter/isograph/__init__.py | 1 | 2915 | """Exporter module for Isograph Availability Workbench."""
import logging
import networkx as nx
from itertools import count
from amtt.translator.ir import component_basename
from amtt.exporter import Exporter
from amtt.exporter.isograph.emitter.xml import XmlEmitter
from amtt.exporter.isograph.rbd import Rbd
from amtt.exporter.isograph.failure_models import fm_export
_logger = logging.getLogger(__name__)
class IsographExporter(Exporter):
"""Exporter to export the model to Isograph."""
def __init__(self, translator):
"""Initialize IsographExporter."""
self._translator = translator
self._emitter = XmlEmitter(translator.output_basedir)
@staticmethod
def normalize_block_names(ir_container):
"""Normalize the component (block) names.
Isograph imposes a 40 character limit for the component names.
In case the model uses template components, there is a big chance that
the names will grow very big in length. Therefore, we store the
base name in the description field and assign a unique integer (ID)
as the components name.
"""
g = ir_container.component_graph
if ir_container.uses_templates:
_logger.info('Template usage detected:')
_logger.info(' * Normalizing component names for Isograph')
# Create relabeling mapping.
# Each component name will be replaced with a number (ID).
relabel_mapping = {n: c for n, c in zip(g.nodes_iter(), count(1))}
del relabel_mapping['ROOT'] # We don't want to relabel ROOT
# Relabel and rename components graph
# -- copy=False means "relabel in-place"
nx.relabel_nodes(g, relabel_mapping, copy=False)
for u, v in nx.bfs_edges(g, 'ROOT'):
# -- get a hold of the associated object
vo = g.node[v]['obj']
# -- set base name as description
vo.description = component_basename(vo.name)
# -- set ID number as name
vo.name = v
# Note: No need to relabel or rename failures graph
def export(self):
"""Export the model to Isograph importable format."""
# Normalize block names, if necessary
self.normalize_block_names(self._translator.ir_container)
# Export RBD (blocks, nodes, connections)
self._export_rbd()
# Export failure model definitions
self._export_failure_models()
# Write output file
self._emitter.commit()
def _export_rbd(self):
# Create block diagram from input
rbd = Rbd()
rbd.from_ir_container(self._translator.ir_container)
# Dump reliability block diagram to output
rbd.serialize(self._emitter)
def _export_failure_models(self):
fm_export(self._translator.ir_container, self._emitter)
| gpl-3.0 | 8,075,975,394,118,118,000 | 39.486111 | 78 | 0.64048 | false |
corredD/upy | autodeskmaya/mayaHelper.py | 1 | 118218 |
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/autodeskmaya/mayaHelper.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 5 23:30:44 2010
@author: Ludovic Autin - [email protected]
"""
import sys, os, os.path, struct, math, string
from math import *
#import numpy
from types import StringType, ListType
import maya
from maya import cmds,mel,utils
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import maya.OpenMayaFX as omfx
import pymel.core as pm
#base helper class
from upy import hostHelper
if hostHelper.usenumpy:
import numpy
from numpy import matrix
from upy.hostHelper import Helper
lefthand =[[ 1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]]
from upy.transformation import decompose_matrix
class MayaSynchro:
#period problem
def __init__(self,cb=None, period=0.1):
self.period = period
self.callback = None
self.timeControl = oma.MAnimControl()
if cb is not None :
self.doit = cb
def change_period(self,newP):
self.period = newP
self.remove_callback()
self.set_callback()
def set_callback(self):
self.callback = om.MTimerMessage.addTimerCallback(self.period,self.doit)
def remove_callback(self):
om.MMessage.removeCallback(self.callback)
def doit(self,*args,**kw):#period,time,userData=None):
pass
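# Hedged usage sketch for MayaSynchro (assumes a running Maya session; the
# callback is invoked by om.MTimerMessage.addTimerCallback, whose arguments
# are the elapsed time, the last call time and optional client data):
#   def _tick(elapsedTime, lastTime, clientData=None):
#       print "synchro tick", elapsedTime
#   sync = MayaSynchro(cb=_tick, period=0.5)
#   sync.set_callback()
#   # ... later, stop the timer:
#   sync.remove_callback()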
class mayaHelper(Helper):
"""
The maya helper abstract class
============================
This is the maya helper Object. The helper
    gives access to the basic functions needed to create and edit a host 3d object and scene.
"""
SPLINE = "kNurbsCurve"
INSTANCE = "kTransform"
MESH = "kTransform"
POLYGON = "kMesh"#"kTransform"
# MESH = "kMesh"
EMPTY = "kTransform"
BONES="kJoint"
PARTICULE = "kParticle"
SPHERE = "sphere"
CYLINDER = "cylinder"
CUBE = "cube"
IK="kIkHandle"
msutil = om.MScriptUtil()
pb = False
pbinited = False
host = "maya"
def __init__(self,master=None,**kw):
Helper.__init__(self)
self.updateAppli = self.update
self.Cube = self.box
self.Box = self.box
self.Geom = self.newEmpty
#self.getCurrentScene = c4d.documents.GetActiveDocument
self.IndexedPolygons = self.polygons
self.Points = self.PointCloudObject
self.pb = True
self.hext = "ma"
self.timeline_cb={}
self.LIGHT_OPTIONS = {"Area" : maya.cmds.ambientLight,
"Sun" : maya.cmds.directionalLight,
"Spot":maya.cmds.spotLight}
def fit_view3D(self):
pass#
def resetProgressBar(self,max=None):
"""reset the Progress Bar, using value"""
if self.pb :
gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar');
maya.cmds.progressBar(gMainProgressBar, edit=True, endProgress=True)
self.pbinited = False
# self.pb = False
# maya.cmds.progressBar(maya.pb, edit=True, maxValue=max,progress=0)
def progressBar(self,progress=None,label=None):
""" update the progress bar status by progress value and label string
@type progress: Int/Float
@param progress: the new progress
@type label: string
@param label: the new message to put in the progress status
"""
if self.pb :
gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar');
if not self.pbinited :
cmds.progressBar( gMainProgressBar,
edit=True,
beginProgress=True,
isInterruptable=False,
status=label,
maxValue=100)
# if progress == 1 :
# prev = cmds.progressBar(gMainProgressBar,q=1,progress=1)
# progress = prev/100. + 0.1
# progress*=100.
if label is not None and progress is None :
cmds.progressBar(gMainProgressBar, edit=True, status = label)
elif label is not None and progress is not None:
cmds.progressBar(gMainProgressBar, edit=True, progress=progress*100.,status = label)
elif label is None and progress is not None:
cmds.progressBar(gMainProgressBar, edit=True, progress=progress*100.)
if progress == 1 or progress == 100.:
self.resetProgressBar()
#maxValue = 100
#did not work
#maya.cmds.progressBar(maya.pb, edit=True, progress=progress*100)
# cmds.progressBar(maya.pb, edit=True, step=1)
#maya.cmds.progressBar(maya.pb, edit=True, step=1)
def synchronize(self,cb):
self.timeline_cb[cb] = MayaSynchro(cb=cb,period=0.05)
self.timeline_cb[cb].set_callback()
def unsynchronize(self,cb):
self.timeline_cb[cb].remove_callback()
def update(self,):
#how do I update the redraw
cmds.refresh()
def updateAppli(self,):
#how do I update the redraw
cmds.refresh()
def checkName(self,name):
invalid=[]
        if name is None :
print ("None name or not a string",name)
return ""
#sometime the name is a list ie [u'name']
if type(name) is list or type(name) is tuple :
if len(name) == 1 :
name = name[0]
elif len(name) == 2 :
name = name[1]#transform node
else :
name = name[0] #?
if (type(name) is not str and type(name) is not unicode) :
print ("not a string",name,type(name))
return ""
if not len(name):
print ("empty name",name)
        for i in range(10):  #names cannot start with a digit
invalid.append(str(i))
if type(name) is list or type(name) is tuple:
name = name[0]
if type(name) is not str and type(name) is not unicode:
name = name.name()
if len(name) and name[0] in invalid:
name= name[1:]
#also remove some character and replace it by _
name=name.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
return name
def setCurrentSelection(self,obj):
if obj is None :
return
if type (obj) is list or type (obj) is tuple :
for o in obj :
cmds.select(self.getObject(o))
else :
cmds.select(self.getObject(obj))
def getCurrentSelection(self):
slist = om.MSelectionList()
if not slist :
return []
om.MGlobal.getActiveSelectionList(slist)
selection = []
slist.getSelectionStrings(selection)
return selection
def checkPrimitive(self,object):
try :
cmds.polySphere(object,q=1,r=1)
return "sphere"
except :
pass
try :
cmds.sphere(object,q=1,r=1)
return "sphere"
except :
pass
try :
cmds.polyCube(object,q=1,w=1)
return "cube"
except :
pass
try :
cmds.polyCylinder(object,q=1,r=1)
return "cylinder"
except :
pass
return None
def getType(self,object):
#first tryto see if isa primitive
prim = self.checkPrimitive(object)
if prim is not None :
return prim
object = self.getNode(object)
if hasattr(object,"apiTypeStr"):
# print (object.apiTypeStr())
return object.apiTypeStr()
else :
# print (type(object))
return type(object)
# return type(object)
def getMName(self,o):
return o.name()
def setName(self,o,name):
if o is None :
return
cmds.rename( self.checkName(o), name, ignoreShape=False)
def getName(self,o):
if o is None: return ""
if type(o) == str or type(o) == unicode :
name = o.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
elif type(o) == unicode : name = o
elif type(o) is om.MFnMesh:
return o
elif hasattr(o,"name") :
if type(o.name) == str :
return o.name
else : return o.name()
elif type(o) is list or type(o) is tuple:
name=o[0]
else : name=o
return name
def getMObject(self,name):
# Create a selection list, get an MObject of the nodes which name is name
selectionList = om.MSelectionList()
selectionList.add( name ) #should be unic..
node = om.MObject()
selectionList.getDependNode( 0, node )
#//Create a function set, connect to it,
fnDep = om.MFnDependencyNode(node)
#print fnDep.name() #object name
#print fnDep.typeName() #type name ie mesh, transform etc..
return node,fnDep
def getObject(self,name,doit=True):
if type(name) is list or type(name) is tuple :
if len(name) == 1 :
name = name[0]
elif len(name) == 2 :
name = name[1]#transform node
else :
name = name[0] #?
name=self.checkName(name)
if name.find(":") != -1 :
name=name.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
if doit :
name=cmds.ls(name)
if len(name)==0:
return None
if len(name) == 1 :
return name[0]
return name
def checkIsMesh(self,poly):
if type(poly) is str or type(poly) is unicode :
mesh = self.getMShape(poly)#dagPath
else :
#have to a object shape node or dagpath
mesh = poly
try :
meshnode = om.MFnMesh(mesh)
return meshnode
except :
return mesh
def getMesh(self,name):
mesh = None
if type(name) != str:
return name
# path = om.MDagPath()
try :
name = self.checkName(name)
mesh = cmds.ls(name)#NMesh.GetRaw(name)
except:
mesh = None
return mesh
def getMeshFrom(self,obj):
if type(obj) is not str and type(obj) is not unicode:
obj = self.getMName(obj)
return self.getMShape(obj)
def getTransformNode(self,name):
if type(name) is list :
name = name[0]
if type(name) is str or type(name) is unicode :
name = self.checkName(name)
node = self.getNode(name)
else :
node = name
dag = om.MFnDagNode(node)
path = om.MDagPath()
dag.getPath(path)
return path.transform(),path
def getMShape(self,name,):
# print name,type(name)
if type(name) is list :
name = name[0]
if type(name) is str or type(name) is unicode :
name = self.checkName(name)
node = self.getNode(name)
else :
node = name
dag = om.MFnDagNode(node)
path = om.MDagPath()
dag.getPath(path)
# self.msutil.createFromInt(0)
# pInt = self.msutil.asUintPtr()
# path.numberOfShapesDirectlyBelow(pInt)
try :
path.extendToShape()
return path
except :
# if self.msutil.getUint(pInt) == 0 :
node = path.child(0)
return self.getMShape(node)
#problem with primitive
# try :
# path.extendToShape()
# except :
# path = None
# return path
def deleteObject(self,obj):
sc = self.getCurrentScene()
if type(obj) is str or type(obj) is unicode:
obj=self.checkName(obj)
else :
if type(obj) is list or type(obj) is tuple :
for o in obj :
self.deleteObject(o)
else :
obj = obj.name()
try :
#print "del",obj
cmds.delete(obj)
except:
print "problem deleting ", obj
#######Special for maya#######################
def getNode( self,name ):
# print "getNode",type(name)
# if type(name) != str :
# return name
name = self.checkName(name)
selectionList = om.MSelectionList()
selectionList.add( name )
node = om.MObject()
selectionList.getDependNode( 0, node )
return node
def getNodePlug(self, attrName, nodeObject ):
"""
example:
translatePlug = nameToNodePlug( "translateX", perspNode )
print "Plug name: %s" % translatePlug.name()
print "Plug value %g" % translatePlug.asDouble()
"""
depNodeFn = om.MFnDependencyNode( nodeObject )
attrObject = depNodeFn.attribute( attrName )
plug = om.MPlug( nodeObject, attrObject )
return plug
################################################
def newLocator(self,name,location=None,**kw):
name = self.checkName(name)
if name.find(":") != -1 : name=name.replace(":","_")
empty=cmds.spaceLocator( n=name, a=True)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.reParent(empty,parent)
return str(empty)
def newEmpty(self,name,location=None,**kw):
#return self.newLocator(name,location=location, **kw)
name = self.checkName(name)
if name.find(":") != -1 : name=name.replace(":","_")
empty=cmds.group( em=True, n=name)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.reParent(empty,parent)
return str(empty)
def updateMasterInstance(self,master, newobjects,instance=True, **kw):
"""
Update the reference of the passed instance by adding/removing-hiding objects
* overwrited by children class for each host
>>> sph = helper.Sphere("sph1")
>>> instance_sph = helper.newInstance("isph1",sph,location = [10.0,0.0,0.0])
        @type master: string/hostObj
        @param master: name of the master instance to update
        @type newobjects: list hostObject/string
        @param newobjects: the list of objects to use as the new reference geometry
        @type instance: bool
        @param instance: if True the new objects are added as instances under the master,
          else they are simply re-parented
        @type kw: dictionary
        @param kw: you can add your own keyword, but it should be interpreted by all host
"""
#the instance shoud point to an empy that have shape as child
#what we should do is eitherduplicae or reParent the the new object under this master parent
#or usethe replace command ? use particule ?
#replace the mesh node of the master by the given ones....
#hide and remove every previous children....
chs = self.getChilds(master)
for o in chs :
r=cmds.duplicate(o, renameChildren=True)
print r
cmds.delete(chs)#or move or uninstance ?
if instance :
n=[]
for o in newobjects :
name = self.getName(master)+"Instance"
i1=self.getObject(name+"1")
if i1 is not None :
cmds.delete(i1)
i=self.newInstance(name,o,parent=master)
else :
self.reParent(newobjects,master)
def newMInstance(self,name,object,location=None,
hostmatrice=None,matrice=None,parent=None,**kw):
#first create a MObject?
#only work on Mes
name = self.checkName(name)
fnTrans = om.MFnTransform()
minstance = fnTrans.create()
fnTrans.setName(name)
#now add the child as an instance.
#print fnTrans.name()
#is this will work withany object ?
object=self.getNode(object)#or the shape ?
fnTrans.addChild(object,fnTrans.kNextPos,True)
#print name, object , fnTrans
if matrice is not None and isinstance(matrice,om.MTransformationMatrix):
hostmatrice=matrice
matrice = None
if hostmatrice is not None and not isinstance(hostmatrice,om.MTransformationMatrix):
matrice = hostmatrice
hostmatrice = None
if location is not None :
fnTrans.setTranslation(self.vec2m(location),om.MSpace.kPostTransform)
elif hostmatrice is not None :
fnTrans.set(hostmatrice)
elif matrice is not None :
#first convert
hmatrice = self.matrixp2m(matrice)
fnTrans.set(hmatrice)
if parent is not None:
mparent = self.getNode(parent)
# onode = om.MFnDagNode(mobj)
# print "name",fnTrans.name()
oparent = om.MFnDagNode(mparent)
oparent.addChild(self.getNode(fnTrans.name()),oparent.kNextPos,False)
return fnTrans.name()
def newInstance(self,name,object,location=None,hostmatrice=None,matrice=None,
parent=None,material=None,**kw):
#instance = None#
#instance parent = object
#instance name = name
# return self.newMInstance(name,object,location=location,
# hostmatrice=hostmatrice,matrice=matrice,parent=parent,**kw)
#
name = self.checkName(name)
instance = cmds.instance(object,name=name)
if location != None :
#set the position of instance with location
cmds.move(float(location[0]),float(location[1]),float(location[2]), name,
absolute=True )
if matrice is not None :
if self._usenumpy :
#matrice = numpy.array(matrice)#matrix(matrice)*matrix(lefthand)#numpy.array(matrice)
#transpose only rotation
matrice = numpy.array(matrice).transpose()#we do transpoe hee
#m = matrice.copy()
# m[0,:3]=matrice[0,:3]#thi work with numpy
# m[1,:3]=matrice[1,:3]
# m[2,:3]=matrice[2,:3]
#matrice[:3,:3] = matrice[:3,:3].transpose()
hm = matrice.reshape(16,).tolist()
#shoudl I apply some transformatio first ?
cmds.xform(name, a=True, m=hm,roo="xyz")#a for absolute
else :
self.setTransformation(instance[0],mat=matrice)
#set the instance matrice
#self.setObjectMatrix(self,object,matrice=matrice,hostmatrice=hostmatrice)
if parent is not None:
self.reParent(instance,parent)
if material is not None:
self.assignMaterial(instance,material)
return instance
#alias
setInstance = newInstance
def matrixToParticles(self,name,matrices,vector=[0.,1.,0.],transpose=True,**kw):#edge size ?
#blender user verex normal for rotated the instance
#quad up vector should use the inpu vector
axe=self.rerieveAxis(vector)
#axe="+Y"
quad=numpy.array(self.quad[axe])#*10.0
print ("matrixToParticles",axe,vector,quad)
# f=[0,1,2,3]
v=[]
f=[]
e=[]
n=[]
vi=0
#one mat is
#rot[3][:3] tr
# rot[:3,:3] rot
#create particle system
# obj = self.checkName(obj)
# partO=self.getMShape(obj) #shape..
# fnP = omfx.MFnParticleSystem(partO)
# oriPsType = fnP.renderType()
rot=om.MVectorArray()#fnP.count())
pos=om.MVectorArray()#fnP.count())
tr=[]
#set position and rotation
for i,m in enumerate(matrices):
mat = numpy.array(m)
if transpose :
mat = numpy.array(m).transpose()
# t = m[3][:3]
# rot = m[:3,:3]
scale, shear, euler, translate, perspective=decompose_matrix(mat)
tr.append(translate.tolist())
#need euler angle
# e=self.FromMat(rot).rotation().asEulerRotation()
p = om.MVector( float(translate[0]),float(translate[1]),float(translate[2]) )
pos.append(p)
r = om.MVector( float(euler[0]),float(euler[1]),float(euler[2]) )/(math.pi) *180
rot.append(r)
# fnP.setPerParticleAttribute("rotationPP",rot)
# fnP.setPerParticleAttribute("position",pos)
part,partShape= pm.nParticle(n=name+"_ps",position = tr)
# part,partShape=cmds.particle(n=name+"_ps",p=list(tr))
pm.setAttr('nucleus1.gravity', 0.0)#?
# cmds.setAttr(partShape+'.computeRotation',1)
partShape.computeRotation.set(True)
pm.addAttr(partShape, ln = 'rotationPP', dt = 'vectorArray')
pm.addAttr(partShape, ln = 'rotationPP0', dt = 'vectorArray')
particle_fn = omfx.MFnParticleSystem(partShape.__apimobject__())
particle_fn.setPerParticleAttribute('rotationPP', rot)
particle_fn.setPerParticleAttribute('rotationPP0', rot)
if 'parent' in kw and kw['parent'] is not None:
parent = self.getObject(kw['parent'])
self.reParent(name+"_ps",parent)
return part,partShape
#particleInstancer -addObject
#-object locator1 -cycle None -cycleStep 1 -cycleStepUnits Frames
#-levelOfDetail Geometry -rotationUnits Degrees
#-rotationOrder XYZ -position worldPosition -age age crn_A_clouddsShape;
def instancePolygon(self,name, matrices=None,hmatrices=None, mesh=None,parent=None,
transpose=False,globalT=True,**kw):
hm = False
if hmatrices is not None :
matrices = hmatrices
hm = True
if matrices == None : return None
if mesh == None : return None
instance = []
#print len(matrices)#4,4 mats
if self.instance_dupliFace:
v=[0.,1.,0.]
if "axis" in kw and kw["axis"] is not None:
v=kw["axis"]
print ("axis",v)
o = self.getObject(name+"_pis")
if o is None :
# o,m=self.matrixToVNMesh(name,matrices,vector=v)
particle,partShape=self.matrixToParticles(name,matrices,vector=v,
transpose=transpose,parent=parent)
p_instancer = pm.PyNode(pm.particleInstancer(
partShape, addObject=True, object=pm.ls(mesh),name=name+"_pis",
cycle='None', cycleStep=1, cycleStepUnits='Frames',
levelOfDetail='Geometry', rotationUnits='Degrees',
rotationOrder='XYZ', position='worldPosition', age='age'))
pm.particleInstancer(partShape, name = p_instancer, edit = True, rotation = "rotationPP")
if parent is not None :
self.reParent(name+"_pis",parent)
# cmds.particleInstancer(
# partShape, addObject=True, object=self.getMShape(mesh),
# cycle='None', cycleStep=1, cycleStepUnits='Frames',
# levelOfDetail='Geometry', rotationUnits='Degrees',
# rotationOrder='XYZ', position='worldPosition', age='age')
# cmds.particleInstancer(partShape, name = "p_instancer",
# edit = True, rotation = "rotationPP")
else :
#update
pass
return name+"_pis"
#rotation checkbox->use normal
else :
for i,mat in enumerate(matrices):
inst = self.getObject(name+str(i))
if inst is None :
#Minstance?
if hm :
inst=self.newInstance(name+str(i),mesh,hostmatrice=mat,
parent=parent,globalT=globalT)
else :
inst=self.newInstance(name+str(i),mesh,matrice=mat,
parent=parent,globalT=globalT)
instance.append(inst)
return instance
def resetTransformation(self,name):
m= [1.,0.,0.,0.,
0.,1.,0.,0.,
0.,0.,1.,0.,
            0.,0.,0.,1.]
cmds.xform(name, a=True, m=m)
def setObjectMatrix(self,object,matrice,hostmatrice=None,**kw):
"""
set a matrix to an hostObject
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
#have to manipulate the DAG/upper transform node...
#let just take the owner Transofrm node of the shape
#we should be able to setAttr either 'matrix' or 'worldMatrix'
object = self.getObject(object)
if hostmatrice !=None :
#set the instance matrice
matrice=hostmatrice
if matrice != None:
#convert the matrice in host format
#set the instance matrice
pass
transpose = True
if "transpose" in kw :
transpose = kw["transpose"]
if matrice is not None :
if self._usenumpy :
#matrice = numpy.array(matrice)#matrix(matrice)*matrix(lefthand)#numpy.array(matrice)
#transpose only rotation
matrice = numpy.array(matrice)
if transpose :
matrice=matrice.transpose()#we do transpoe hee
#m = matrice.copy()
# m[0,:3]=matrice[0,:3]#thi work with numpy
# m[1,:3]=matrice[1,:3]
# m[2,:3]=matrice[2,:3]
#matrice[:3,:3] = matrice[:3,:3].transpose()
hm = matrice.reshape(16,).tolist()
#shoudl I apply some transformatio first ?
cmds.xform(object, a=True, m=hm,roo="xyz")#a for absolute
else :
self.setTransformation(object,mat=matrice)
def concatObjectMatrix(self,object,matrice,hostmatrice=None):
"""
apply a matrix to an hostObject
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
#get current transformation
if hostmatrice !=None :
#compute the new matrix: matrice*current
#set the new matrice
pass
if matrice != None:
#convert the matrice in host format
#compute the new matrix: matrice*current
#set the new matrice
pass
def addObjectToScene(self,doc,obj,parent=None,**kw):
#its just namely put the object under a parent
#return
if obj == None : return
if parent is not None :
if type(obj) is list or type(obj) is tuple :
if len(obj) == 1 :
obj = obj[0]
elif len(obj) == 2 :
obj = obj[1]#transform node
else :
obj = obj[0] #?
obj=self.checkName(obj)
parent=self.checkName(parent)
#print obj,parent
# cmds.parent( obj, parent)
self.parent(obj, parent)
def parent(self,obj,parent,instance=False):
if type(parent) == unicode :
parent = str(parent)
if type(parent) != str :
print ("parent is not String ",type(parent))
return
# print ("parenting ", obj,parent, instance )
mobj = self.getNode(obj)
mparent = self.getNode(parent)
# onode = om.MFnDagNode(mobj)
oparent = om.MFnDagNode(mparent)
# print ("parenting dag node", obj,parent, mobj,oparent.kNextPos,instance )
oparent.addChild(mobj,oparent.kNextPos,instance)
def reParent(self,obj,parent,instance=False):
if parent == None :
print ("parent is None")
return
if type(obj) is not list and type(obj) is not tuple :
obj = [obj,]
try :
[self.parent(o,parent,instance=instance) for o in obj]
except :
print ("failure")
def getChilds(self,obj):
if type(obj) is str or type(obj) is unicode:
o = self.checkName(obj)
else :
o = self.getName(obj)
childs= cmds.listRelatives(o, c=True)
if childs is None :
return []
else :
return childs
def addCameraToScene(self,name,Type='persp',focal=30.0,center=[0.,0.,0.],sc=None):
# Create a camera and get the shape name.
cameraName = cmds.camera(n=name)
cameraShape = cameraName[1]
# Set the focal length of the camera.
cmds.camera(cameraShape, e=True, fl=focal)
#change the location
cmds.move(float(center[0]),float(center[1]),float(center[2]), cameraName[0], absolute=True )
#should I rotate it
cmds.rotate( 0, '0', '360deg',cameraName[0] )
# Change the film fit type.
#cmds.camera( cameraShape, e=True, ff='overscan' )
return cameraName
def addLampToScene(self,name,Type='Area',rgb=[1.,1.,1.],dist=25.0,energy=1.0,
soft=1.0,shadow=False,center=[0.,0.,0.],sc=None,**kw):
#print Type
#each type have a different cmds
lcmd = self.LIGHT_OPTIONS[Type]
light = lcmd(n=name)
# light = cmds.pointLight(n=name)
#cmds.pointLight(light,e=1,i=energy,rgb=rgb,ss=soft,drs=dist)
lcmd(light,e=1,i=energy)
lcmd(light,e=1,ss=soft)
# cmds.pointLight(light,e=1,drs=dist)
lcmd(light,e=1,rgb=rgb)
cmds.move(float(center[0]),float(center[1]),float(center[2]), light, absolute=True )
return light
def toggleDisplay(self,ob,display,**kw):
# ob = self.getObject(ob)
# if ob is None :
# return
# ob=self.checkName(ob)
# if display :
# cmds.showHidden(ob)
# else :
# cmds.hide(ob)
if ob is None :
return
node = self.getNode(self.checkName(ob))
if node is None :
return
attrDis = self.getNodePlug("visibility",node)
attrDis.setBool(bool(display))
# def toggleXray(self,object,xray):
# o = self.getObject(object)
# cmds.select(o)
# cmds.displySurface(xRay = True)
def getVisibility(self,obj,editor=True, render=False, active=False):
#0 off, 1#on, 2 undef
node = self.getNode(self.checkName(obj))
attrDis = self.getNodePlug("visibility",node)
if editor and not render and not active:
return attrDis.asBool()
elif not editor and render and not active:
return attrDis.asBool()
elif not editor and not render and active:
return attrDis.asBool()
else :
            return attrDis.asBool(),attrDis.asBool(),attrDis.asBool()
def getTranslation(self,name,absolue=True):
name = self.checkName(name)
return self.FromVec(cmds.xform(name,q=1,ws=int(absolue),t=1))
def getTranslationOM(self,name):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
return fnTrans.getTranslation(om.MSpace.kWorld)#kPostTransform)
def setTranslation(self,name,pos):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
newT = self.vec2m(pos)
fnTrans.setTranslation(newT,om.MSpace.kPostTransform)
def translateObj(self,obj,position,use_parent=False):
#is om would be faster ?
if len(position) == 1 : c = position[0]
else : c = position
#print "upadteObj"
newPos=c#c=c4dv(c)
o=self.getObject(obj)
if use_parent :
parentPos = self.getPosUntilRoot(obj)#parent.get_pos()
c = newPos - parentPos
cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
else :
cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
def scaleObj(self,obj,sc):
obj = self.checkName(obj)
if type(sc) is float :
sc = [sc,sc,sc]
cmds.scale(float(sc[0]),float(sc[1]),float(sc[2]), obj,absolute=True )
def getScale(self,name,absolue=True,**kw):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
# First create an array and a pointer to it
scaleDoubleArray = om.MScriptUtil()
scaleDoubleArray.createFromList( [0.0, 0.0, 0.0], 3 )
scaleDoubleArrayPtr = scaleDoubleArray.asDoublePtr()
# Now get the scale
fnTrans.getScale( scaleDoubleArrayPtr )
# Each of these is a decimal number reading from the pointer's reference
x_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 0 )
y_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 1 )
z_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 2 )
return [x_scale,y_scale,z_scale]#kPostTransform) or om.MVector(v[0], v[1], v[2])?
def getSize(self,obj):
        #return the bounding box size via the width/height/depth attributes
obj = self.checkName(obj)
meshnode = self.getMShape(obj)
try :
mesh = om.MFnMesh(meshnode)
except :
return [1,1,1]
obj = self.getMName(mesh)
x=cmds.getAttr(obj+'.width')
y=cmds.getAttr(obj+'.height')
z=cmds.getAttr(obj+'.depth')
return [x,y,z]
def rotateObj(self,obj,rot):
#take degree
obj = self.checkName(obj)
cmds.setAttr(obj+'.rx',degrees(float(rot[0])))
cmds.setAttr(obj+'.ry',degrees(float(rot[1])))
cmds.setAttr(obj+'.rz',degrees(float(rot[2])))
def getTransformation(self,name):
node = self.getNode(name)
fnTrans = om.MFnTransform(node)
mmat = fnTrans.transformation()
#maya matrix
return mmat
def setTransformation(self,name,mat=None,rot=None,scale=None,trans=None,order="str",**kw):
node = self.getNode(name)
fnTrans = om.MFnTransform(node)
if mat is not None :
if isinstance(mat,om.MTransformationMatrix):
fnTrans.set(mat)
else :
fnTrans.set(self.matrixp2m(mat))
if trans is not None :
fnTrans.setTranslation(self.vec2m(trans),om.MSpace.kPostTransform)
if rot is not None :
rotation = om.MEulerRotation (rot[0], rot[1], rot[2])
fnTrans.setRotation(rotation)
if scale is not None :
fnTrans.setScale(self.arr2marr(scale))
def ObjectsSelection(self,listeObjects,typeSel="new"):
"""
Modify the current object selection.
        @type listeObjects: list
        @param listeObjects: list of objects to select
        @type typeSel: string
        @param typeSel: type of modification: new, add, ...
"""
dic={"add":True,"new":False}
sc = self.getCurrentScene()
for obj in listeObjects:
cmds.select(self.getObject(obj),add=dic[typeSel])
#Put here the code to add/set an object to the current slection
#[sc.SetSelection(x,dic[typeSel]) for x in listeObjects]
def JoinsObjects(self,listeObjects):
"""
Merge the given liste of object in one unique geometry.
@type listeObjects: list
@param listeObjects: list of object to joins
"""
sc = self.getCurrentScene()
#put here the code to add the liste of object to the selection
cmds.select(self.getObject(listeObjects[0]))
for i in range(1,len(listeObjects)):
cmds.select(listeObjects[i],add=True)
cmds.polyUnite()
#no need to joins? but maybe better
#then call the command/function that joins the object selected
# c4d.CallCommand(CONNECT)
#need face indice
def color_mesh_perVertex(self,mesh,colors,faces=None,perVertex=True,
facesSelection=None,faceMaterial=False):
        if type(colors[0]) is not list and len(colors) == 3 :
colors = [colors,]
if not isinstance(mesh,maya.OpenMaya.MFnMesh):
if self.getType(mesh) != self.POLYGON and self.getType(mesh) != self.MESH:
return False
mcolors=om.MColorArray()
iv=om.MIntArray()
meshnode = mesh
# print mesh
if type(mesh) is str or type(mesh) is unicode :
meshnode = self.getMShape(mesh)
try :
mesh = om.MFnMesh(meshnode)
except:
return False
mesh.findPlug('displayColors').setBool(True)
if not isinstance(mesh,maya.OpenMaya.MFnMesh):
return
nv=mesh.numVertices()
nf=mesh.numPolygons()
mfaces = self.getMeshFaces(meshnode)
if facesSelection is not None :
if type(facesSelection) is bool :
fsel,face_sel_indice = self.getMeshFaces(mesh,selected=True)
else :
face_sel_indice = facesSelection
fsel=[]
for i in face_sel_indice:
fsel.append(mfaces[i])
vsel=[]
for f in fsel:
for v in f:
if v not in vsel:
vsel.append(v)
mfaces = fsel
nf = len(fsel)
nv = len(vsel)
# print "selected ",face_sel_indice
#check if its ok
if len(colors) == nv:
perVertex = True
elif len(colors) == nf:
perVertex = False
if perVertex:
N=range(nv)
else :
N=range(nf)
if facesSelection is not None :
N = face_sel_indice
perVertex = False
for k,i in enumerate(N) :
if len(colors) == 1 : ncolor = colors[0]
else :
if k >= len(colors) :
ncolor = [0.,0.,0.] #problem
else :
ncolor = colors[i]
#print ncolor
#if max(ncolor) < 1 : ncolor = map( lambda x: x*255, ncolor)
col=om.MColor(float(ncolor[0]),float(ncolor[1]),float(ncolor[2]))
#print ncolor
mcolors.append(col)
iv.append(int(i))
# print "i",i,ncolor
#mesh.setVertexColor(col,int(i))
if perVertex:
mesh.setVertexColors(mcolors,iv)
else :
# print iv#should be the fdace index
mesh.setFaceColors(mcolors,iv)
return True
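    # Usage sketch for color_mesh_perVertex (hedged example; assumes an instance
    # `helper` of this class and an existing polygon "myMesh" in the scene,
    # names and values are illustrative only):
    #   helper.color_mesh_perVertex("myMesh", [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    # one color per vertex selects per-vertex mode, one color per face per-face mode.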
###################MATERIAL CODE FROM Rodrigo Araujo#####################################################################################
#see http://linil.wordpress.com/2008/01/31/python-maya-part-2/
def createMaterial(self, name, color, type ):
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
if len(mat)==0: #create only if mat didnt exist already
#shading group
shaderSG = cmds.sets(renderable=True, noSurfaceShader=True, empty=True,
name=name+"SG" )
#material
cmds.shadingNode( type, asShader=True, name=name )
#phong ?
#cmds.setAttr((shader+ '.reflectivity'), 0)# no rayTrace
#cmds.setAttr((shader+ '.cosinePower'), 3)
cmds.setAttr( name+".color", color[0], color[1], color[2],
type="double3")
cmds.connectAttr(name+".outColor", shaderSG+".surfaceShader")
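    # Usage sketch for createMaterial/assignMaterial (hedged example; assumes an
    # instance `helper` of this class and an object "pSphere1" in the scene):
    #   helper.createMaterial("redMat", (1., 0., 0.), "lambert")
    #   helper.assignMaterial("pSphere1", "redMat")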
def createTexturedMaterial(self,name,filename):
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
if len(mat)==0: #create only if mat didnt exist already
#shading group
shaderSG = cmds.sets(renderable=True, noSurfaceShader=True, empty=True,
name=name+"SG" )
#material
cmds.shadingNode("lambert", asShader=True, name=name )
cmds.connectAttr(name+".outColor", shaderSG+".surfaceShader")
#create the texture and connect it
texture = cmds.shadingNode('file', asTexture=True,name=name+"Texture")
cmds.connectAttr(name+"Texture"+'.outColor', name+".color")
cmds.setAttr(name+"Texture"+'.fileTextureName', filename, type='string')
return name
def create_mMayaMaterials(self):
existingSGs = cmds.ls(type = 'shadingEngine')
shaderHits = 0;
shaderSG, shaderSGAmbOcc, ambOcc, ramp = '', '', '', ''
for existingSG in existingSGs:
if mel.eval('attributeExists mMaya_atomShaderSG ' +existingSG):
shaderSG = existingSG
shaderHits += 1
if mel.eval('attributeExists mMaya_atomShaderSGAmbOcc ' +existingSG):
shaderSGAmbOcc = existingSG
shaderHits += 1
existingAmbOccs = cmds.ls(type = 'mib_amb_occlusion')
for existingAmbOcc in existingAmbOccs:
if mel.eval('attributeExists mMaya_atomShaderAmbOcc ' +existingAmbOcc):
ambOcc = existingAmbOcc
shaderHits += 1
existingRamps = cmds.ls(type = 'ramp')
for existingRamp in existingRamps:
if mel.eval('attributeExists mMaya_atomShaderRGBRamp ' +existingRamp):
ramp = existingRamp
shaderHits += 1
if shaderHits == 4:
return shaderSG, shaderSGAmbOcc, ambOcc, ramp
elif shaderHits == 0:
shader = cmds.shadingNode('phong', asShader = 1, name = ("atomShader"))
cmds.setAttr((shader+ '.reflectivity'), 0)# no rayTrace
cmds.setAttr((shader+ '.cosinePower'), 3)
shaderSG = cmds.sets(renderable = 1, noSurfaceShader = 1, empty = 1)
cmds.addAttr(shaderSG, ln = 'mMaya_atomShaderSG', at = 'bool', h = 1)
cmds.connectAttr((shader+ '.outColor'), (shaderSG+ '.surfaceShader'))
shaderAmbOcc = cmds.shadingNode('phong', asShader = 1, name = ("atomShaderAmbOcc"))
cmds.setAttr((shaderAmbOcc+ '.reflectivity'), 0)
cmds.setAttr((shaderAmbOcc+ '.cosinePower'), 3)
cmds.setAttr((shaderAmbOcc+ '.ambientColor'), 0.7, 0.7, 0.7)
cmds.setAttr((shaderAmbOcc+ '.diffuse'), 0.2)
ambOcc = cmds.createNode('mib_amb_occlusion')
cmds.addAttr(ambOcc, ln = 'mMaya_atomShaderAmbOcc', at = 'bool', h = 1)
cmds.connectAttr((ambOcc+ '.outValue'), (shaderAmbOcc+ '.color'))
cmds.connectAttr((shaderAmbOcc+ '.color'), (shaderAmbOcc+ '.specularColor'))
partySampler = cmds.createNode('particleSamplerInfo')
cmds.connectAttr((partySampler+ '.outTransparency'), (shader+ '.transparency'))
cmds.connectAttr((partySampler+ '.outIncandescence'), (shader+ '.incandescence'))
cmds.connectAttr((partySampler+ '.outColor'), (shader+ '.color'))
cmds.connectAttr((partySampler+ '.outTransparency'), (shaderAmbOcc+ '.transparency'))
cmds.connectAttr((partySampler+ '.outIncandescence'), (shaderAmbOcc+ '.incandescence'))
cmds.connectAttr((partySampler+ '.outColor'), (ambOcc+ '.bright'))
shaderSGAmbOcc = cmds.sets(renderable = 1, noSurfaceShader = 1, empty = 1)
cmds.addAttr(shaderSGAmbOcc, ln = 'mMaya_atomShaderSGAmbOcc', at = 'bool', h = 1)
cmds.connectAttr((shaderAmbOcc+ '.outColor'), (shaderSGAmbOcc+ '.surfaceShader'))
ramp = cmds.createNode('ramp')
cmds.setAttr((ramp + '.interpolation'), 0)
cmds.addAttr(ramp, ln = 'mMaya_atomShaderRGBRamp', at = 'bool', h = 1)
valChangePMA = cmds.createNode('plusMinusAverage')
cmds.addAttr(valChangePMA, ln = 'mMaya_atomShaderRGBRampPMA', at = 'bool', h = 1)
cmds.connectAttr((ramp+ '.mMaya_atomShaderRGBRamp'), (valChangePMA+ '.mMaya_atomShaderRGBRampPMA'))
indexDivFactor = 1000.0;
for elem in elems:
indexElem = vanRad_CPK[elem][4]
col = vanRad_CPK[elem][1:-1]
cmds.setAttr((ramp + '.colorEntryList[' +str(indexElem)+ '].position'), (indexElem/indexDivFactor))
#cmds.setAttr((ramp + '.colorEntryList[' +str(indexElem)+ '].color'), col[0], col[1], col[2], type = 'double3')
shade = cmds.shadingNode('surfaceShader', asTexture = 1)
cmds.setAttr((shade + '.outColor'), col[0], col[1], col[2], type = 'double3')
cmds.connectAttr((shade+ '.outColor'), (ramp+ '.colorEntryList[' +str(indexElem)+ '].color'))
cmds.connectAttr((shade+ '.outColor'), (valChangePMA+ '.input3D[' +str(indexElem)+ ']'))
cmds.rename(shade, elems[elem])
return shaderSG, shaderSGAmbOcc, ambOcc, ramp
else:
mel.eval('error "a mMaya default shader has been deleted"')
def addMaterial(self, name, color ):
if color is None :
color = (1.,0.,0.)
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
if len(mat)==0: #create only if mat didnt exist already
#shading group
cmds.sets( renderable=True, noSurfaceShader=True, empty=True, name=name+"SG" )
#material
# = name[1:]
cmds.shadingNode( 'lambert', asShader=True, name=name )
cmds.setAttr( name+".color", color[0], color[1], color[2], type="double3")
cmds.connectAttr(name+".outColor", name+"SG.surfaceShader")
mat = cmds.ls(name, mat=True)
return mat
def assignMaterial(self,object,matname,texture = True,**kw):
object = self.getObject(object,doit=True)
#print "assign " , matname
#print matname
if type(matname) != list :
# name = name.replace(":","_")
matname = self.checkName(matname)
mat=cmds.ls(matname, mat=True)
else :
if type(matname[0]) is list :
mat = matname[0]
matname = str(matname[0][0])
else :
mat = matname
matname = str(matname[0])
#print "find " ,mat
matname = self.checkName(matname)
# if not mat:
# self.createMaterial (matname, (1.,1.,1.), 'lambert')
# conn = cmds.listConnections(cmds.listHistory(object))
## if len(conn) >= 2:
# shade = cmds.listHistory(object)[0].split('|')[1]
# cmds.hyperShade( matname,o=shade,assign=True )
#print 'assign ',object,matname
# print mat,matname
try :
cmds.sets(object, edit=True, forceElement=matname+"SG")
except :
print "problem assigning mat" + matname + " to object "+object
def assignNewMaterial(self, matname, color, type, object):
print matname, color, type, object
self.createMaterial (matname, color, type)
self.assignMaterial (object,matname)
def colorMaterial(self,matname, color):
matname=self.getMaterial(matname)
if len(matname)==1:
matname=matname[0]
cmds.setAttr( str(matname)+".color", color[0], color[1], color[2], type="double3")
def getMaterial(self,matname):
if type(matname) != str :
return matname
matname = self.checkName(matname)
mat=cmds.ls(matname, mat=True)
if len(mat)==0:
return None
else :
return mat
def getMaterialName(self,mat):
return str(mat)
def getAllMaterials(self):
#return unicode list of material
#mat=getMaterials()
matlist=cmds.ls(mat=True)#[]
return matlist
def getMaterialObject(self,obj):
obj = self.getObject(obj)
matnames = cmds.listConnections(cmds.listHistory(obj,f=1),type='lambert')
return matnames
def changeObjColorMat(self,obj,color):
#obj should be the object name, in case of mesh
#in case of spher/cylinder etc...atom name give the mat name
#thus matname should be 'mat_'+obj
obj = self.checkName(obj)
matname = "mat_"+str(obj)
self.colorMaterial(matname,color)
def changeColor(self,mesh,colors,perVertex=True,perObjectmat=None,pb=False,
facesSelection=None,faceMaterial=False):
#if hasattr(geom,'obj'):obj=geom.obj
#else : obj=geom
#mesh = self.getMesh(mesh)
        if type(colors[0]) is not list and len(colors) == 3 :
colors = [colors,]
print "change color",type(mesh),mesh
res = self.color_mesh_perVertex(mesh,colors,perVertex=perVertex,
facesSelection=facesSelection,
faceMaterial=faceMaterial)
if not res or len(colors) == 1:
#simply apply the color/material to mesh
#get object material, if none create one
# print "material assign"
mats = self.getMaterialObject(mesh)
# print mats
if not mats :
self.assignNewMaterial("mat"+self.getName(mesh), colors[0],
'lambert', mesh)
else :
self.colorMaterial(mats[0],colors[0])
def getMaterialProperty(self,material, **kw):
"""
        Get the properties of a material.
        * overwritten by children class for each host
        @type material: string/Material
        @param material: the material to query
- color
- specular
- ...
"""
mat =self.getMaterial(material)
if len(mat)==1:
mat=mat[0]
res = {}
if mat is None :
return
if "specular" in kw :
res["specular"] = True#mat[c4d.MATERIAL_USE_SPECULAR]
if "specular_color" in kw :
res["specular_color"] = [0,0,0]#self.ToVec(mat[c4d.MATERIAL_SPECULAR_COLOR],pos=False)
if "specular_width" in kw :
res["specular_width"] = 0#mat[c4d.MATERIAL_SPECULAR_WIDTH]
if "color" in kw :
res["color"] = cmds.getAttr( str(mat)+".color")[0]
if "diffuse" in kw :
res["diffuse"] = cmds.getAttr( str(mat)+".diffuse")[0]
return res
###################Meshs and Objects#####################################################################################
def Sphere(self,name,res=16.,radius=1.0,pos=None,color=None,
mat=None,parent=None,type="nurb"):
# iMe[atn],node=cmds.sphere(name=name+"Atom_"+atn,r=rad)
name = self.checkName(name)
t=res/100.
if type == "nurb" :
transform_node,shape = cmds.sphere(name=name,r=radius,sections=int(res),
spans=int(res)) #NurbSphere
elif type == "poly":
transform_node,shape = cmds.polySphere( n=name, r=radius,sx=int(res), sy=int(res))
#shape is name+"Shape"
if pos is not None :
cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),
transform_node,absolute=True )
if mat is not None :
mat = self.getMaterial(mat)
if mat is not None :
self.assignMaterial(transform_node,mat)
else :
if color is not None :
mat = self.addMaterial("mat"+name,color)
else :
mat = self.addMaterial("mat"+name,[1.,1.,0.])
# mat = self.getMaterial(name)
self.assignMaterial(transform_node,mat)
if parent is not None :
self.reParent(transform_node,parent)
return transform_node,shape
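    # Usage sketch for Sphere (hedged example; assumes an instance `helper` of this class):
    #   tr, shape = helper.Sphere("atom_C", res=16., radius=1.7,
    #                             pos=[0., 0., 0.], color=[0.3, 0.3, 0.3], type="poly")
    # returns the transform node name and the creation/shape node.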
def updateSphereMesh(self,mesh,verts=None,faces=None,basemesh=None,
scale=None,typ=True,**kw):
#scale or directly the radius..Try the radius
#scale is actualy the radius
# name = self.getObject(mesh)
#would it be faster with openMaya
mesh = self.checkName(mesh)
if typ:
cmds.sphere(mesh,e=1,r=scale)
else :
cmds.polySphere(mesh,e=1,r=scale)
def updateSphereObj(self,obj,coords=None):
if obj is None or coords is None: return
obj = self.getObject(obj)
#would it be faster we transform action
self.setTranslation(obj,coords)
# cmds.move(float(coords[0]),float(coords[1]),float(coords[2]), obj, absolute=True )
# def updateSphereObjs(self,g,coords=None):
# if not hasattr(g,'obj') : return
# if coords == None :
# newcoords=g.getVertices()
# else :
# newcoords=coords
# #print "upadteObjSpheres"
# #again map function ?
# for i,nameo in enumerate(g.obj):
# c=newcoords[i]
# o=getObject(nameo)
# cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
def instancesCylinder(self,name,points,faces,radii,
mesh,colors,scene,parent=None):
cyls=[]
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
cyl = self.oneCylinder(name+str(i),points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
instance=mesh,material=mat,parent = parent)
cyls.append(cyl)
return cyls
def updateInstancesCylinder(self,name,cyls,points,faces,radii,
mesh,colors,scene,parent=None,delete = True):
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
col=None
if i < len(colors):
col = colors[i]
if i < len(cyls):
self.updateOneCylinder(cyls[i],points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
material=mat,color=col)
self.toggleDisplay(cyls[i],True)
else :
cyl = self.oneCylinder(name+str(i),points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
instance=mesh,material=mat,parent = parent)
cyls.append(cyl)
        if len(faces) < len(cyls) :
            #delete or hide the extra ones; iterate backwards so pop() does not shift pending indices
            for i in range(len(cyls)-1,len(faces)-1,-1):
                if delete :
                    obj = cyls.pop(i)
                    self.deleteObject(obj)
                else :
                    self.toggleDisplay(cyls[i],False)
return cyls
def instancesSphere(self,name,centers,radii,meshsphere,colors,scene,parent=None):
name = self.checkName(name)
sphs=[]
mat = None
if len(colors) == 1:
print (colors)
mat = self.retrieveColorMat(colors[0])
if mat == None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(centers)):
sphs.append(cmds.instance(meshsphere,name=name+str(i)))
#local transformation ?
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),name+str(i))
cmds.scale(float(radii[i]),float(radii[i]),float(radii[i]), name+str(i),absolute=True )
if mat == None : mat = self.addMaterial("matsp"+str(i),colors[i])
self.assignMaterial(name+str(i),mat)#mat[bl.retrieveColorName(sphColors[i])]
self.addObjectToScene(scene,sphs[i],parent=parent)
return sphs
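    # Usage sketch for instancesSphere (hedged example; assumes an instance `helper`,
    # a template sphere "baseSph" and a parent null "root" already created in the scene):
    #   centers = [[0., 0., 0.], [5., 0., 0.]]
    #   radii = [1.0, 2.0]
    #   sphs = helper.instancesSphere("inst", centers, radii, "baseSph",
    #                                 [[1., 0., 0.]], helper.getCurrentScene(), parent="root")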
def updateInstancesSphere(self,name,sphs,centers,radii,meshsphere,
colors,scene,parent=None,delete=True):
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(centers)):
if len(radii) == 1 :
rad = radii[0]
elif i >= len(radii) :
rad = radii[0]
else :
rad = radii[i]
if i < len(sphs):
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),sphs[i])#name+str(i))
cmds.scale(float(rad),float(rad),float(rad), sphs[i],absolute=True )
# sphs[i].SetAbsPos(self.FromVec(centers[i]))
# sphs[i][905]=c4d.Vector(float(rad),float(rad),float(rad))
if mat == None :
if colors is not None and i < len(colors) and colors[i] is not None :
mat = self.addMaterial("matsp"+str(i),colors[i])
if colors is not None and i < len(colors) and colors[i] is not None :
self.colorMaterial(mat,colors[i])
self.toggleDisplay(sphs[i],True)
else :
sphs.append(cmds.instance(meshsphere,name=name+str(i)))
#local transformation ?
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),name+str(i))
cmds.scale(float(rad),float(rad),float(rad), name+str(i),absolute=True )
if mat == None : mat = self.addMaterial("matsp"+str(i),colors[i])
self.assignMaterial(name+str(i),mat)#mat[bl.retrieveColorName(sphColors[i])]
self.addObjectToScene(scene,sphs[i],parent=parent)
if mat == None :
if colors is not None and i < len(colors) and colors[i] is not None :
mat = self.addMaterial("matsp"+str(i),colors[i])
self.addObjectToScene(scene,sphs[i],parent=parent)
        if len(centers) < len(sphs) :
            #delete or hide the extra ones; iterate backwards so pop() does not shift pending indices
            for i in range(len(sphs)-1,len(centers)-1,-1):
                if delete :
                    obj = sphs.pop(i)
                    print "delete",obj
                    self.deleteObject(obj)
                else :
                    self.toggleDisplay(sphs[i],False)
return sphs
def constraintLookAt(self,object):
"""
        Constrain a host object to look at the camera
        @type object: Hostobject
        @param object: object to constrain
"""
self.getObject(object)
cmds.orientConstraint( 'persp', object )
def updateText(self,text,string="",parent=None,size=None,pos=None,font=None):
text = self.checkName(text)
if string : cmds.textCurves(text, e=1, t=string )
# if size is not None : text[c4d.PRIM_TEXT_HEIGHT]= size
# if pos is not None : self.setTranslation(text,pos)
# if parent is not None : self.reParent(text,parent)
def extrudeText(self,text,**kw):
tr,parent = self.getTransformNode(text)
nChild = parent.childCount()
print nChild
#dag = om.MFnDagNode(node)
dnode = om.MFnDependencyNode(parent.transform())
child_path = om.MDagPath()
cmd ="constructionHistory=True,normalsOutwards=True,range=False,polygon=1,\
tolerance=0.01,numberOfSides=4 ,js=True,width=0 ,depth=0 ,extrudeDepth=0.5,\
capSides=4 ,bevelInside=0 ,outerStyle=0 ,innerStyle=0 ,\
polyOutMethod=0,polyOutCount=200,polyOutExtrusionType=2 ,\
polyOutExtrusionSamples=3,polyOutCurveType=2 ,\
polyOutCurveSamples=3,polyOutUseChordHeightRatio=0)"
for i in range(nChild):
#get all curve
node_child = parent.child(i)
child_tr,child_path = self.getTransformNode(node_child)
dnode = om.MFnDependencyNode(node_child)
nChildChild = child_path.childCount()
for j in range(nChildChild):
cmdchilds="cmds.bevelPlus("
node_child_child = child_path.child(j)
dnode = om.MFnDependencyNode(node_child_child)
cmdchilds+='"'+dnode.name()+'",'
cmdchilds+="n='bevel_"+dnode.name()+str(j)+"',"+cmd
cmdbis = 'cmds.bevel("'+dnode.name()+'",n="bevel_'+dnode.name()+str(j)+'", ed=0.5)'
eval(cmdbis)
cmds.bevel(e=1,w=0,d=0)
def Text(self,name="",string="",parent=None,size=5.,pos=None,font='Courier',
lookAt=False,**kw):
return_extruder = False
name = self.checkName(name)
if "extrude" in kw :
extruder = None
if type(kw["extrude"]) is bool and kw["extrude"]:
pass
text = cmds.textCurves( n= name, f=font, t=string )
## Result: [u'testShape', u'makeTextCurves2'] #
if pos is not None :
#should add -14
pos[0] = pos[0]-14.0#not center
self.setTranslation(name+'Shape',pos)
# if parent is not None:
self.addObjectToScene(self.getCurrentScene(),name+'Shape',parent=parent)
if lookAt:
self.constraintLookAt(name)
self.scaleObj(text[0],[size,size,size])
if "extrude" in kw :
extruder = None
#create an extruder
if type(kw["extrude"]) is bool and kw["extrude"]:
self.extrudeText(text)
# extruder = cmds.bevelPlus( text[1], ed=0.5)
# extruder = cmds.bevel( text, ed=0.5,w=0.0,d=0.0)
#reparent the extruder ?
# self.reParent(extruder,parent)
#po=1, cap=4,
# extruded=cmds.extrude( extrude_obj,self.checkName(name)+"_spline",
# et = 2, ucp = 1,n=name, fpt=1,upn=1)
return_extruder = True
else :
self.extrudeText(text)
# extruder = cmds.bevel( text, ed=0.5,w=0.0,d=0.0)
self.reParent(extruder,parent)
# if extruder is not None :
# pass
self.addObjectToScene(self.getCurrentScene(),name+'Shape',parent=parent)
if return_extruder :
return text,None
return text
def getBoxSize(self,name):
#kPolyCube
# cmds.select(name)
# print(name)
sx = cmds.polyCube(name, q=True,w=True)
sy = cmds.polyCube(name, q=True,h=True)
sz = cmds.polyCube(name, q=True,d=True)
return [sx,sy,sz]
def box(self,name,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,visible=1,
mat=None,**kw):
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
res = 15.
name = self.checkName(name)
box,shape = cmds.polyCube(name=name,w=float(size[0]),h=float(size[1]),
d=float(size[2]), sx=res, sy=res, sz=res )
        if mat is None :
            mat = self.addMaterial("mat"+name,[1.,1.,0.])
        self.assignMaterial(box,mat)
cmds.move(float(center[0]),float(center[1]),float(center[2]),box)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.addObjectToScene(self.getCurrentScene(),box,parent=parent)
return box,shape
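    # Usage sketch for box (hedged example; assumes an instance `helper` of this class):
    #   b, shape = helper.box("myBox", center=[0., 0., 0.], size=[10., 5., 2.])
    #   # or from two opposite corners:
    #   b2, shape2 = helper.box("myBox2", cornerPoints=[[0., 0., 0.], [4., 4., 4.]])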
def updateBox(self,box,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,
visible=1, mat = None):
box=self.getObject(box)
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
cmds.move(float(center[0]),float(center[1]),float(center[2]),box)
cmds.polyCube(box,e=1,w=float(size[0]),h=float(size[1]),
d=float(size[2]))
def Cone(self,name,radius=1.0,length=1.,res=16,pos = None,parent=None):
name = self.checkName(name)
diameter = 2*radius
cone,mesh=cmds.cone(name=name,axis=[0.0,1.0,0.0],hr=length,
r=radius,s=res,nsp=res)
if pos != None : cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),cone)
if parent is not None:
self.reParent(cone,parent)
# self.addObjectToScene(self.getCurrentScene(),instance)
return str(cone),mesh
def Cylinder(self,name,radius=1.,length=1.,res=16,pos = None,parent=None,**kw):
#import numpy
name = self.checkName(name)
diameter = 2*radius
axis = [0.0,0.0,1.0]
if "axis" in kw : #orientation
dic = {"+X":[1.,0.,0.],"-X":[-1.,0.,0.],"+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
"+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
if type(kw["axis"]) is str :
axis = dic[kw["axis"]]
else :
axis = kw["axis"]
cyl,mesh=cmds.polyCylinder(name=name,axis=axis,
r=radius, sx=res, sy=res, sz=5, h=length)
if pos != None : cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),cyl)
if parent is not None:
self.reParent(cyl,parent)
# self.addObjectToScene(self.getCurrentScene(),instance)
return str(cyl),mesh#,mesh
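    # Usage sketch for Cylinder (hedged example; assumes an instance `helper` of this class):
    #   cyl, mesh = helper.Cylinder("bond", radius=0.2, length=3.0, res=16,
    #                               pos=[0., 0., 0.], axis="+Z")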
def oneCylinder(self,name,head,tail,radius=None,instance=None,material=None,
parent = None,color=None):
name = self.checkName(name)
laenge,wsz,wz,coord=self.getTubeProperties(head,tail)
# print "oneCylinder instance",instance
if instance == None :
obj = self.Cylinder(name)
else :
obj = self.newMInstance(name,instance,parent=parent)
# obj = name
# self.translateObj(name,coord)
# self.setTranslation(name,coord)
# #obj.setLocation(float(coord[0]),float(coord[1]),float(coord[2]))
# cmds.setAttr(name+'.ry',float(degrees(wz)))
# cmds.setAttr(name+'.rz',float(degrees(wsz)))
# cmds.scale( 1, 1, laenge, name,absolute=True )
if radius is None :
radius= 1.0
self.setTransformation(obj,trans=coord,scale=[radius, radius, laenge],
rot=[0.,wz,wsz])
if material is not None :
self.assignMaterial(obj,material)
elif color is not None :
mats = self.getMaterialObject(obj)
if not mats :
mat = self.addMaterial("mat_"+name,color)
self.assignMaterial(obj,mat)
else :
self.colorMaterial(mats[0],color)
return obj
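    # Usage sketch for oneCylinder (hedged example; assumes an instance `helper` of this class):
    #   obj = helper.oneCylinder("bond0", [0., 0., 0.], [0., 0., 5.],
    #                            radius=0.25, color=(0., 1., 0.))
    # the cylinder is translated, rotated and scaled to span the head->tail segment.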
def updateOneCylinder(self,name,head,tail,radius=None,material=None,color=None):
name = self.checkName(name)
laenge,wsz,wz,coord=self.getTubeProperties(head,tail)
obj = self.getObject(name)
if radius is None :
radius= 1.0
self.setTransformation(obj,trans=coord,scale=[radius, radius, laenge],
rot=[0.,wz,wsz])
if material is not None :
self.assignMaterial(obj,material)
elif color is not None :
mats = self.getMaterialObject(obj)
if not mats :
mat = self.addMaterial("mat_"+name,color)
self.assignMaterial(obj,mat)
else :
self.colorMaterial(mats[0],color)
return obj
def updateTubeObj(self,o,coord1,coord2):
laenge,wsz,wz,pos=self.getTubeProperties(coord1,coord2)
self.setTransformation(o,trans=pos,scale=[1., 1., laenge],
rot=[0.,wz,wsz])
# cmds.scale( 1., 1., laenge, o,absolute=True )
# self.setTranslation(o,pos)
## cmds.move(float(pos[0]),float(pos[1]),float(pos[2]), o, absolute=True )
# cmds.setAttr(o+'.ry',float(degrees(wz)))
# cmds.setAttr(o+'.rz',float(degrees(wsz)))
def updateTubeMeshold(self,atm1,atm2,bicyl=False,cradius=1.0,quality=0):
self.updateTubeObj(atm1,atm2,bicyl=bicyl,cradius=cradius)
def updateTubeMesh(self,mesh,basemesh=None,cradius=1.0,quality=0):
# print mesh
# print cradius, mesh
mesh = self.getObject(str(mesh))
# print mesh
maya.cmds.polyCylinder(mesh,e=True,r=cradius)
# def updateTubeObjs(self,g):
# if not hasattr(g,'obj') : return
# newpoints=g.getVertices()
# newfaces=g.getFaces()
# #print "upadteObjTubes"
# for i,o in enumerate(g.obj):
# laenge,wsz,wz,pos=self.getTubeProperties(points[f[0]],points[f[1]])
# cmds.scale( 1, 1, laenge, o,absolute=True )
# cmds.move(float(pos[0]),float(pos[1]),float(pos[2]), o, absolute=True )
# cmds.setAttr(o+'.ry',float(degrees(wz)))
# cmds.setAttr(o+'.rz',float(degrees(wsz)))
def plane(self,name,center=[0.,0.,0.],size=[1.,1.],cornerPoints=None,visible=1,**kw):
#polyPlane([axis=[linear, linear, linear]], [
# constructionHistory=boolean], [createUVs=int], [height=linear],
# [name=string], [object=boolean], [subdivisionsX=int],
# [subdivisionsY=int], [texture=int], [width=linear])
plane,shape = cmds.polyPlane(name=name,w=float(size[0]),h=float(size[1]),
ax=[0.,0.,1.])
        if cornerPoints != None :
            #size is 2D (width,height) while center is 3D
            for i in range(2):
                size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
cmds.move(float(center[0]),float(center[1]),float(center[2]),plane)
if "subdivision" in kw :
cmds.polyPlane(plane,e=1,
sx=kw["subdivision"][0],sy=kw["subdivision"][1])
if "axis" in kw : #orientation
dic = { "+X":[1.,0.,0.],"-X":[-1.,0.,0.],
"+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
"+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
idic = { 0:[1.,0.,0.],1:[-1.,0.,0.],
2:[0.,1.,0.],3:[0.,-1.,0.],
4:[0.,0.,1.],5:[0.,0.,-1.]}
if type(kw["axis"]) is str :
axis = dic[kw["axis"]]
else : #int
axis = idic[kw["axis"]]
cmds.polyPlane(plane,e=1,ax=axis)
# if "material" in kw :
# texture = plane.MakeTag(c4d.Ttexture)
# if type(kw["material"]) is c4d.BaseMaterial :
# texture[1010] = kw["material"]
# else :
# texture[1010] = self.addMaterial("plane",[1.,1.,0.])
parent = None
if "parent" in kw :
parent = kw["parent"]
self.addObjectToScene(self.getCurrentScene(),plane,parent=parent)
return plane,shape
def PointCloudObject(self,name,**kw):
#print "cloud", len(coords)
name = self.checkName(name)
coords=kw['vertices']
# nface = 0
# if kw.has_key("faces"):
# nface = len(kw['faces'])
# obj = self.createsNmesh(name+'ds',coords,None,[])
# return obj[0]
partShape,part = self.particule(name+"ds", coords)
return part,partShape
def getJointPosition(self,jointname):
return self.getTranslation(jointname)
#return self.getTranslationOM(jointname)
# fnJt=oma.MFnIkJoint()
# mobj = self.getNode(jointname)
# if not fnJt.hasObj(mobj ) :
# print "no joint provided!"
# return None
# fnJt.setObject(mobj)
# cvs = om.MPointArray()
# ncurve.getCVs(cvs,om.MSpace.kPostTransform)
# return cvs
def updateArmature(self,basename,coords,listeName=None,scn=None,root=None,**kw):
for j in range(len(coords)):
atC=coords[j]
name = basename+'bone'+str(j)
if listeName is not None:
name = listeName[j]
relativePos=[atC[0],atC[1],atC[2]]
cmds.joint(self.checkName(name),e=1, p=relativePos)
def armature(self,basename,coords,listeName=None,scn=None,root=None,**kw):
#bones are called joint in maya
#they can be position relatively or globally
basename = self.checkName(basename)
bones=[]
# center = self.getCenter(coords)
parent = self.newEmpty(basename)
self.addObjectToScene(scn,parent,parent=root)
for j in range(len(coords)):
atC=coords[j]
#bones.append(c4d.BaseObject(BONE))
relativePos=[atC[0],atC[1],atC[2]]
name = basename+'bone'+str(j)
if listeName is not None:
name = listeName[j]
joint=cmds.joint(n=self.checkName(name), p=relativePos) #named "joint1"
bones.append(joint)
if scn != None :
if j==0 : self.addObjectToScene(scn,bones[j],parent=parent)
else : self.addObjectToScene(scn,bones[j],parent=bones[j-1])
return parent,bones
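    # Usage sketch for armature (hedged example; assumes an instance `helper` of this class):
    #   coords = [[0., 0., 0.], [0., 2., 0.], [0., 4., 0.]]
    #   root, joints = helper.armature("arm", coords, scn=helper.getCurrentScene())
    # each joint is parented to the previous one, building a simple chain.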
def bindGeom2Bones(self,listeObject,bones):
"""
        Make a skinning, i.e. bind the given bones to the given list of geometry.
        This function joins the list of geometry into one geometry.
        @type listeObjects: list
        @param listeObjects: list of objects to join
        @type bones: list
        @param bones: list of joints
"""
if len(listeObject) >1:
self.JoinsObjects(listeObject)
else :
self.ObjectsSelection(listeObject,"new")
#2- add the joins to the selection
self.ObjectsSelection(bones,"add")
#3- bind the bones / geoms
cmds.bindSkin()
#IK:cmds.ikHandle( sj='joint1', ee='joint5', p=2, w=.5 )
def getParticulesPosition(self,name):
name = self.checkName(name)
partO=self.getMShape(name) #shape..
fnP = omfx.MFnParticleSystem(partO)
pos=om.MVectorArray(fnP.count())
oriPsType = fnP.renderType()
if(oriPsType == omfx.MFnParticleSystem.kTube):
fnP.position0(pos);
else:
fnP.position(pos);
return pos
def setParticulesPosition(self,newPos,PS=None):
if PS == None :
return
obj = self.checkName(PS)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in newPos:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
# pts.append(p)
#fnP.emit(pts)
fnP.setPerParticleAttribute("position",pos)
def getParticles(self,name,**kw):
PS = self.getObject(name)
return PS
def updateParticles(self,newPos,PS=None,**kw):
if PS == None :
return
obj = self.checkName(PS)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
currentN = fnP.count()
N = len(newPos)
fnP.setCount(N)
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in newPos:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
fnP.setPerParticleAttribute("position",pos)
#this update the particle position not the particle number
def updateParticleRotation(self,obj,rotation):
obj = self.checkName(obj)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
rot=om.MVectorArray(fnP.count())
#euler angle?
        for v in rotation:
            p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
            rot.append(p)
fnP.setPerParticleAttribute("rotationPP",rot)
#this update the particle position not the particle number
def updateParticle(self,obj,vertices,faces):
obj = self.checkName(obj)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
if(oriPsType == omfx.MFnParticleSystem.kTube):
if faces is None :
return
position0 = om.MVectorArray()
position1 = om.MVectorArray()
            for i,f in enumerate(faces):
coord1 = c = vertices[f[0]]
coord2 = vertices[f[1]]
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position0.append(p)
c= coord2
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position1.append(p)
fnP.setPerParticleAttribute("position0",position0)
fnP.setPerParticleAttribute("position1",position1)
else :
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in vertices:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
# pts.append(p)
#fnP.emit(pts)
fnP.setPerParticleAttribute("position",pos)
#fnP.setPerParticleAttribute? position
#stat = resultPs.emit(finalPos);
def particule(self,name, coord,**kw):
name = self.checkName(name)
if coord is not None :
try :
coord = numpy.array(coord).tolist()
except :
pass
part,partShape=cmds.particle(n=name,p=list(coord))
else :
part,partShape=cmds.particle(n=name)
# instant = cmds.particleInstancer(part, a = 1, object = cyl[0],
# position = 'bondPos', aimDirection = 'velocity',
# scale = 'bondScaler',
# name = (chainName+ '_geoBondsInstances'))
return partShape,part
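    # Usage sketch for particule (hedged example; assumes an instance `helper` of this class):
    #   coords = [[0., 0., 0.], [1., 1., 1.], [2., 0., 2.]]
    #   partShape, part = helper.particule("cloud", coords)
    # the returned shape node can then be switched to another render type (see metaballs below).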
def updateMetaball(self,name,vertices=None):
if vertices is None :
return
self.updateParticle(name,vertices=vertices,faces=None)
def metaballs(self,name,coords,radius,scn=None,root=None,**kw):
# atoms=selection.findType(Atom)
        #no native metaball in maya, need to use a particle set rendered as blobby surface
        #use of the point cloud polygon object as the emitter
# name is on the form 'metaballs'+mol.name
# if scn == None:
# scn = self.getCurrentScene()
#molname = name.split("balls")[1]
#emiter = molname+"_cloud"
name = self.checkName(name)
partShape,part = self.particule(name, coords)
#need to change the rep
node = self.getNode(partShape)
plug = self.getNodePlug("particleRenderType",node)
plug.setInt(7); #Bloby surface s/w
return part,partShape
def splinecmds(self,name,coords,type="",extrude_obj=None,scene=None,parent=None):
#Type : "sBezier", "tBezier" or ""
name = self.checkName(name)
if scene is None :
scene = self.getCurrentScene()
#parent=newEmpty(name)
curve = cmds.curve(n=name,p=coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
objName=cmds.ls("curveShape1")
cmds.rename(objName,name+"Shape")
cmds.setAttr(name+"Shape"+".dispEP",1)
if parent is not None :
cmds.parent( name, parent)
return name,None
def extrudeSpline(self,spline,**kw):
extruder = None
shape = None
spline_clone = None
if "shape" in kw:
if type(kw["shape"]) == str :
shape = self.build_2dshape("sh_"+kw["shape"]+"_"+str(spline),
kw["shape"])[0]
else :
shape = kw["shape"]
if shape is None :
            shape = self.build_2dshape("sh_circle"+str(spline))[0]
if "extruder" in kw:
extruder = kw["extruder"]
# if extruder is None :
# extruder=self.sweepnurbs("ex_"+spline.GetName())
if "clone" in kw and kw["clone"] :
spline_clone = cmds.duplicate(spline,n="exd"+str(spline))
self.resetTransformation(spline_clone)
extruder=cmds.extrude( shape[0],spline_clone,
et = 2, ucp = 1,n="ex_"+str(spline), fpt=1,upn=1)
self.toggleDisplay(spline_clone,False)
return extruder,shape,spline_clone
else :
extruder=cmds.extrude( shape[0],spline,
et = 2, ucp = 1,n="ex_"+str(spline), fpt=1,upn=1)
return extruder,shape
#setAttr "extrudedSurfaceShape1.simplifyMode" 1;
def build_2dshape(self,name,type="circle",**kw):
shapedic = {"circle":{"obj":cmds.circle,"size":["r",]},
# "rectangle":{"obj":None,"size":[0,0]}
}
shape = shapedic[type]["obj"](n=name, nr=(1, 0, 0), c=(0, 0, 0),r=0.3)
dopts = [1.,1.]
if "opts" in kw :
dopts = kw["opts"]
if len(shapedic[type]["size"]) == 1 :
pass
# shape[shapedic[type]["size"][0]] = dopts[0]
else :
for i in range(len(shapedic[type]["size"])) :
pass
# shape[shapedic[type]["size"][i]] = dopts[i]
self.addObjectToScene(None,shape)
return shape,name+"Shape"
def spline(self,name,coords,type="",extrude_obj=None,scene=None,
parent=None,**kw):
#Type :
name = self.checkName(name)
if scene is None :
scene = self.getCurrentScene()
#parent=newEmpty(name)
if extrude_obj is not None:
shape,curve = self.omCurve(name+"_spline",coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
if parent is not None :
cmds.parent( self.checkName(name)+"_spline", parent)
# extrude profile curve along path curve using "flat" method
# The extrude type can be distance-0, flat-1, or tube-2
extruded=cmds.extrude( extrude_obj,self.checkName(name)+"_spline",
et = 2, ucp = 1,n=name, fpt=1,upn=1)
#setAttr "extrudedSurfaceShape1.simplifyMode" 1;
return name,shape,extruded
shape,curve = self.omCurve(name,coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
if parent is not None :
cmds.parent( self.checkName(name), parent)
return name,shape
def getSplinePoints(self,name,convert=False):
name = self.checkName(name)
ncurve = om.MFnNurbsCurve()
mobj = self.getNode(self.checkName(name))
if not ncurve.hasObj(mobj ) :
mobj = self.getNode(self.checkName(name)+"Shape")
if not ncurve.hasObj(mobj) :
print "no curve shape provided!"
return None
ncurve.setObject(mobj)
cvs = om.MPointArray()
ncurve.getCVs(cvs,om.MSpace.kPostTransform)
return cvs
def update_spline(self,name,coords):
#need to provide the object shape name
name = self.checkName(name)
ncurve = om.MFnNurbsCurve()
mobj = self.getNode(self.checkName(name))
if not ncurve.hasObj(mobj ) :
mobj = self.getNode(self.checkName(name)+"Shape")
if not ncurve.hasObj(mobj) :
print "no curve shape provided!"
return None
ncurve.setObject(mobj)
deg = 3; #Curve Degree
ncvs = len(coords); #Number of CVs
spans = ncvs - deg # Number of spans
nknots = spans+2*deg-1 # Number of knots
controlVertices = om.MPointArray()
knotSequences = om.MDoubleArray()
# point array of plane vertex local positions
for c in coords:
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
controlVertices.append(p)
# for i in range(nknots):
# knotSequences.append(i)
# create(controlVertices,knotSequences, deg,
# om.MFnNurbsCurve.kOpen, False, False
ncurve.setCVs(controlVertices,om.MSpace.kPostTransform)
# ncurve.setKnots(knotSequences)
ncurve.updateCurve()
def omCurve(self,name,coords,**kw):
#default value
name = self.checkName(name)
deg = 3; #Curve Degree
ncvs = len(coords); #Number of CVs
if kw.has_key("deg"):
deg = kw['deg']
spans = ncvs - deg # Number of spans
nknots = spans+2*deg-1 # Number of knots
controlVertices = om.MPointArray()
knotSequences = om.MDoubleArray()
# point array of plane vertex local positions
for c in coords:
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
controlVertices.append(p)
for i in range(nknots):
knotSequences.append(i)
curveFn=om.MFnNurbsCurve()
curve = curveFn.create(controlVertices,knotSequences, deg,
om.MFnNurbsCurve.kOpen, False, False)
# curveFn.setName(name)
print (curveFn.partialPathName())
print (curveFn.name())
shapename = curveFn.name()
objName = shapename.split("Shape")[0]
n = shapename.split("Shape")[1]
# objName=cmds.ls("curve1")[0]
cmds.rename(objName+n,name)
nodeName = curveFn.name() #curveShape
cmds.rename(nodeName, name+"Shape")
return curveFn, curve
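    # Usage sketch for spline/omCurve (hedged example; assumes an instance `helper` of this class):
    #   pts = [[0., 0., 0.], [1., 2., 0.], [2., 0., 0.], [3., 2., 0.]]
    #   name, shape = helper.spline("myCurve", pts)
    # omCurve builds the underlying NURBS curve with degree 3 by default (kw "deg").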
def createLines(self,name,coords,normal,faces):
partShape,part = self.linesAsParticles(name,coords,faces)
return part
def linesAsParticles(self,name,coords,face):
#what about omfx to create the system...
name = self.checkName(name)
partShape,part = self.particule(name, None)
path = self.getMShape(part)
node = path.node()
depNodeFn = om.MFnDependencyNode( node )
plug = self.getNodePlug("particleRenderType", node )
plug.setInt(9); #Tube s/w
fnP = omfx.MFnParticleSystem(path)
pts = om.MPointArray()
position0 = om.MVectorArray()
position1 = om.MVectorArray()
for i,f in enumerate(face):
coord1 = c = coords[f[0]]
coord2 = coords[f[1]]
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position0.append(p)
c= coord2
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position1.append(p)
laenge,wsz,wz,c=self.getTubeProperties(coord1,coord2)
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
pts.append(p)
# fnP.emit(pts)
fnP.setPerParticleAttribute("position0",position0)
fnP.setPerParticleAttribute("position1",position1)
fnP.emit(pts)
return partShape,part
def mayaVec(self,v):
return om.MFloatPoint( float(v[0]),float(v[1]),float(v[2]) )
def getFaces(self,obj,**kw):
# import numpy
node = self.getNode('mesh_'+obj)
meshnode = om.MFnMesh(node)
triangleCounts =om.MIntArray()
triangleVertices= om.MIntArray()
meshnode.getTriangles(triangleCounts,triangleVertices)
if self._usenumpy :
return numpy.array(triangleVertices).reshape((len(triangleVertices)/3,3))
else :
return triangleVertices
def polygons(self,name,proxyCol=False,smooth=False,color=[[1,0,0],], material=None, **kw):
normals = kw["normals"]
name,meshFS = self.createsNmesh(name,kw['vertices'],normals,kw['faces'],color=color,
smooth=smooth,material=material)
return name
def createsNmesh(self,name,vertices,normal,faces,color=[[1,0,0],],smooth=False,
material=None,proxyCol=False,**kw):
"""
        This is the main function that creates a polygonal mesh.
@type name: string
@param name: name of the pointCloud
@type vertices: array
@param vertices: list of x,y,z vertices points
@type vnormals: array
@param vnormals: list of x,y,z vertex normals vector
@type faces: array
@param faces: list of i,j,k indice of vertex by face
@type smooth: boolean
@param smooth: smooth the mesh
@type material: hostApp obj
@param material: material to apply to the mesh
        @type proxyCol: boolean
@param proxyCol: do we need a special object for color by vertex (ie C4D)
@type color: array
@param color: r,g,b value to color the mesh
@rtype: hostApp obj
@return: the polygon object
"""
if len(color) == 3 :
if type(color[0]) is not list :
color = [color,]
outputMesh = om.MObject()
#print outputMesh.name()
#cmds.rename(outputMesh.name(), name)
#test=cmds.createNode( 'transform', n='transform1' )
name=name.replace(":","_")
name=name.replace("-","_")
name=name.replace("'","")
name=name.replace('"',"")
name=self.checkName(name)
#print "NMesh ",name
numFaces = 0
if faces is not None :
numFaces = len(faces)
numVertices = len(vertices)
# point array of plane vertex local positions
points = om.MFloatPointArray()
for v in vertices:
points.append(self.mayaVec(v))
#mayaVertices=map(mayaVec,vertices)
#map(points.append,mayaVertices)
# vertex connections per poly face in one array of indexs into point array given above
faceConnects = om.MIntArray()
for f in faces:
for i in f :
faceConnects.append(int(i))
# an array to hold the total number of vertices that each face has
        faceCounts = om.MIntArray()
        for c in range(0,numFaces,1):
            faceCounts.append(int(len(faces[c])))
#create mesh object using arrays above and get name of new mesh
meshFS = om.MFnMesh()
newMesh = meshFS.create(numVertices, numFaces, points, faceCounts,
faceConnects, outputMesh)
# meshFS.updateSurface()
nodeName = meshFS.name()
cmds.rename(nodeName, "mesh_"+name)
#print 'Mesh node name is: %s' % nodeName
objName=cmds.ls("polySurface1")[0]
cmds.rename(objName,name)
#newName should bydefault polySurface something
# assign new mesh to default shading group
if color is not None and len(color) > 1:
self.color_mesh_perVertex(meshFS,color)
doMaterial = True
if type(material) is bool :
doMaterial = material
if doMaterial:
if material == None :
if len(name.split("_")) == 1 : splitname = name
else :
splitname = name.split("_")[1]
#print name,name[:4],splitname,splitname[:4]
self.assignNewMaterial( "mat_"+name, color[0],'lambert' ,"mesh_"+name)
else :
self.assignMaterial("mesh_"+name,material)
if "parent" in kw :
parent = kw["parent"]
# print "reparent ", name,parent
self.reParent(name,parent)
return name,meshFS#,outputMesh
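    # Usage sketch for createsNmesh (hedged example; assumes an instance `helper` of this class):
    #   verts = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
    #   faces = [[0, 1, 2]]
    #   name, meshFS = helper.createsNmesh("tri", verts, None, faces, color=[[1., 0., 0.]])
    # creates a transform "tri" with shape node "mesh_tri" and a lambert "mat_tri" assigned.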
def updatePoly(self,obj,vertices=None,faces=None):
if type(obj) is str:
obj = self.getObject(obj)
if obj is None : return
node = self.getMShape(self.checkName(obj))
if node.hasFn(om.MFn.kMesh):
self.updateMesh(obj,vertices=vertices,faces=faces)
elif node.hasFn(om.MFn.kParticle):
self.updateParticle(obj,vertices=vertices,faces=faces)
def updateMesh(self,meshnode,vertices=None,faces=None, smooth=False,**kw):#chains.residues.atoms.coords,indices
# print meshnode,type(meshnode)
if type(meshnode) is str or type(meshnode) is unicode:
node = self.getMShape(self.checkName(meshnode))#self.getNode(self.checkName(meshnode))
meshnode = om.MFnMesh(node)
# meshnode = self.getObject(meshnode,doit=True)
if meshnode is None:
return
nv = meshnode.numVertices()
nf = meshnode.numPolygons()
if vertices is not None :
numVertices = len(vertices)
# point array of plane vertex local positions
points = om.MFloatPointArray()
for v in vertices:
points.append(self.mayaVec(v))
else :
return
#numVertices = nv
if faces is not None :
numFaces = len(faces)
else :
numFaces = nf
faces = []
faceConnects = om.MIntArray()
for f in faces:
for i in f :
faceConnects.append(int(i))
# an array to hold the total number of vertices that each face has
        faceCounts = om.MIntArray()
        for f in faces:
            faceCounts.append(int(len(f)))
#newMesh = meshFS.create(numVertices, numFaces, points, faceCounts, faceConnects, outputMesh)
result = meshnode.createInPlace(numVertices, numFaces, points, faceCounts, faceConnects)
meshnode.updateSurface()
def ToVec(self,v,**kw):
if hasattr(v,"x") :
return [v.x,v.y,v.z]
else :
return v
def arr2marr(self,v):
#from http://www.rtrowbridge.com/blog/2009/02/maya-api-docs-demystified-for-python-users/
self.msutil.createFromList( v, len(v) )
doubleArrayPtr = self.msutil.asDoublePtr()
return doubleArrayPtr
# def vecp2m(self,v):
# #from http://www.rtrowbridge.com/blog/2009/02/maya-api-docs-demystified-for-python-users/
# doubleArrayPtr = self.arr2marr(v)
# vec = om.MVector( doubleArrayPtr )
# return vec
def FromVec(self,v,pos=True):
if isinstance(v,om.MVector):
return v
else :
return om.MVector(v[0], v[1], v[2])
def vec2m(self,v):
if isinstance(v,om.MVector):
return v
else :
return om.MVector(float(v[0]), float(v[1]), float(v[2]))
def ToMat(self,mat,**kw):
#maya - > python
return self.m2matrix(mat)
def FromMat(self,mat,**kw):
#pythn->maya
return self.matrixp2m(mat)
def matrixp2m(self,mat):
#from http://www.rtrowbridge.com/blog/2009/02/python-api-mtransformationmatrixgetrotation-bug/
if isinstance(mat,om.MTransformationMatrix) :
return mat
getMatrix = om.MMatrix()
matrixList = mat#mat.transpose().reshape(16,)
om.MScriptUtil().createMatrixFromList(matrixList, getMatrix)
mTM = om.MTransformationMatrix( getMatrix )
rotOrder = om.MTransformationMatrix().kXYZ
return mTM
def m2matrix(self,mMat):
#return mMat
#do we use numpy
if isinstance(mMat,om.MTransformationMatrix) :
matrix = mMat.asMatrix()
elif isinstance(mMat,om.MMatrix):
matrix = mMat
else :
return mMat
us=om.MScriptUtil()
out_mat = [0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0]
us.createFromList( out_mat, len(out_mat) )
ptr1 = us.asFloat4Ptr()
matrix.get(ptr1)
res_mat = [[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0]]
for i in range(4):
for j in range(4):
val = us.getFloat4ArrayItem(ptr1, i,j)
res_mat[i][j]=val
return res_mat
def alignNormal(self,poly):
pass
def triangulate(self,poly):
#select poly
doc = self.getCurrentScene()
mesh = self.getMShape(poly)
meshname= mesh.partialPathName()
#checkType
if self.getType(meshname) != self.MESH :
return
cmds.polyTriangulate(meshname)
def getMeshVertices(self,poly,transform=False,selected = False):
meshnode = self.checkIsMesh(poly)
if selected :
mverts_indice = []
verts =[]
v = om.MIntArray()
vertsComponent = om.MObject()
meshDagPath = om.MDagPath()
activeList = om.MSelectionList()
om.MGlobal.getActiveSelectionList(activeList)
selIter = om.MItSelectionList(activeList,om.MFn.kMeshVertComponent)
            while not selIter.isDone():
                selIter.getDagPath(meshDagPath, vertsComponent)
                if not vertsComponent.isNull():
                    # ITERATE THROUGH EACH VERTEX IN THE CURRENT VERTEX COMPONENT:
                    vertIter = om.MItMeshVertex(meshDagPath,vertsComponent)
                    while not vertIter.isDone():
                        mverts_indice.append(vertIter.index()) #indice of the selected vertex
                        pts = vertIter.position(om.MSpace.kWorld)
                        verts.append(self.ToVec(pts))
                        vertIter.next()
                selIter.next()
return verts,mverts_indice
else :
nv = meshnode.numVertices()
points = om.MFloatPointArray()
meshnode.getPoints(points)
vertices = [self.ToVec(points[i]) for i in range(nv)]
return vertices
def getMeshNormales(self,poly,selected = False):
meshnode = self.checkIsMesh(poly)
nv = meshnode.numNormals()
normals = om.MFloatVectorArray()
meshnode.getVertexNormals(False,normals)
vnormals = [self.ToVec(normals[i]) for i in range(nv)]
if selected :
v,indice = self.getMeshVertices(poly,selected = selected)
vn=[]
for i in indice:
vn.append(vnormals[i])
return vn,indice
return vnormals
def getMeshEdges(self,poly,selected = False):
#to be tested
meshnode = self.checkIsMesh(poly)
ne= meshnode.numEdges()
edges = []
edgeConnects = om.MIntArray()
for i in range(ne):
meshnode.getEdgeVertices(i,edgeConnects)
edges.append(edgeConnects)
return edges
def getMeshFaces(self,poly,selected = False):
meshnode = self.checkIsMesh(poly)
faceConnects = om.MIntArray()
faceCounts = om.MIntArray()
meshnode.getTriangles(faceCounts,faceConnects)
if selected :
mfaces_indice = []
faces =[]
v = om.MIntArray()
faceComponent = om.MObject()
meshDagPath = om.MDagPath()
activeList = om.MSelectionList()
om.MGlobal.getActiveSelectionList(activeList)
selIter = om.MItSelectionList(activeList,om.MFn.kMeshPolygonComponent)
# print "itersel",selIter.isDone()
while 1:
selIter.getDagPath(meshDagPath, faceComponent);
# print "faces ?",faceComponent.isNull()
if not faceComponent.isNull():
# print ' ITERATE THROUGH EACH "FACE" IN THE CURRENT FACE COMPONENT:'
faceIter = om.MItMeshPolygon(meshDagPath,faceComponent)
while 1:
mfaces_indice.append(faceIter.index()) #indice of the faces
faceIter.getVertices(v)
faces.append([v[0],v[1],v[2]])
faceIter.next()
if faceIter.isDone() : break
selIter.next()
if selIter.isDone() : break
return faces,mfaces_indice
if self._usenumpy :
return numpy.array(faceConnects).reshape((len(faceConnects)/3,3))
else :
return faceConnects
def DecomposeMesh(self,poly,edit=True,copy=True,tri=True,transform=True,**kw):
# import numpy
if tri:
self.triangulate(poly)
if type(poly) is str or type(poly) is unicode or type(poly) is list:
mesh = self.getMShape(poly)#dagPath
else :
#have to a object shape node or dagpath
mesh = poly
print ("mesh ", mesh)
if self.getType(mesh.partialPathName()) != self.POLYGON :
if self.getType(mesh.partialPathName()) == self.PARTICULE:
v = self.getParticulesPosition(mesh.partialPathName())
return None,v,None
return None,None,None
#again problem with instance.....
meshnode = om.MFnMesh(mesh)
print ("meshnode",meshnode)
fnTrans = om.MFnTransform(self.getTransformNode(poly)[0])
print ("fnTrans",fnTrans)
# fnTrans = om.MFnTransform(mesh.transform())
#get infos
nv = meshnode.numVertices()
nf = meshnode.numPolygons()
# m = om.MFloatMatrix()
points = om.MFloatPointArray()
normals = om.MFloatVectorArray()
faceConnects = om.MIntArray()
faceCounts = om.MIntArray()
meshnode.getPoints(points)
#meshnode.getNormals(normals)
meshnode.getVertexNormals(False,normals)
meshnode.getTriangles(faceCounts,faceConnects)
fnormals=[]
if self._usenumpy :
faces = numpy.array(faceConnects).reshape((len(faceConnects)/3,3))
else :
faces = faceConnects
vertices = [self.ToVec(points[i]) for i in range(nv)]
vnormals = [self.ToVec(normals[i]) for i in range(nv)]
#remove the copy if its exist? or keep it ?
#need to apply the transformation
if transform :
#node = self.getNode(mesh)
#fnTrans = om.MFnTransform(mesh)
mmat = fnTrans.transformation()
if self._usenumpy :
mat = self.m2matrix(mmat)
vertices = self.ApplyMatrix(vertices,numpy.array(mat).transpose())
vnormals = self.ApplyMatrix(vnormals,numpy.array(mat).transpose())#??
else :
out_mat = [0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0]
self.msutil.createFromList( out_mat, len(out_mat) )
ptr1 = self.msutil.asFloat4Ptr()
mmat.asMatrix().get(ptr1)
m = om.MFloatMatrix(ptr1)
vertices = []
for i in range(nv) :
v = points[i]*m
vertices.append(self.ToVec(v))
# vertices = [self.ToVec(p*m) for p in points]
# if edit and copy :
# self.getCurrentScene().SetActiveObject(poly)
# c4d.CallCommand(100004787) #delete the obj
print ("ok",len(faces),len(vertices),len(vnormals))
if "fn" in kw and kw["fn"] :
fnormals = []
p = om.MVector( 0.,0.,0. )
for i in range(len(faces)) :
meshnode.getPolygonNormal(i,p,om.MSpace.kWorld)#kPostTransform
fnormals.append(self.ToVec(p))
return faces,vertices,vnormals,fnormals
else :
return faces,vertices,vnormals
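    # Usage sketch for DecomposeMesh (hedged example; assumes an instance `helper` and a
    # polygon "myMesh" in the scene):
    #   faces, vertices, vnormals = helper.DecomposeMesh("myMesh", tri=True, transform=True)
    #   # with face normals as well:
    #   faces, vertices, vnormals, fnormals = helper.DecomposeMesh("myMesh", fn=True)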
def connectAttr(self,shape,i=0,mat=None):
if mat is not None :
#print shape
#print mat+"SG"
            cmds.isConnected( shape+'.instObjGroups['+str(i)+']', mat+'SG.dagSetMembers')
#need to get the shape : name+"Shape"
def rotation_matrix(self,angle, direction, point=None,trans=None):
"""
Return matrix to rotate about axis defined by point and direction.
"""
if self._usenumpy:
return Helper.rotation_matrix(angle, direction, point=point,trans=trans)
else :
direction = self.FromVec(direction)
direction.normalize()
out_mat = [1.0, 0.0, 0.0,0.0,
0.0, 1.0, 0.0,0.0,
0.0, 0.0, 1.0,0.0,
0.0, 0.0, 0.0,1.0]
m = self.matrixp2m(out_mat)
# m = om.MTransformationMatrix()
m.setToRotationAxis (direction,angle)
if point is not None:
point = self.FromVec(point)
m.setTranslation(point,om.MSpace.kPostTransform)# = point - (point * m)self.vec2m(trans),om.MSpace.kPostTransform
if trans is not None :
trans = self.FromVec(trans)
m.setTranslation(trans,om.MSpace.kPostTransform)
# M = m2matrix(m)
return m
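    # Usage sketch for rotation_matrix (hedged example; assumes an instance `helper` of this class):
    #   from math import pi
    #   m = helper.rotation_matrix(pi/2., [0., 0., 1.], trans=[1., 0., 0.])
    #   helper.setTransformation("myObj", mat=m)
    # returns an MTransformationMatrix (or a numpy matrix when _usenumpy is set).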
#==============================================================================
# properties objec
#==============================================================================
def getPropertyObject(self, obj, key=["radius"]):
"""
Return the property "key" of the object obj
        * overwritten by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@rtype : int, float, str, dict, list
@return : the property value
"""
res = []
if "pos" in key :
res.append(self.ToVec(self.getTranslation(obj)))
if "scale" in key :
res.append(self.ToVec(self.getScale(obj)))
if "rotation" in key :
mo = self.getTransformation(obj)
m = self.ToMat(mo)#.transpose()
mws = m.transpose()
rotMatj = mws[:]
rotMatj[3][:3]*=0.0
res.append(rotMatj)
if self.getType(obj) == self.SPHERE :
for k in key :
if k == "radius" :
try :
r=cmds.polySphere(obj,q=1,r=1)
except :
r=cmds.sphere(obj,q=1,r=1)
res.append(r)
if self.getType(obj) == self.CYLINDER :
for k in key :
if k == "radius" :
r=cmds.polyCylinder(obj,q=1,r=1)
res.append(r)
elif k == "length" :
h=cmds.polyCylinder(obj,q=1,h=1)
res.append(h)
elif k == "axis" :
ax = cmds.polyCylinder(obj,q=1,axis=1)
res.append(ax)
if self.getType(obj) == self.CUBE :
for k in key :
if k == "length" :
l = self.getBoxSize(obj)#cmds.polyCube(obj, q=True,h=True)
res.append(l)
return res
#===============================================================================
# Texture Mapping / UV
#===============================================================================
def getUV(self,object,faceIndex,vertexIndex,perVertice=True):
mesh = self.getMShape(object)
meshnode = om.MFnMesh(mesh)
#uv=[]
u_util = maya.OpenMaya.MScriptUtil()
u_util.createFromDouble(0.0)
u_ptr = u_util.asFloatPtr()
v_util = maya.OpenMaya.MScriptUtil()
v_util.createFromDouble(0.0)
v_ptr = v_util.asFloatPtr()
if perVertice :
meshnode.getUV(vertexIndex, u_ptr, v_ptr)
u = u_util.getFloat(u_ptr)
v = v_util.getFloat(v_ptr)
return [u,v]
else :
def getuv(faceIndex,iv,u_ptr,v_ptr):
meshnode.getPolygonUV(faceIndex,iv,u_ptr,v_ptr)
u = u_util.getFloat(u_ptr)
v = v_util.getFloat(v_ptr)
return [u,v]
#uv of the face
return [getuv(faceIndex,iv,u_ptr,v_ptr) for iv in range(3)]
#
#
##meshFn = maya.OpenMaya.MFnMesh(node)
##
#u_util = maya.OpenMaya.MScriptUtil()
#u_util.createFromDouble(0.0)
#u_ptr = u_util.asFloatPtr()
#v_util = maya.OpenMaya.MScriptUtil()
#v_util.createFromDouble(0.0)
#v_ptr = v_util.asFloatPtr()
#
#meshFn.getUV(0, u_ptr, v_ptr)
#
#u = u_util.getFloat(u_ptr)
#v = v_util.getFloat(v_ptr))
##getPolygonUVid
##getPolygonUV
#
#should be faster ?
def setUVs(self,object,uvs):
#uvs is a dictionary key are faceindex, values it the actual uv for the 3-4 vertex
ob = self.getObject(object)
node = self.getNode('mesh_'+ob)
meshnode = om.MFnMesh(node)
meshnode.clearUVs()
u = om.MFloatArray()
v = om.MFloatArray()
uvCounts = om.MIntArray()
uvIds = om.MIntArray()
i = 0
for f in uvs:
for k,uv in enumerate(uvs[f]):
uvIds.append(i)
uvCounts.append(len(uvs[f]))
u.append(uv[0])
v.append(uv[1])
#meshnode.setUV(i,uv[0],uv[1])
#meshnode.assignUV(f,k,i)
i = i +1
meshnode.setUVs(u,v)
meshnode.assignUVs(uvCounts,uvIds)
def setUV(self,object,faceIndex,vertexIndex,uv,perVertice=True,uvid=0):
ob = self.getObject(object)
node = self.getNode('mesh_'+ob)
meshnode = om.MFnMesh(node)
for k in range(3):
luv = uv[k]
meshnode.setUV(uvid,luv[0],luv[1])
meshnode.assignUV(faceIndex,k,uvid)
uvid = uvid +1
return uvid
def hyperShade_meVertCol(self):
#mel command : nodeReleaseCallback graph1HyperShadeEd mentalrayVertexColors1 none;
# nodeOutlinerInputsCmd connectWindow|tl|cwForm|connectWindowPane|leftSideCW connectWindow|tl|cwForm|connectWindowPane|rightSideCW; nodeOutliner -e -r connectWindow|tl|cwForm|connectWindowPane|rightSideCW;
# connectAttr -f mesh_MSMS_MOL1crn.colorSet[0].colorName mentalrayVertexColors1.cpvSets[0];
# // Result: Connected mesh_MSMS_MOL1crn.colorSet.colorName to mentalrayVertexColors1.cpvSets. //
# // Result: connectWindow|tl|cwForm|connectWindowPane|rightSideCW //
pass
#==============================================================================
# import / expor / read load / save
#==============================================================================
def readFile(self,filename,**kw):
fileName, fileExtension = os.path.splitext(filename)
fileExtension=fileExtension.replace(".","")
fileExtension=fileExtension.upper()
if fileExtension == "MA":
fileExtension = "mayaAscii"
elif fileExtension == "DAE":
fileExtension = "DAE_FBX"
elif fileExtension == "FBX":
pass
else :
print ("not supported by uPy, contact us!")
return
# doc = self.getCurrentScene()
cmds.file(filename ,type=fileExtension,loadReferenceDepth="all", i=True ) #merge the documets
# c4d.documents.MergeDocument(doc,filename,c4d.SCENEFILTER_OBJECTS|c4d.SCENEFILTER_MATERIALS)
def read(self,filename,**kw):
fileName, fileExtension = os.path.splitext(filename)
fileExtension=fileExtension.replace(".","")
fileExtension=fileExtension.upper()
if fileExtension == "MA":
fileExtension = "mayaAscii"
cmds.file(filename ,type=fileExtension,loadReferenceDepth="all", i=True )
elif fileExtension == "DAE" or fileExtension == "FBX":
import maya.mel as mel
#mel.eval('FBXImportMode -v exmerge;')
filename = filename.replace("\\","\\\\")
mel.eval('FBXImport -f "%s" -t 0;' % filename)#FBXGetTakeName ?
else :
print ("not supported by uPy, contact us!")
return
def write(self,listObj,**kw):
pass
#==============================================================================
# raycasting
#==============================================================================
def raycast(self,obj,start, end, length, **kw ):
#posted on cgtalk.com
#part of http://code.google.com/p/dynamica/
mo = self.getTransformation(obj)
mi = mo.asMatrixInverse()
mat = self.ToMat(mi)#.transpose()
point = self.ApplyMatrix([start],numpy.array(mat).transpose())[0]
direction = self.ApplyMatrix([end],numpy.array(mat).transpose())[0]
#om.MGlobal.clearSelectionList()
om.MGlobal.selectByName(obj)
sList = om.MSelectionList()
#Assign current selection to the selection list object
om.MGlobal.getActiveSelectionList(sList)
item = om.MDagPath()
sList.getDagPath(0, item)
item.extendToShape()
fnMesh = om.MFnMesh(item)
raySource = om.MFloatPoint(float(point[0]), float(point[1]), float(point[2]), 1.0)
rayDir = om.MFloatVector(float(direction[0]-point[0]), float(direction[1]-point[1]), float(direction[2]-point[2]))
faceIds = None
triIds = None
idsSorted = False
testBothDirections = False
worldSpace = om.MSpace.kWorld
maxParam = length#999999
accelParams = None
sortHits = True
hitPoints = om.MFloatPointArray()
#hitRayParams = om.MScriptUtil().asFloatPtr()
hitRayParams = om.MFloatArray()
hitFaces = om.MIntArray()
hitTris = None
hitBarys1 = None
hitBarys2 = None
tolerance = 0.0001
#http://download.autodesk.com/us/maya/2010help/API/class_m_fn_mesh.html#114943af4e75410b0172c58b2818398f
hit = fnMesh.allIntersections(raySource, rayDir, faceIds, triIds, idsSorted, worldSpace,
maxParam, testBothDirections, accelParams, sortHits,
hitPoints, hitRayParams, hitFaces, hitTris, hitBarys1,
hitBarys2, tolerance)
om.MGlobal.clearSelectionList()
#print hit, len(hitFaces)
if "count" in kw :
#result = int(fmod(len(hitFaces), 2))
return hit, len(hitFaces)
#clear selection as may cause problem if the function is called multiple times in succession
return result | gpl-3.0 | -7,838,186,313,154,218,000 | 39.361557 | 212 | 0.542971 | false |
phenoxim/nova | nova/tests/json_ref.py | 1 | 2271 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_serialization import jsonutils
def _resolve_ref(ref, base_path):
file_path, _, json_path = ref.partition('#')
if json_path:
raise NotImplementedError('JSON refs with JSON path after the "#" is '
'not yet supported')
path = os.path.join(base_path, file_path)
# binary mode is needed due to bug/1515231
with open(path, 'r+b') as f:
ref_value = jsonutils.load(f)
base_path = os.path.dirname(path)
res = resolve_refs(ref_value, base_path)
return res
def resolve_refs(obj_with_refs, base_path):
if isinstance(obj_with_refs, list):
for i, item in enumerate(obj_with_refs):
obj_with_refs[i] = resolve_refs(item, base_path)
elif isinstance(obj_with_refs, dict):
if '$ref' in obj_with_refs.keys():
ref = obj_with_refs.pop('$ref')
resolved_ref = _resolve_ref(ref, base_path)
# the rest of the ref dict contains overrides for the ref. Apply
# those overrides recursively here.
_update_dict_recursively(resolved_ref, obj_with_refs)
return resolved_ref
else:
for key, value in obj_with_refs.items():
obj_with_refs[key] = resolve_refs(value, base_path)
else:
# scalar, nothing to do
pass
return obj_with_refs
def _update_dict_recursively(d, update):
"""Update dict d recursively with data from dict update"""
for k, v in update.items():
if k in d and isinstance(d[k], dict) and isinstance(v, dict):
_update_dict_recursively(d[k], v)
else:
d[k] = v
| apache-2.0 | 7,998,802,605,201,372,000 | 35.047619 | 78 | 0.625716 | false |
Insoleet/mirage | example.py | 1 | 1205 | import asyncio
import logging
from duniterpy.documents import BlockUID
from mirage import Node, User
async def example(lp):
node = await Node.start(4444, "testnet", "12356", "123456", lp)
alice = User.create("testnet", "alice", "alicesalt", "alicepassword", BlockUID.empty())
bob = User.create("testnet", "bob", "bobsalt", "bobpassword", BlockUID.empty())
node.forge.push(alice.identity())
node.forge.push(bob.identity())
node.forge.push(alice.join(BlockUID.empty()))
node.forge.push(bob.join(BlockUID.empty()))
node.forge.push(alice.certify(bob, BlockUID.empty()))
node.forge.push(bob.certify(alice, BlockUID.empty()))
node.forge.forge_block()
node.forge.set_member(alice.key.pubkey, True)
node.forge.set_member(bob.key.pubkey, True)
node.forge.forge_block()
node.forge.forge_block()
node.forge.generate_dividend()
node.forge.forge_block()
bob.send_money(10, node.forge.user_identities[bob.key.pubkey].sources, bob,
node.forge.blocks[-1].blockUID, "Test receive")
node.forge.forge_block()
logging.getLogger('mirage').setLevel(logging.DEBUG)
lp = asyncio.get_event_loop()
lp.run_until_complete(example(lp))
| gpl-3.0 | -2,815,379,753,276,030,000 | 35.515152 | 91 | 0.692946 | false |
claytantor/coinbase4py | webapp/settings.py | 1 | 4533 | import os
from ConfigParser import RawConfigParser
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.dirname(__file__)
CONF_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
here = lambda x: os.path.join(os.path.abspath(os.path.dirname(__file__)), x)
# you will need to copy the example and make custom
# settings for the environment
config = RawConfigParser()
#place in a dir that is not managed in the code base
# print 'config dir: {0}/conf/gitpatron_settings.ini'.format(CONF_DIR)
config.read('{0}/conf/coinbase4py_settings.ini'.format(CONF_DIR))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.get('secrets','DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config.get('debug','DEBUG')
TEMPLATE_DEBUG = config.get('debug','TEMPLATE_DEBUG')
ENVIRONMENT = config.get('base','ENVIRONMENT')
ALLOWED_HOSTS = []
#the database for the app
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'coinbase4py.db'),
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.staticfiles',
'coinbase4py',
'webapp',
'webapp.templatetags',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages")
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
WSGI_APPLICATION = 'webapp.wsgi.application'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# Additional locations of static files
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, '../', 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
PROJECT_DIR + '/../webapp/templates/',
)
USER_ONE=config.get('coinbase4py','USER_ONE')
USER_TWO=config.get('coinbase4py','USER_TWO')
TEST_STATE_DIR=config.get('coinbase4py','TEST_STATE_DIR')
COINBASE4PY_PW_SECRET_KEY=config.get('coinbase4py','COINBASE4PY_PW_SECRET_KEY')
COINBASE_OAUTH_CLIENT_APP=config.get('coinbase','COINBASE_OAUTH_CLIENT_APP')
COINBASE_OAUTH_CLIENT_ID=config.get('coinbase','COINBASE_OAUTH_CLIENT_ID')
COINBASE_OAUTH_CLIENT_SECRET=config.get('coinbase','COINBASE_OAUTH_CLIENT_SECRET')
COINBASE_OAUTH_CLIENT_CALLBACK=config.get('coinbase','COINBASE_OAUTH_CLIENT_CALLBACK')
COINBASE4PY_APP_URL=config.get('coinbase','COINBASE4PY_APP_URL')
COINBASE_ORDER_CALLBACK='{0}/{1}'.format(
config.get('coinbase','COINBASE4PY_APP_URL'),
config.get('coinbase','COINBASE_ORDER_CALLBACK'))
| apache-2.0 | 6,435,432,027,036,842,000 | 33.869231 | 86 | 0.701302 | false |
KeplerGO/kadenza | setup.py | 1 | 1430 | #!/usr/bin/env python
import os
import sys
from setuptools import setup
# Prepare and send a new release to PyPI
if "release" in sys.argv[-1]:
os.system("python setup.py sdist")
os.system("twine upload dist/*")
os.system("rm -rf dist/kadenza*")
sys.exit()
# Load the __version__ variable without importing the package already
exec(open('kadenza/version.py').read())
# Command-line tools
entry_points = {'console_scripts': [
'kadenza-tpf = kadenza.kadenza:kadenza_tpf_main',
'kadenza-ffi = kadenza.kadenza:kadenza_ffi_main'
]}
setup(name='kadenza',
version=__version__,
description="Converts raw cadence data from the Kepler spacecraft "
"into astronomer-friendly FITS files.",
long_description=open('README.md').read(),
author='Geert Barentsen',
author_email='[email protected]',
license='MIT',
packages=['kadenza'],
install_requires=['numpy>=1.8',
'astropy>=1.1',
'tqdm'],
entry_points=entry_points,
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Astronomy",
],
)
| mit | 1,101,567,395,859,211,800 | 31.5 | 73 | 0.61049 | false |
rddim/Notepad-plus-plus | scintilla/qt/ScintillaEdit/WidgetGen.py | 5 | 8222 | #!/usr/bin/env python3
# WidgetGen.py - regenerate the ScintillaWidgetCpp.cpp and ScintillaWidgetCpp.h files
# Check that API includes all gtkscintilla2 functions
import sys
import os
import getopt
scintillaDirectory = "../.."
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
from FileGenerator import GenerateFile
def underscoreName(s):
# Name conversion fixes to match gtkscintilla2
irregular = ['WS', 'EOL', 'AutoC', 'KeyWords', 'BackSpace', 'UnIndents', 'RE', 'RGBA']
for word in irregular:
replacement = word[0] + word[1:].lower()
s = s.replace(word, replacement)
out = ""
for c in s:
if c.isupper():
if out:
out += "_"
out += c.lower()
else:
out += c
return out
def normalisedName(s, options, role=None):
if options["qtStyle"]:
if role == "get":
s = s.replace("Get", "")
return s[0].lower() + s[1:]
else:
return underscoreName(s)
typeAliases = {
"position": "int",
"line": "int",
"pointer": "int",
"colour": "int",
"keymod": "int",
"string": "const char *",
"stringresult": "const char *",
"cells": "const char *",
}
def cppAlias(s):
if s in typeAliases:
return typeAliases[s]
elif Face.IsEnumeration(s):
return "int"
else:
return s
understoodTypes = ["", "void", "int", "bool", "position", "line", "pointer",
"colour", "keymod", "string", "stringresult", "cells"]
def understoodType(t):
return t in understoodTypes or Face.IsEnumeration(t)
def checkTypes(name, v):
understandAllTypes = True
if not understoodType(v["ReturnType"]):
#~ print("Do not understand", v["ReturnType"], "for", name)
understandAllTypes = False
if not understoodType(v["Param1Type"]):
#~ print("Do not understand", v["Param1Type"], "for", name)
understandAllTypes = False
if not understoodType(v["Param2Type"]):
#~ print("Do not understand", v["Param2Type"], "for", name)
understandAllTypes = False
return understandAllTypes
def arguments(v, stringResult, options):
ret = ""
p1Type = cppAlias(v["Param1Type"])
if p1Type == "int":
p1Type = "sptr_t"
if p1Type:
ret = ret + p1Type + " " + normalisedName(v["Param1Name"], options)
p2Type = cppAlias(v["Param2Type"])
if p2Type == "int":
p2Type = "sptr_t"
if p2Type and not stringResult:
if p1Type:
ret = ret + ", "
ret = ret + p2Type + " " + normalisedName(v["Param2Name"], options)
return ret
def printPyFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["val"]:
out.append(name + "=" + v["Value"])
if feat in ["evt"]:
out.append("SCN_" + name.upper() + "=" + v["Value"])
if feat in ["fun"]:
out.append("SCI_" + name.upper() + "=" + v["Value"])
return out
def printHFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
returnType = cppAlias(v["ReturnType"])
if returnType == "int":
returnType = "sptr_t"
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
out.append("\t" + returnType + " " + normalisedName(name, options, feat) + "(" +
arguments(v, stringResult, options)+
")" + constDeclarator + ";")
return out
def methodNames(f, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
yield normalisedName(name, options)
def printCPPFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
featureDefineName = "SCI_" + name.upper()
returnType = cppAlias(v["ReturnType"])
if returnType == "int":
returnType = "sptr_t"
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
returnStatement = ""
if returnType != "void":
returnStatement = "return "
out.append(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(" +
arguments(v, stringResult, options) +
")" + constDeclarator + " {")
returns = ""
if stringResult:
returns += " " + returnStatement + "TextReturner(" + featureDefineName + ", "
if "*" in cppAlias(v["Param1Type"]):
returns += "(sptr_t)"
if v["Param1Name"]:
returns += normalisedName(v["Param1Name"], options)
else:
returns += "0"
returns += ");"
else:
returns += " " + returnStatement + "send(" + featureDefineName + ", "
if "*" in cppAlias(v["Param1Type"]):
returns += "(sptr_t)"
if v["Param1Name"]:
returns += normalisedName(v["Param1Name"], options)
else:
returns += "0"
returns += ", "
if "*" in cppAlias(v["Param2Type"]):
returns += "(sptr_t)"
if v["Param2Name"]:
returns += normalisedName(v["Param2Name"], options)
else:
returns += "0"
returns += ");"
out.append(returns)
out.append("}")
out.append("")
return out
def gtkNames():
# The full path on my machine: should be altered for anyone else
p = "C:/Users/Neil/Downloads/wingide-source-4.0.1-1/wingide-source-4.0.1-1/external/gtkscintilla2/gtkscintilla.c"
with open(p) as f:
for l in f.readlines():
if "gtk_scintilla_" in l:
name = l.split()[1][14:]
if '(' in name:
name = name.split('(')[0]
yield name
def usage():
print("WidgetGen.py [-c|--clean][-h|--help][-u|--underscore-names]")
print("")
print("Generate full APIs for ScintillaEdit class and ScintillaConstants.py.")
print("")
print("options:")
print("")
print("-c --clean remove all generated code from files")
print("-h --help display this text")
print("-u --underscore-names use method_names consistent with GTK+ standards")
def readInterface(cleanGenerated):
f = Face.Face()
if not cleanGenerated:
f.ReadFromFile("../../include/Scintilla.iface")
return f
def main(argv):
# Using local path for gtkscintilla2 so don't default to checking
checkGTK = False
cleanGenerated = False
qtStyleInterface = True
# The --gtk-check option checks for full coverage of the gtkscintilla2 API but
# depends on a particular directory so is not mentioned in --help.
opts, args = getopt.getopt(argv, "hcgu", ["help", "clean", "gtk-check", "underscore-names"])
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-c", "--clean"):
cleanGenerated = True
elif opt in ("-g", "--gtk-check"):
checkGTK = True
elif opt in ("-u", "--underscore-names"):
qtStyleInterface = False
options = {"qtStyle": qtStyleInterface}
f = readInterface(cleanGenerated)
try:
GenerateFile("ScintillaEdit.cpp.template", "ScintillaEdit.cpp",
"/* ", True, printCPPFile(f, options))
GenerateFile("ScintillaEdit.h.template", "ScintillaEdit.h",
"/* ", True, printHFile(f, options))
GenerateFile("../ScintillaEditPy/ScintillaConstants.py.template",
"../ScintillaEditPy/ScintillaConstants.py",
"# ", True, printPyFile(f, options))
if checkGTK:
names = set(methodNames(f))
#~ print("\n".join(names))
namesGtk = set(gtkNames())
for name in namesGtk:
if name not in names:
print(name, "not found in Qt version")
for name in names:
if name not in namesGtk:
print(name, "not found in GTK+ version")
except:
raise
if cleanGenerated:
for file in ["ScintillaEdit.cpp", "ScintillaEdit.h", "../ScintillaEditPy/ScintillaConstants.py"]:
try:
os.remove(file)
except OSError:
pass
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | -2,808,446,952,764,096,500 | 28.679104 | 114 | 0.610071 | false |
rdmorganiser/rdmo | rdmo/projects/tests/test_view_project_update_import.py | 1 | 14683 | import os
import re
from pathlib import Path
import pytest
from django.urls import reverse
from rdmo.core.constants import VALUE_TYPE_FILE
from ..models import Project, Value
users = (
('owner', 'owner'),
('manager', 'manager'),
('author', 'author'),
('guest', 'guest'),
('user', 'user'),
('site', 'site'),
('anonymous', None),
)
view_project_permission_map = {
'owner': [1, 2, 3, 4, 5],
'manager': [1, 3, 5],
'author': [1, 3, 5],
'guest': [1, 3, 5],
'api': [1, 2, 3, 4, 5],
'site': [1, 2, 3, 4, 5]
}
change_project_permission_map = {
'owner': [1, 2, 3, 4, 5],
'manager': [1, 3, 5],
'api': [1, 2, 3, 4, 5],
'site': [1, 2, 3, 4, 5]
}
projects = [1, 2, 3, 4, 5]
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_get(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse('project_update_import', args=[project_id])
response = client.get(url)
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 302
assert response.url == '/projects/{}/'.format(project_id)
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_error(db, settings, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse('project_update_import', args=[project_id])
response = client.post(url, {
'method': 'wrong'
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 400
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_upload_file(db, settings, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse('project_update_import', args=[project_id])
xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
with open(xml_file, encoding='utf8') as f:
response = client.post(url, {
'method': 'upload_file',
'uploaded_file': f
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 200
assert b'Import from project.xml' in response.content
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_upload_file_error(db, settings, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse('project_update_import', args=[project_id])
xml_file = os.path.join(settings.BASE_DIR, 'xml', 'error.xml')
with open(xml_file, encoding='utf8') as f:
response = client.post(url, {
'method': 'upload_file',
'uploaded_file': f
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 400
assert b'Files of this type cannot be imported.' in response.content
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_upload_file_empty(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse('project_update_import', args=[project_id])
response = client.post(url, {
'method': 'upload_file'
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 400
assert b'There has been an error with your import.' in response.content
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_import_file(db, settings, client, files, username, password, project_id):
client.login(username=username, password=password)
projects_count = Project.objects.count()
project = Project.objects.get(pk=project_id)
project_updated = project.updated
project_snapshot_count = project.snapshots.count()
project_snapshot_values_count = project.values.filter(snapshot=None).count()
project_values_count = project.values.count()
# upload file
url = reverse('project_update_import', args=[project_id])
xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
with open(xml_file, encoding='utf8') as f:
response = client.post(url, {
'method': 'upload_file',
'uploaded_file': f
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 200
# get keys from the response
keys = re.findall(r'name=\"(.*?)\"', response.content.decode())
# import file
url = reverse('project_update_import', args=[project_id])
data = {key: ['on'] for key in keys}
data.update({'method': 'import_file'})
response = client.post(url, data)
# check if all the files are where are supposed to be
for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
# no new project, snapshots, values were created
project = Project.objects.get(pk=project_id)
assert Project.objects.count() == projects_count
assert project.snapshots.count() == project_snapshot_count
if project_id == 1:
assert project.values.count() == project_values_count
assert project.values.filter(snapshot=None).count() == project_snapshot_values_count
assert project.updated == project_updated
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 302
assert response.url == '/projects/{}/'.format(project_id)
else:
if password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_import_file_empty(db, settings, client, username, password, project_id):
client.login(username=username, password=password)
projects_count = Project.objects.count()
project = Project.objects.get(pk=project_id)
project_updated = project.updated
project_snapshot_count = project.snapshots.count()
project_snapshot_values_count = project.values.filter(snapshot=None).count()
project_values_count = project.values.count()
# upload file
url = reverse('project_update_import', args=[project_id])
xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
with open(xml_file, encoding='utf8') as f:
response = client.post(url, {
'method': 'upload_file',
'uploaded_file': f
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 200, project_id
response = client.post(url, {
'method': 'import_file'
})
# check if all the files are where are supposed to be
for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
# no new project, snapshots, values were created
project = Project.objects.get(pk=project_id)
assert Project.objects.count() == projects_count
assert project.snapshots.count() == project_snapshot_count
assert project.values.count() == project_values_count
assert project.values.filter(snapshot=None).count() == project_snapshot_values_count
assert project.updated == project_updated
assert response.status_code == 302
assert response.url == '/projects/{}/'.format(project_id)
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('source_id', projects)
def test_project_update_import_post_import_project_step1(db, settings, client, username, password, project_id, source_id):
client.login(username=username, password=password)
url = reverse('project_update_import', args=[project_id])
response = client.post(url, {
'method': 'import_project',
'source': source_id
})
if project_id in change_project_permission_map.get(username, []):
if source_id in view_project_permission_map.get(username, []):
assert response.status_code == 200
else:
assert response.status_code == 403
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('source_id', projects)
def test_project_update_import_post_import_project_step2(db, settings, client, username, password, project_id, source_id):
client.login(username=username, password=password)
projects_count = Project.objects.count()
project = Project.objects.get(pk=project_id)
project_updated = project.updated
project_snapshot_count = project.snapshots.count()
project_snapshot_values_count = project.values.filter(snapshot=None).count()
project_values_count = project.values.count()
source = Project.objects.get(pk=source_id)
source_snapshot_count = source.snapshots.count()
source_snapshot_values_count = source.values.filter(snapshot=None).count()
source_values_count = source.values.count()
url = reverse('project_update_import', args=[project_id])
response = client.post(url, {
'method': 'import_project',
'source': source_id
})
if project_id in change_project_permission_map.get(username, []):
if source_id in view_project_permission_map.get(username, []):
assert response.status_code == 200
# get keys from the response
keys = re.findall(r'name=\"(.*?)\"', response.content.decode())
# import file
url = reverse('project_update_import', args=[project_id])
data = {key: ['on'] for key in keys}
data.update({
'method': 'import_project',
'source': source_id
})
response = client.post(url, data)
# check if all the files are where are supposed to be
for file_value in Value.objects.filter(value_type=VALUE_TYPE_FILE):
assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
# no new project, snapshots, values were created
project = Project.objects.get(pk=project_id)
source = Project.objects.get(pk=source_id)
# no new project was created
assert Project.objects.count() == projects_count
# the project has the correct count of snapshot and values
assert project.snapshots.count() == project_snapshot_count
if project_id == 1:
assert project.values.count() == project_values_count
assert project.values.filter(snapshot=None).count() == project_snapshot_values_count
# the source project has the correct count of snapshot and values
assert source.snapshots.count() == source_snapshot_count
if source_id == 1:
assert source.values.count() == source_values_count
assert source.values.filter(snapshot=None).count() == source_snapshot_values_count
assert project.updated == project_updated
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 302
assert response.url == '/projects/{}/'.format(project_id)
else:
if password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
else:
assert response.status_code == 403
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_import_post_import_project_error(db, settings, client, username, password, project_id):
client.login(username=username, password=password)
# upload file
url = reverse('project_update_import', args=[project_id])
response = client.post(url, {
'method': 'import_project'
})
if project_id in change_project_permission_map.get(username, []):
assert response.status_code == 404
elif password:
assert response.status_code == 403
else:
assert response.status_code == 302
assert response.url.startswith('/account/login/')
| apache-2.0 | 2,661,007,531,408,466,000 | 37.03886 | 122 | 0.643397 | false |
encukou/freeipa | ipatests/test_cmdline/test_help.py | 4 | 5658 | # Authors: Petr Viktorin <[email protected]>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
from io import StringIO
import shutil
import errno
import six
from ipalib import api, errors
from ipaserver.plugins.user import user_add
import pytest
if six.PY3:
unicode = str
pytestmark = pytest.mark.needs_ipaapi
@pytest.mark.tier0
class CLITestContext:
"""Context manager that replaces stdout & stderr, and catches SystemExit
Whatever was printed to the streams is available in ``stdout`` and
``stderr`` attrributes once the with statement finishes.
When exception is given, asserts that exception is raised. The exception
will be available in the ``exception`` attribute.
"""
def __init__(self, exception=None):
self.exception = exception
def __enter__(self):
self.old_streams = sys.stdout, sys.stderr
self.stdout_fileobj = sys.stdout = StringIO()
self.stderr_fileobj = sys.stderr = StringIO()
return self
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout, sys.stderr = self.old_streams
self.stdout = self.stdout_fileobj.getvalue()
self.stderr = self.stderr_fileobj.getvalue()
self.stdout_fileobj.close()
self.stderr_fileobj.close()
if self.exception:
if not isinstance(exc_value, self.exception):
return False
self.exception = exc_value
return True
else:
return None
def test_ipa_help():
"""Test that `ipa help` only writes to stdout"""
with CLITestContext() as ctx:
return_value = api.Backend.cli.run(['help'])
assert return_value == 0
assert ctx.stderr == ''
def test_ipa_help_without_cache():
"""Test `ipa help` without schema cache"""
cache_dir = os.path.expanduser('~/.cache/ipa/schema/')
backup_dir = os.path.expanduser('~/.cache/ipa/schema.bak/')
shutil.rmtree(backup_dir, ignore_errors=True)
if os.path.isdir(cache_dir):
os.rename(cache_dir, backup_dir)
try:
with CLITestContext() as ctx:
return_value = api.Backend.cli.run(['help'])
assert return_value == 0
assert ctx.stderr == ''
finally:
shutil.rmtree(cache_dir, ignore_errors=True)
try:
os.rename(backup_dir, cache_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def test_ipa_without_arguments():
"""Test that `ipa` errors out, and prints the help to stderr"""
with CLITestContext(exception=SystemExit) as ctx:
api.Backend.cli.run([])
assert ctx.exception.code == 2
assert ctx.stdout == ''
assert 'Error: Command not specified' in ctx.stderr
with CLITestContext() as help_ctx:
api.Backend.cli.run(['help'])
assert help_ctx.stdout in ctx.stderr
def test_bare_topic():
"""Test that `ipa user` errors out, and prints the help to stderr
This is because `user` is a topic, not a command, so `ipa user` doesn't
match our usage string. The help should be accessed using `ipa help user`.
"""
with CLITestContext(exception=errors.CommandError) as ctx:
api.Backend.cli.run(['user'])
assert ctx.exception.name == 'user'
assert ctx.stdout == ''
with CLITestContext() as help_ctx:
return_value = api.Backend.cli.run(['help', 'user'])
assert return_value == 0
assert help_ctx.stdout in ctx.stderr
def test_command_help():
"""Test that `help user-add` & `user-add -h` are equivalent and contain doc
"""
with CLITestContext() as help_ctx:
return_value = api.Backend.cli.run(['help', 'user-add'])
assert return_value == 0
assert help_ctx.stderr == ''
with CLITestContext(exception=SystemExit) as h_ctx:
api.Backend.cli.run(['user-add', '-h'])
assert h_ctx.exception.code == 0
assert h_ctx.stderr == ''
assert h_ctx.stdout == help_ctx.stdout
assert unicode(user_add.doc) in help_ctx.stdout
def test_ambiguous_command_or_topic():
"""Test that `help ping` & `ping -h` are NOT equivalent
One is a topic, the other is a command
"""
with CLITestContext() as help_ctx:
return_value = api.Backend.cli.run(['help', 'ping'])
assert return_value == 0
assert help_ctx.stderr == ''
with CLITestContext(exception=SystemExit) as h_ctx:
api.Backend.cli.run(['ping', '-h'])
assert h_ctx.exception.code == 0
assert h_ctx.stderr == ''
assert h_ctx.stdout != help_ctx.stdout
def test_multiline_description():
"""Test that all of a multi-line command description appears in output
"""
# This assumes trust_add has multiline doc. Ensure it is so.
assert '\n\n' in unicode(api.Command.trust_add.doc).strip()
with CLITestContext(exception=SystemExit) as help_ctx:
api.Backend.cli.run(['trust-add', '-h'])
assert unicode(api.Command.trust_add.doc).strip() in help_ctx.stdout
| gpl-3.0 | -383,832,913,949,688,060 | 31.147727 | 79 | 0.65783 | false |
neocogent/electrum | setup.py | 1 | 3017 | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
import os
import sys
import platform
import importlib.util
import argparse
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
MIN_PYTHON_VERSION = "3.6.1"
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
if sys.version_info[:3] < _min_python_version_tuple:
sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)
with open('contrib/requirements/requirements.txt') as f:
requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
requirements_hw = f.read().splitlines()
# load version.py; needlessly complicated alternative to "imp.load_source":
version_spec = importlib.util.spec_from_file_location('version', 'electrum/version.py')
version_module = version = importlib.util.module_from_spec(version_spec)
version_spec.loader.exec_module(version_module)
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
icons_dirname = 'pixmaps'
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
icons_dirname = 'icons'
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, icons_dirname), ['electrum/gui/icons/electrum.png']),
]
extras_require = {
'hardware': requirements_hw,
'fast': ['pycryptodomex'],
'gui': ['pyqt5'],
}
extras_require['full'] = [pkg for sublist in list(extras_require.values()) for pkg in sublist]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
python_requires='>={}'.format(MIN_PYTHON_VERSION),
install_requires=requirements,
extras_require=extras_require,
packages=[
'electrum',
'electrum.gui',
'electrum.gui.qt',
'electrum.plugins',
] + [('electrum.plugins.'+pkg) for pkg in find_packages('electrum/plugins')],
package_dir={
'electrum': 'electrum'
},
package_data={
'': ['*.txt', '*.json', '*.ttf', '*.otf'],
'electrum': [
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
],
'electrum.gui': [
'icons/*',
],
},
scripts=['electrum/electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin",
author_email="[email protected]",
license="MIT Licence",
url="https://electrum.org",
long_description="""Lightweight Bitcoin Wallet""",
)
| mit | 6,636,443,891,689,976,000 | 31.095745 | 94 | 0.640371 | false |
M4rtinK/tsubame | core/platform/base_platform_module.py | 1 | 8584 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Base class for Tsubame platform modules.
#----------------------------------------------------------------------------
# Copyright 2017, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from core import constants
from core.signal import Signal
class PlatformModule(object):
"""A Tsubame base platform module."""
def __init__(self):
self.internet_connectivity_changed = Signal()
@property
def platform_id(self):
"""Return an unique string identifying the device module."""
return None
@property
def device_name(self):
"""Return a human readable name of the device."""
return "unknown device"
@property
def preferred_window_wh(self):
"""Return the preferred application window size in pixels."""
# we'll use VGA as a default value
return 640, 480
@property
def start_in_fullscreen(self):
"""Return if Tsubame should be started fullscreen.
NOTE: this is a default value and can be overridden by a
user-set options key, etc.
"""
return False
@property
def fullscreen_only(self):
"""Report if the platform is fullscreen-only.
Some platforms are basically fullscreen-only (Harmattan),
as applications only switch between fullscreen and a task switcher.
"""
return False
@property
def screen_blanking_control_supported(self):
"""There is no universal way to control screen blanking, so its off by default.
NOTE: Screen blanking can be implemented and enabled in the corresponding
device or gui module.
"""
return False
def pause_screen_blanking(self):
"""Pause screen blanking controlled by device module.
calling this method should pause screen blanking
* on mobile devices, screen balking needs to be paused every n seconds
* on desktop, one call might be enough, still, several calls should
be handled without issues
* also what about restoring the screen blanking on Desktop
once Tsubame exits ?
"""
pass
@property
def supported_gui_module_ids(self):
"""Supported GUI module IDs, ordered by preference from left to right.
THE ":" NOTATION
single GUI modules might support different subsets, the usability of
these subsets can vary based on the current platform
-> this functions enabled device modules to report which GUI subsets
are most suitable for the given platform
-> the string starts with the module id prefix, is separated by : and
continues with the subset id
EXAMPLE: ["QML:harmattan","QML:indep","GTK"]
-> QML GUI with Harmattan Qt Components is preferred,
QML GUI with platform independent Qt Components is less preferred
and the GTK GUI is set as a fallback if everything else fails
CURRENT USAGE
there are different incompatible native Qt Component sets
on various platforms (Harmattan QTC, Plasma Active QTC, Jolla QTC,...)
the QML GUI aims to support most of these components sets to provide
native look & feel and the subset id is used by the device module
to signal the GUI module which QTC component to use
"""
return ["qt5"] # the Qt 5 GUI is the default
@property
def has_notification_support(self):
"""Report if the device provides its own notification method."""
return False
def notify(self, message, msTimeout=0, icon=""):
"""Send a notification using platform/device specific API."""
pass
@property
def has_keyboard(self):
"""Report if the device has a hardware keyboard."""
return True
@property
def has_buttons(self):
"""Report if the device has some usable buttons other than a hardware keyboard."""
if self.has_volume_keys:
return True
else:
return False
@property
def has_volume_keys(self):
"""Report if the device has application-usable volume control keys or their equivalent.
Basically basically just two nearby button that can be used for zooming up/down,
skipping to next/previous and similar actions.
"""
return False
def enable_volume_keys(self):
pass
@property
def profile_path(self):
"""Return path to the main profile folder or None if default path should be used.
:returns: path to the profile folder or None
:rtype: str or None
"""
return None
@property
def needs_quit_button(self):
"""On some platforms applications need to provide their own shutdown buttons."""
return False
@property
def needs_back_button(self):
"""Some platforms (Sailfish OS) don't need a in-UI back button."""
return True
@property
def needs_page_background(self):
"""Some platforms (Sailfish OS) don't need a page background."""
return True
@property
def handles_url_opening(self):
"""Some platform provide specific APIs for URL opening.
For example, on the N900 a special DBUS command not available
elsewhere needs to be used.
"""
return False
def open_url(self, url):
"""Open a URL."""
import webbrowser
webbrowser.open(url)
@property
def connectivity_status(self):
"""Report the current status of internet connectivity on the device.
None - status reporting not supported or status unknown
True - connected to the Internet
False - disconnected from the Internet
"""
connected = constants.InternetConnectivityStatus.OFFLINE
# open the /proc/net/route file
with open('/proc/net/route', 'r') as f:
for line in f:
# the line is delimited by tabulators
lineSplit = line.split('\t')
# check if the length is valid
if len(lineSplit) >= 11:
if lineSplit[1] == '00000000' and lineSplit[7] == '00000000':
# if destination and mask are 00000000,
# it is probably an Internet connection
connected = constants.InternetConnectivityStatus.ONLINE
break
return connected
def enable_internet_connectivity(self):
"""Try to make sure that the device connects to the Internet."""
pass
@property
def device_type(self):
"""Returns type of the current device.
The device can currently be either a PC
(desktop or laptop/notebook),
smartphone or a tablet.
This is currently used mainly for rough
DPI estimation.
Example:
* high resolution & PC -> low DPI
* high resolution & smartphone -> high DPI
* high resolution & smartphone -> low DPI
This could also be used in the future to
use different PC/smartphone/tablet GUI styles.
By default, the device type is unknown.
"""
return None
@property
def qmlscene_command(self):
"""What should be called to start the qmlscene.
:returns: command to run to start qmlscene
:rtype: str
"""
return "qmlscene"
@property
def universal_components_backend(self):
"""Path to a Universal Components backend suitable for the given platform.
We default to the Controls UC backend.
:returns: path to suitable UC backend
:rtype: str
"""
return "controls"
| gpl-3.0 | -3,431,817,031,588,530,700 | 33.894309 | 95 | 0.612069 | false |
JazzeYoung/VeryDeepAutoEncoder | theano/tensor/tests/test_extra_ops.py | 1 | 36192 | from __future__ import absolute_import, print_function, division
import numpy as np
import numpy
import theano
from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (SearchsortedOp, searchsorted,
CumsumOp, cumsum, CumprodOp, cumprod,
CpuContiguous, cpu_contiguous, BinCountOp,
bincount, DiffOp, diff, squeeze, compress,
RepeatOp, repeat, Bartlett, bartlett,
FillDiagonal, fill_diagonal,
FillDiagonalOffset, fill_diagonal_offset,
to_one_hot, Unique)
from theano import tensor as T
from theano import config, tensor, function
from theano.tests.unittest_tools import attr
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_16 = bool(numpy_ver >= [1, 6])
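# ``minlength`` was added to np.bincount in NumPy 1.6; tests that use it
# check this flag first and are skipped on older versions.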
def test_cpu_contiguous():
a = T.fmatrix('a')
i = T.iscalar('i')
a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
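    # a.reshape((5, 4))[::i] is not C-contiguous for step i > 1, so
    # cpu_contiguous must return a C-contiguous copy in every case.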
f = theano.function([a, i], cpu_contiguous(a.reshape((5, 4))[::i]))
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, CpuContiguous) for node in topo])
assert f(a_val, 1).flags['C_CONTIGUOUS']
assert f(a_val, 2).flags['C_CONTIGUOUS']
assert f(a_val, 3).flags['C_CONTIGUOUS']
# Test the grad:
theano.tests.unittest_tools.verify_grad(cpu_contiguous,
[numpy.random.rand(5, 7, 2)])
class TestSearchsortedOp(utt.InferShapeTester):
def setUp(self):
super(TestSearchsortedOp, self).setUp()
self.op_class = SearchsortedOp
self.op = SearchsortedOp()
self.x = T.vector('x')
self.v = T.tensor3('v')
self.a = 30 * np.random.random(50).astype(config.floatX)
self.b = 30 * np.random.random((8, 10, 5)).astype(config.floatX)
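        # indices that sort self.a, so self.a[self.idx_sorted] is a sorted copy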
self.idx_sorted = np.argsort(self.a).astype('int32')
def test_searchsortedOp_on_sorted_input(self):
f = theano.function([self.x, self.v], searchsorted(self.x, self.v))
assert np.allclose(np.searchsorted(self.a[self.idx_sorted], self.b),
f(self.a[self.idx_sorted], self.b))
sorter = T.vector('sorter', dtype='int32')
f = theano.function([self.x, self.v, sorter], self.x.searchsorted(self.v, sorter=sorter, side='right'))
assert np.allclose(self.a.searchsorted(self.b, sorter=self.idx_sorted, side='right'),
f(self.a, self.b, self.idx_sorted))
sa = self.a[self.idx_sorted]
f = theano.function([self.x, self.v], self.x.searchsorted(self.v, side='right'))
assert np.allclose(sa.searchsorted(self.b, side='right'), f(sa, self.b))
def test_searchsortedOp_wrong_side_kwd(self):
self.assertRaises(ValueError, searchsorted, self.x, self.v, side='asdfa')
def test_searchsortedOp_on_no_1d_inp(self):
no_1d = T.dmatrix('no_1d')
self.assertRaises(ValueError, searchsorted, no_1d, self.v)
self.assertRaises(ValueError, searchsorted, self.x, self.v, sorter=no_1d)
def test_searchsortedOp_on_float_sorter(self):
sorter = T.vector('sorter', dtype="float32")
self.assertRaises(TypeError, searchsorted,
self.x, self.v, sorter=sorter)
def test_searchsortedOp_on_int_sorter(self):
compatible_types = ('int8', 'int16', 'int32')
if theano.configdefaults.python_int_bitwidth() == 64:
compatible_types += ('int64',)
        # unsigned sorter dtypes ('uint8', 'uint16', 'uint32', 'uint64')
        # are intentionally not tested here
for dtype in compatible_types:
sorter = T.vector('sorter', dtype=dtype)
f = theano.function([self.x, self.v, sorter],
searchsorted(self.x, self.v, sorter=sorter),
allow_input_downcast=True)
assert np.allclose(np.searchsorted(self.a, self.b, sorter=self.idx_sorted),
f(self.a, self.b, self.idx_sorted))
def test_searchsortedOp_on_right_side(self):
f = theano.function([self.x, self.v],
searchsorted(self.x, self.v, side='right'))
assert np.allclose(np.searchsorted(self.a, self.b, side='right'),
f(self.a, self.b))
def test_infer_shape(self):
# Test using default parameters' value
self._compile_and_check([self.x, self.v],
[searchsorted(self.x, self.v)],
[self.a[self.idx_sorted], self.b],
self.op_class)
# Test parameter ``sorter``
sorter = T.vector('sorter', dtype="int32")
self._compile_and_check([self.x, self.v, sorter],
[searchsorted(self.x, self.v, sorter=sorter)],
[self.a, self.b, self.idx_sorted],
self.op_class)
# Test parameter ``side``
la = np.ones(10).astype(config.floatX)
lb = np.ones(shape=(1, 2, 3)).astype(config.floatX)
self._compile_and_check([self.x, self.v],
[searchsorted(self.x, self.v, side='right')],
[la, lb],
self.op_class)
def test_grad(self):
utt.verify_grad(self.op, [self.a[self.idx_sorted], self.b])
class TestCumsumOp(utt.InferShapeTester):
def setUp(self):
super(TestCumsumOp, self).setUp()
self.op_class = CumsumOp
self.op = CumsumOp()
def test_cumsumOp(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis out of bounds
self.assertRaises(ValueError, cumsum, x, axis=3)
self.assertRaises(ValueError, cumsum, x, axis=-4)
f = theano.function([x], cumsum(x))
assert np.allclose(np.cumsum(a), f(a)) # Test axis=None
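        # Test every valid axis, using both negative and positive indices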
for axis in range(-len(a.shape), len(a.shape)):
f = theano.function([x], cumsum(x, axis=axis))
assert np.allclose(np.cumsum(a, axis=axis), f(a))
def test_infer_shape(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis=None
self._compile_and_check([x],
[self.op(x)],
[a],
self.op_class)
for axis in range(-len(a.shape), len(a.shape)):
self._compile_and_check([x],
[cumsum(x, axis=axis)],
[a],
self.op_class)
def test_grad(self):
a = np.random.random((3, 5, 2)).astype(config.floatX)
utt.verify_grad(self.op, [a]) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
utt.verify_grad(self.op_class(axis=axis), [a], eps=4e-4)
class TestCumprodOp(utt.InferShapeTester):
def setUp(self):
super(TestCumprodOp, self).setUp()
self.op_class = CumprodOp
self.op = CumprodOp()
def test_CumprodOp(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis out of bounds
self.assertRaises(ValueError, cumprod, x, axis=3)
self.assertRaises(ValueError, cumprod, x, axis=-4)
f = theano.function([x], cumprod(x))
assert np.allclose(np.cumprod(a), f(a)) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
f = theano.function([x], cumprod(x, axis=axis))
assert np.allclose(np.cumprod(a, axis=axis), f(a))
def test_infer_shape(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis=None
self._compile_and_check([x],
[self.op(x)],
[a],
self.op_class)
for axis in range(-len(a.shape), len(a.shape)):
self._compile_and_check([x],
[cumprod(x, axis=axis)],
[a],
self.op_class)
def test_grad(self):
a = np.random.random((3, 5, 2)).astype(config.floatX)
utt.verify_grad(self.op, [a]) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
utt.verify_grad(self.op_class(axis=axis), [a])
class TestBinCountOp(utt.InferShapeTester):
def setUp(self):
super(TestBinCountOp, self).setUp()
self.op_class = BinCountOp
self.op = BinCountOp()
def test_bincountFn(self):
w = T.vector('w')
def ref(data, w=None, minlength=None):
size = int(data.max() + 1)
if minlength:
size = max(size, minlength)
if w is not None:
out = np.zeros(size, dtype=w.dtype)
for i in range(data.shape[0]):
out[data[i]] += w[i]
else:
out = np.zeros(size, dtype=a.dtype)
for i in range(data.shape[0]):
out[data[i]] += 1
return out
for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'):
x = T.vector('x', dtype=dtype)
a = np.random.random_integers(50, size=(25)).astype(dtype)
weights = np.random.random((25,)).astype(config.floatX)
f1 = theano.function([x], bincount(x))
f2 = theano.function([x, w], bincount(x, weights=w))
assert (ref(a) == f1(a)).all()
assert np.allclose(ref(a, weights), f2(a, weights))
f3 = theano.function([x], bincount(x, minlength=55))
f4 = theano.function([x], bincount(x, minlength=5))
assert (ref(a, minlength=55) == f3(a)).all()
assert (ref(a, minlength=5) == f4(a)).all()
# skip the following test when using unsigned ints
if not dtype.startswith('u'):
a[0] = -1
f5 = theano.function([x], bincount(x, assert_nonneg=True))
self.assertRaises(AssertionError, f5, a)
def test_bincountOp(self):
w = T.vector('w')
for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'):
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.configdefaults.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
x = T.vector('x', dtype=dtype)
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, BinCountOp(), x)
else:
a = np.random.random_integers(50, size=(25)).astype(dtype)
weights = np.random.random((25,)).astype(config.floatX)
f1 = theano.function([x], BinCountOp()(x, weights=None))
f2 = theano.function([x, w], BinCountOp()(x, weights=w))
assert (np.bincount(a) == f1(a)).all()
assert np.allclose(np.bincount(a, weights=weights),
f2(a, weights))
if not numpy_16:
continue
f3 = theano.function([x], BinCountOp(minlength=23)(x, weights=None))
f4 = theano.function([x], BinCountOp(minlength=5)(x, weights=None))
assert (np.bincount(a, minlength=23) == f3(a)).all()
assert (np.bincount(a, minlength=5) == f4(a)).all()
@attr('slow')
def test_infer_shape(self):
for dtype in tensor.discrete_dtypes:
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.configdefaults.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
x = T.vector('x', dtype=dtype)
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, BinCountOp(), x)
else:
self._compile_and_check([x],
[BinCountOp()(x, None)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
weights = np.random.random((25,)).astype(config.floatX)
self._compile_and_check([x],
[BinCountOp()(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
if not numpy_16:
continue
self._compile_and_check([x],
[BinCountOp(minlength=60)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check([x],
[BinCountOp(minlength=5)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
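# --- Illustrative note (not part of the original tests) ---
# A concrete instance of what the `ref` helper in TestBinCountOp reproduces:
#
#   np.bincount([1, 1, 3], minlength=6)             -> [0, 2, 0, 1, 0, 0]
#   np.bincount([1, 1, 3], weights=[.5, .25, 2.])   -> [0., 0.75, 0., 2.]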
class TestDiffOp(utt.InferShapeTester):
nb = 10 # Number of time iterating for n
def setUp(self):
super(TestDiffOp, self).setUp()
self.op_class = DiffOp
self.op = DiffOp()
def test_diffOp(self):
x = T.matrix('x')
a = np.random.random((30, 50)).astype(config.floatX)
f = theano.function([x], diff(x))
assert np.allclose(np.diff(a), f(a))
for axis in range(len(a.shape)):
for k in range(TestDiffOp.nb):
g = theano.function([x], diff(x, n=k, axis=axis))
assert np.allclose(np.diff(a, n=k, axis=axis), g(a))
def test_infer_shape(self):
x = T.matrix('x')
a = np.random.random((30, 50)).astype(config.floatX)
self._compile_and_check([x],
[self.op(x)],
[a],
self.op_class)
for axis in range(len(a.shape)):
for k in range(TestDiffOp.nb):
self._compile_and_check([x],
[diff(x, n=k, axis=axis)],
[a],
self.op_class)
def test_grad(self):
x = T.vector('x')
a = np.random.random(50).astype(config.floatX)
theano.function([x], T.grad(T.sum(diff(x)), x))
utt.verify_grad(self.op, [a])
for k in range(TestDiffOp.nb):
theano.function([x], T.grad(T.sum(diff(x, n=k)), x))
utt.verify_grad(DiffOp(n=k), [a], eps=7e-3)
class SqueezeTester(utt.InferShapeTester):
shape_list = [(1, 3),
(1, 2, 3),
(1, 5, 1, 1, 6)]
broadcast_list = [[True, False],
[True, False, False],
[True, False, True, True, False]]
def setUp(self):
super(SqueezeTester, self).setUp()
self.op = squeeze
def test_op(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
variable = tensor.TensorType(theano.config.floatX, broadcast)()
f = theano.function([variable], self.op(variable))
expected = numpy.squeeze(data)
tested = f(data)
assert tested.shape == expected.shape
assert numpy.allclose(tested, expected)
def test_infer_shape(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
variable = tensor.TensorType(theano.config.floatX, broadcast)()
self._compile_and_check([variable],
[self.op(variable)],
[data],
tensor.DimShuffle,
warn=False)
def test_grad(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
utt.verify_grad(self.op, [data])
def test_var_interface(self):
# same as test_op, but use a_theano_var.squeeze.
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
variable = tensor.TensorType(theano.config.floatX, broadcast)()
f = theano.function([variable], variable.squeeze())
expected = numpy.squeeze(data)
tested = f(data)
assert tested.shape == expected.shape
assert numpy.allclose(tested, expected)
class CompressTester(utt.InferShapeTester):
axis_list = [None,
-1,
0,
0,
0,
1]
cond_list = [[1, 0, 1, 0, 0, 1],
[0, 1, 1, 0],
[0, 1, 1, 0],
[],
[0, 0, 0, 0],
[1, 1, 0, 1, 0]]
shape_list = [(2, 3),
(4, 3),
(4, 3),
(4, 3),
(4, 3),
(3, 5)]
def setUp(self):
super(CompressTester, self).setUp()
self.op = compress
def test_op(self):
for axis, cond, shape in zip(self.axis_list, self.cond_list,
self.shape_list):
cond_var = theano.tensor.ivector()
data = numpy.random.random(size=shape).astype(theano.config.floatX)
data_var = theano.tensor.matrix()
f = theano.function([cond_var, data_var],
self.op(cond_var, data_var, axis=axis))
expected = numpy.compress(cond, data, axis=axis)
tested = f(cond, data)
assert tested.shape == expected.shape
assert numpy.allclose(tested, expected)
class TestRepeatOp(utt.InferShapeTester):
def _possible_axis(self, ndim):
return [None] + list(range(ndim)) + [-i for i in range(ndim)]
def setUp(self):
super(TestRepeatOp, self).setUp()
self.op_class = RepeatOp
self.op = RepeatOp()
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
ptr_bitwidth = theano.configdefaults.local_bitwidth()
if ptr_bitwidth == 64:
self.numpy_unsupported_dtypes = ('uint64',)
if ptr_bitwidth == 32:
self.numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
def test_repeatOp(self):
for ndim in range(3):
x = T.TensorType(config.floatX, [False] * ndim)()
a = np.random.random((10, ) * ndim).astype(config.floatX)
for axis in self._possible_axis(ndim):
for dtype in tensor.discrete_dtypes:
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if (dtype == 'uint64' or
(dtype in self.numpy_unsupported_dtypes and
r_var.ndim == 1)):
self.assertRaises(TypeError, repeat, x, r_var, axis=axis)
else:
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a, r))
r_var = T.vector(dtype=dtype)
if axis is None:
r = np.random.random_integers(
5, size=a.size).astype(dtype)
else:
r = np.random.random_integers(
5, size=(10,)).astype(dtype)
if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
self.assertRaises(TypeError,
repeat, x, r_var, axis=axis)
else:
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a, r))
# check when r is a list of single integer, e.g. [3].
r = np.random.random_integers(
10, size=()).astype(dtype) + 2
f = theano.function([x],
repeat(x, [r], axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a))
assert not np.any([isinstance(n.op, RepeatOp)
for n in f.maker.fgraph.toposort()])
# check when r is theano tensortype that broadcastable is (True,)
r_var = theano.tensor.TensorType(broadcastable=(True,),
dtype=dtype)()
r = np.random.random_integers(5, size=(1,)).astype(dtype)
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r[0], axis=axis),
f(a, r))
assert not np.any([isinstance(n.op, RepeatOp)
for n in f.maker.fgraph.toposort()])
@attr('slow')
def test_infer_shape(self):
for ndim in range(4):
x = T.TensorType(config.floatX, [False] * ndim)()
shp = (numpy.arange(ndim) + 1) * 5
a = np.random.random(shp).astype(config.floatX)
for axis in self._possible_axis(ndim):
for dtype in tensor.discrete_dtypes:
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if dtype in self.numpy_unsupported_dtypes:
r_var = T.vector(dtype=dtype)
self.assertRaises(TypeError, repeat, x, r_var)
else:
self._compile_and_check([x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
r_var = T.vector(dtype=dtype)
if axis is None:
r = np.random.random_integers(
5, size=a.size).astype(dtype)
elif a.size > 0:
r = np.random.random_integers(
5, size=a.shape[axis]).astype(dtype)
else:
r = np.random.random_integers(
5, size=(10,)).astype(dtype)
self._compile_and_check(
[x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
def test_grad(self):
for ndim in range(3):
a = np.random.random((10, ) * ndim).astype(config.floatX)
for axis in self._possible_axis(ndim):
utt.verify_grad(lambda x: RepeatOp(axis=axis)(x, 3), [a])
def test_broadcastable(self):
x = T.TensorType(config.floatX, [False, True, False])()
r = RepeatOp(axis=1)(x, 2)
self.assertEqual(r.broadcastable, (False, False, False))
r = RepeatOp(axis=1)(x, 1)
self.assertEqual(r.broadcastable, (False, True, False))
r = RepeatOp(axis=0)(x, 2)
self.assertEqual(r.broadcastable, (False, True, False))
class TestBartlett(utt.InferShapeTester):
def setUp(self):
super(TestBartlett, self).setUp()
self.op_class = Bartlett
self.op = bartlett
def test_perform(self):
x = tensor.lscalar()
f = function([x], self.op(x))
M = numpy.random.random_integers(3, 50, size=())
assert numpy.allclose(f(M), numpy.bartlett(M))
assert numpy.allclose(f(0), numpy.bartlett(0))
assert numpy.allclose(f(-1), numpy.bartlett(-1))
b = numpy.array([17], dtype='uint8')
assert numpy.allclose(f(b[0]), numpy.bartlett(b[0]))
def test_infer_shape(self):
x = tensor.lscalar()
self._compile_and_check([x], [self.op(x)],
[numpy.random.random_integers(3, 50, size=())],
self.op_class)
self._compile_and_check([x], [self.op(x)], [0], self.op_class)
self._compile_and_check([x], [self.op(x)], [1], self.op_class)
class TestFillDiagonal(utt.InferShapeTester):
rng = numpy.random.RandomState(43)
def setUp(self):
super(TestFillDiagonal, self).setUp()
self.op_class = FillDiagonal
self.op = fill_diagonal
def test_perform(self):
x = tensor.matrix()
y = tensor.scalar()
f = function([x, y], fill_diagonal(x, y))
for shp in [(8, 8), (5, 8), (8, 5)]:
a = numpy.random.rand(*shp).astype(config.floatX)
val = numpy.cast[config.floatX](numpy.random.rand())
out = f(a, val)
# We can't use numpy.fill_diagonal as it is bugged.
assert numpy.allclose(numpy.diag(out), val)
assert (out == val).sum() == min(a.shape)
# test for 3d tensor
a = numpy.random.rand(3, 3, 3).astype(config.floatX)
x = tensor.tensor3()
y = tensor.scalar()
f = function([x, y], fill_diagonal(x, y))
val = numpy.cast[config.floatX](numpy.random.rand() + 10)
out = f(a, val)
# We can't use numpy.fill_diagonal as it is bugged.
assert out[0, 0, 0] == val
assert out[1, 1, 1] == val
assert out[2, 2, 2] == val
assert (out == val).sum() == min(a.shape)
@attr('slow')
def test_gradient(self):
utt.verify_grad(fill_diagonal, [numpy.random.rand(5, 8),
numpy.random.rand()],
n_tests=1, rng=TestFillDiagonal.rng)
utt.verify_grad(fill_diagonal, [numpy.random.rand(8, 5),
numpy.random.rand()],
n_tests=1, rng=TestFillDiagonal.rng)
def test_infer_shape(self):
z = tensor.dtensor3()
x = tensor.dmatrix()
y = tensor.dscalar()
self._compile_and_check([x, y], [self.op(x, y)],
[numpy.random.rand(8, 5),
numpy.random.rand()],
self.op_class)
self._compile_and_check([z, y], [self.op(z, y)],
# must be square when nd>2
[numpy.random.rand(8, 8, 8),
numpy.random.rand()],
self.op_class,
warn=False)
class TestFillDiagonalOffset(utt.InferShapeTester):
rng = numpy.random.RandomState(43)
def setUp(self):
super(TestFillDiagonalOffset, self).setUp()
self.op_class = FillDiagonalOffset
self.op = fill_diagonal_offset
def test_perform(self):
x = tensor.matrix()
y = tensor.scalar()
z = tensor.iscalar()
f = function([x, y, z], fill_diagonal_offset(x, y, z))
for test_offset in (-5, -4, -1, 0, 1, 4, 5):
for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
a = numpy.random.rand(*shp).astype(config.floatX)
val = numpy.cast[config.floatX](numpy.random.rand())
out = f(a, val, test_offset)
# We can't use numpy.fill_diagonal as it is bugged.
assert numpy.allclose(numpy.diag(out, test_offset), val)
if test_offset >= 0:
assert (out == val).sum() == min(min(a.shape),
a.shape[1] - test_offset)
else:
assert (out == val).sum() == min(min(a.shape),
a.shape[0] + test_offset)
def test_gradient(self):
for test_offset in (-5, -4, -1, 0, 1, 4, 5):
# input 'offset' will not be tested
def fill_diagonal_with_fix_offset(a, val):
return fill_diagonal_offset(a, val, test_offset)
utt.verify_grad(fill_diagonal_with_fix_offset,
[numpy.random.rand(5, 8), numpy.random.rand()],
n_tests=1, rng=TestFillDiagonalOffset.rng)
utt.verify_grad(fill_diagonal_with_fix_offset,
[numpy.random.rand(8, 5), numpy.random.rand()],
n_tests=1, rng=TestFillDiagonalOffset.rng)
utt.verify_grad(fill_diagonal_with_fix_offset,
[numpy.random.rand(5, 5), numpy.random.rand()],
n_tests=1, rng=TestFillDiagonalOffset.rng)
def test_infer_shape(self):
x = tensor.dmatrix()
y = tensor.dscalar()
z = tensor.iscalar()
for test_offset in (-5, -4, -1, 0, 1, 4, 5):
self._compile_and_check([x, y, z], [self.op(x, y, z)],
[numpy.random.rand(8, 5),
numpy.random.rand(),
test_offset],
self.op_class)
self._compile_and_check([x, y, z], [self.op(x, y, z)],
[numpy.random.rand(5, 8),
numpy.random.rand(),
test_offset],
self.op_class)
def test_to_one_hot():
v = theano.tensor.ivector()
o = to_one_hot(v, 10)
f = theano.function([v], o)
out = f([1, 2, 3, 5, 6])
assert out.dtype == theano.config.floatX
assert numpy.allclose(
out,
[[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
v = theano.tensor.ivector()
o = to_one_hot(v, 10, dtype="int32")
f = theano.function([v], o)
out = f([1, 2, 3, 5, 6])
assert out.dtype == "int32"
assert numpy.allclose(
out,
[[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
class test_Unique(utt.InferShapeTester):
def setUp(self):
super(test_Unique, self).setUp()
self.op_class = Unique
self.ops = [Unique(),
Unique(True),
Unique(False, True),
Unique(True, True)]
if bool(numpy_ver >= [1, 9]):
self.ops.extend([
Unique(False, False, True),
Unique(True, False, True),
Unique(False, True, True),
Unique(True, True, True)])
def test_basic_vector(self):
"""
Basic test for a vector.
Done by using the op and checking that it returns the right answer.
"""
x = theano.tensor.vector()
inp = np.asarray([2, 1, 3, 2], dtype=config.floatX)
list_outs_expected = [[np.unique(inp)],
np.unique(inp, True),
np.unique(inp, False, True),
np.unique(inp, True, True)]
if bool(numpy_ver >= [1, 9]):
list_outs_expected.extend([
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)])
for op, outs_expected in zip(self.ops, list_outs_expected):
f = theano.function(inputs=[x], outputs=op(x, return_list=True))
outs = f(inp)
# Compare the result computed to the expected value.
for out, out_exp in zip(outs, outs_expected):
utt.assert_allclose(out, out_exp)
def test_basic_matrix(self):
""" Basic test for a matrix.
Done by using the op and checking that it returns the right answer.
"""
x = theano.tensor.matrix()
inp = np.asarray([[2, 1], [3, 2], [2, 3]], dtype=config.floatX)
list_outs_expected = [[np.unique(inp)],
np.unique(inp, True),
np.unique(inp, False, True),
np.unique(inp, True, True)]
if bool(numpy_ver >= [1, 9]):
list_outs_expected.extend([
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)])
for op, outs_expected in zip(self.ops, list_outs_expected):
f = theano.function(inputs=[x], outputs=op(x, return_list=True))
outs = f(inp)
# Compare the result computed to the expected value.
for out, out_exp in zip(outs, outs_expected):
utt.assert_allclose(out, out_exp)
def test_infer_shape_vector(self):
"""
Testing the infer_shape with a vector.
"""
x = theano.tensor.vector()
for op in self.ops:
if not op.return_inverse:
continue
if op.return_index:
f = op(x)[2]
else:
f = op(x)[1]
self._compile_and_check([x],
[f],
[np.asarray(np.array([2, 1, 3, 2]),
dtype=config.floatX)],
self.op_class)
def test_infer_shape_matrix(self):
"""
Testing the infer_shape with a matrix.
"""
x = theano.tensor.matrix()
for op in self.ops:
if not op.return_inverse:
continue
if op.return_index:
f = op(x)[2]
else:
f = op(x)[1]
self._compile_and_check([x],
[f],
[np.asarray(np.array([[2, 1], [3, 2], [2, 3]]),
dtype=config.floatX)],
self.op_class)
| bsd-3-clause | -2,868,292,713,453,321,000 | 39.573991 | 111 | 0.475851 | false |
avlach/univbris-ocf | vt_manager/src/python/vt_manager/communication/sfa/util/callids.py | 2 | 2280 | #!/usr/bin/python
import threading
import time
try:  # logger is used below; fall back to the stdlib if the sfa logging helper is unavailable
    from vt_manager.communication.sfa.util.sfalogging import logger
except ImportError:
    import logging
    logger = logging.getLogger(__name__)
"""
Callids: a simple mechanism to remember the call ids served so far
memory-only for now - thread-safe
implemented as a (singleton) hash 'callid'->timestamp
"""
debug=False
class _call_ids_impl (dict):
_instance = None
# 5 minutes sounds amply enough
purge_timeout=5*60
# when trying to get a lock
retries=10
# in ms
wait_ms=100
def __init__(self):
self._lock=threading.Lock()
# the only primitive
    # return True if the call_id has already been handled, False otherwise
def already_handled (self,call_id):
# if not provided in the call...
if not call_id: return False
has_lock=False
for attempt in range(_call_ids_impl.retries):
if debug: logger.debug("Waiting for lock (%d)"%attempt)
if self._lock.acquire(False):
has_lock=True
if debug: logger.debug("got lock (%d)"%attempt)
break
time.sleep(float(_call_ids_impl.wait_ms)/1000)
# in the unlikely event where we can't get the lock
if not has_lock:
            logger.warning("_call_ids_impl.already_handled: could not acquire lock")
return False
# we're good to go
if self.has_key(call_id):
self._purge()
self._lock.release()
return True
self[call_id]=time.time()
self._purge()
self._lock.release()
if debug: logger.debug("released lock")
return False
def _purge(self):
now=time.time()
o_keys=[]
for (k,v) in self.iteritems():
if (now-v) >= _call_ids_impl.purge_timeout: o_keys.append(k)
for k in o_keys:
if debug: logger.debug("Purging call_id %r (%s)"%(k,time.strftime("%H:%M:%S",time.localtime(self[k]))))
del self[k]
if debug:
logger.debug("AFTER PURGE")
for (k,v) in self.iteritems(): logger.debug("%s -> %s"%(k,time.strftime("%H:%M:%S",time.localtime(v))))
def Callids ():
if not _call_ids_impl._instance:
_call_ids_impl._instance = _call_ids_impl()
return _call_ids_impl._instance
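# --- Illustrative usage sketch (not part of the original module) ---
# The docstring above describes Callids as a singleton call_id -> timestamp map
# used to de-duplicate calls; the call ids below are made up.
if __name__ == '__main__':
    ids = Callids()
    print ids.already_handled("call-1")   # False: first time this id is seen
    print ids.already_handled("call-1")   # True: duplicate within purge_timeout
    print ids.already_handled(None)       # False: empty ids are never de-duplicated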
| bsd-3-clause | 5,228,022,733,388,344,000 | 30.666667 | 115 | 0.578509 | false |
openweave/openweave-core | src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_48.py | 1 | 3501 | #!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM mutual subscribe between nodes.
# I05: Mutual Subscribe: Responder Continuous Events. Mutate data in responder. Client in initiator cancels
# M29: Stress Mutual Subscribe: Responder Continuous Events. Mutate data in responder. Client in initiator cancels
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_mutual_subscribe_48(weave_wdm_next_test_base):
def test_weave_wdm_next_mutual_subscribe_48(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "mutual_subscribe"
wdm_next_args['total_client_count'] = 4
wdm_next_args['final_client_status'] = 0
wdm_next_args['timer_client_period'] = 16000
wdm_next_args['test_client_iterations'] = 5
wdm_next_args['test_client_delay'] = 35000
wdm_next_args['enable_client_flip'] = 0
wdm_next_args['total_server_count'] = 4
wdm_next_args['final_server_status'] = 4
wdm_next_args['timer_server_period'] = 15000
wdm_next_args['enable_server_flip'] = 1
wdm_next_args['server_event_generator'] = 'Security'
wdm_next_args['server_inter_event_period'] = 2000
wdm_next_args['client_log_check'] = [('Handler\[0\] \[(ALIVE|CONFM)\] bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Client\[0\] \[(ALIVE|CONFM)\] EndSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['server_log_check'] = [('bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Client\[0\] \[(ALIVE|CONFM)\] CancelRequestHandler', wdm_next_args['test_client_iterations']),
('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
wdm_next_args['test_case_name'] = ['M29: Stress Mutual Subscribe: Responder Continuous Events. Mutate data in responder. Client in initiator cancels']
print('test file: ' + self.__class__.__name__)
print("weave-wdm-next test I05 and M29")
super(test_weave_wdm_next_mutual_subscribe_48, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
WeaveUtilities.run_unittest()
| apache-2.0 | -7,304,782,260,674,297,000 | 45.065789 | 161 | 0.628106 | false |
live-clones/dolfin-adjoint | timestepping/python/timestepping/pre_assembled_equations.py | 1 | 21818 | #!/usr/bin/env python2
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014-2016 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import dolfin
import ufl
from .caches import *
from .equation_solvers import *
from .exceptions import *
from .fenics_overrides import *
from .fenics_utils import *
from .pre_assembled_forms import *
from .statics import *
__all__ = \
[
"PAEquationSolver",
"pa_solve"
]
class PAEquationSolver(EquationSolver):
"""
An EquationSolver applying additional pre-assembly and linear solver caching
optimisations. This utilises pre-assembly of static terms. The arguments match
those accepted by the DOLFIN solve function, with the following differences:
Argument 1: May be a general equation. Linear systems are detected
automatically.
initial_guess: The initial guess for an iterative solver.
adjoint_solver_parameters: A dictionary of linear solver parameters for an
adjoint equation solve.
"""
def __init__(self, *args, **kwargs):
args, kwargs = copy.copy(args), copy.copy(kwargs)
# Process arguments not to be passed to _extract_args
if "initial_guess" in kwargs:
if not kwargs["initial_guess"] is None and not isinstance(kwargs["initial_guess"], dolfin.Function):
raise InvalidArgumentException("initial_guess must be a Function")
initial_guess = kwargs["initial_guess"]
del(kwargs["initial_guess"])
else:
initial_guess = None
if "adjoint_solver_parameters" in kwargs:
if not kwargs["adjoint_solver_parameters"] is None and not isinstance(kwargs["adjoint_solver_parameters"], dict):
raise InvalidArgumentException("adjoint_solver_parameters must be a dictionary")
adjoint_solver_parameters = kwargs["adjoint_solver_parameters"]
del(kwargs["adjoint_solver_parameters"])
else:
adjoint_solver_parameters = None
if "pre_assembly_parameters" in kwargs:
pre_assembly_parameters = kwargs["pre_assembly_parameters"]
del(kwargs["pre_assembly_parameters"])
else:
pre_assembly_parameters = {}
# Process remaining arguments
if "form_compiler_parameters" in kwargs:
raise NotImplementedException("form_compiler_parameters argument not supported")
eq, x, bcs, J, tol, goal, form_parameters, solver_parameters = dolfin.fem.solving._extract_args(*args, **kwargs)
# Relax requirements on equation syntax
eq_lhs_rank = form_rank(eq.lhs)
if eq_lhs_rank == 1:
form = eq.lhs
if not is_zero_rhs(eq.rhs):
form -= eq.rhs
if x in ufl.algorithms.extract_coefficients(form):
if J is None:
J = derivative(form, x)
if x in ufl.algorithms.extract_coefficients(J):
# Non-linear solve
is_linear = False
else:
# Linear solve, rank 2 LHS
cache_info("Detected that solve for %s is linear" % x.name())
form = dolfin.replace(form, {x:dolfin.TrialFunction(x.function_space())})
eq = dolfin.lhs(form) == dolfin.rhs(form)
eq_lhs_rank = form_rank(eq.lhs)
assert(eq_lhs_rank == 2)
is_linear = True
else:
# Linear solve, rank 1 LHS
is_linear = True
elif eq_lhs_rank == 2:
form = eq.lhs
if not is_zero_rhs(eq.rhs):
form -= eq.rhs
if not x in ufl.algorithms.extract_coefficients(form):
# Linear solve, rank 2 LHS
eq = dolfin.lhs(form) == dolfin.rhs(form)
eq_lhs_rank = form_rank(eq.lhs)
assert(eq_lhs_rank == 2)
is_linear = True
else:
# ??
raise InvalidArgumentException("Invalid equation")
# Initial guess sanity checking
if is_linear:
if not "krylov_solver" in solver_parameters:
solver_parameters["krylov_solver"] = {}
def initial_guess_enabled():
return solver_parameters["krylov_solver"].get("nonzero_initial_guess", False)
def initial_guess_disabled():
return not solver_parameters["krylov_solver"].get("nonzero_initial_guess", True)
def enable_initial_guess():
solver_parameters["krylov_solver"]["nonzero_initial_guess"] = True
return
if initial_guess is None:
if initial_guess_enabled():
initial_guess = x
elif eq_lhs_rank == 1:
# Supplied an initial guess for a linear solve with a rank 1 LHS -
# ignore it
initial_guess = None
elif "linear_solver" in solver_parameters and not solver_parameters["linear_solver"] in ["direct", "lu"] and not dolfin.has_lu_solver_method(solver_parameters["linear_solver"]):
# Supplied an initial guess with a Krylov solver - check the
# initial_guess solver parameter
if initial_guess_disabled():
raise ParameterException("initial_guess cannot be set if nonzero_initial_guess solver parameter is False")
enable_initial_guess()
elif is_linear:
# Supplied an initial guess for a linear solve with an LU solver -
# ignore it
initial_guess = None
# Initialise
EquationSolver.__init__(self, eq, x, bcs,
solver_parameters = solver_parameters,
adjoint_solver_parameters = adjoint_solver_parameters,
pre_assembly_parameters = pre_assembly_parameters)
self.__args = args
self.__kwargs = kwargs
self.__J = J
self.__tol = tol
self.__goal = goal
self.__form_parameters = form_parameters
self.__initial_guess = initial_guess
# Assemble
self.reassemble()
return
def reassemble(self, *args):
"""
Reassemble the PAEquationSolver. If no arguments are supplied, reassemble
both the LHS and RHS. Otherwise, only reassemble the LHS or RHS if they
depend upon the supplied Constant s or Function s. Note that this does
not clear the assembly or linear solver caches -- hence if a static
Constant, Function, or DirichletBC is modified then one should clear the
caches before calling reassemble on the PAEquationSolver.
"""
x, eq, bcs, linear_solver_parameters, pre_assembly_parameters = self.x(), \
self.eq(), self.bcs(), self.linear_solver_parameters(), \
self.pre_assembly_parameters()
x_deps = self.dependencies()
a, L, linear_solver = None, None, None
if self.is_linear():
for dep in x_deps:
if dep is x:
raise DependencyException("Invalid non-linear solve")
def assemble_lhs():
eq_lhs_rank = form_rank(eq.lhs)
if eq_lhs_rank == 2:
static_bcs = n_non_static_bcs(bcs) == 0
static_form = is_static_form(eq.lhs)
if not pre_assembly_parameters["equations"]["symmetric_boundary_conditions"] and len(bcs) > 0 and static_bcs and static_form:
a = assembly_cache.assemble(eq.lhs,
bcs = bcs, symmetric_bcs = False)
cache_info("Pre-assembled LHS terms in solve for %s : 1" % x.name())
cache_info("Non-pre-assembled LHS terms in solve for %s: 0" % x.name())
linear_solver = linear_solver_cache.linear_solver(eq.lhs,
linear_solver_parameters,
bcs = bcs, symmetric_bcs = False,
a = a)
linear_solver.set_operator(a)
elif len(bcs) == 0 and static_form:
a = assembly_cache.assemble(eq.lhs)
cache_info("Pre-assembled LHS terms in solve for %s : 1" % x.name())
cache_info("Non-pre-assembled LHS terms in solve for %s: 0" % x.name())
linear_solver = linear_solver_cache.linear_solver(eq.lhs,
linear_solver_parameters,
a = a)
linear_solver.set_operator(a)
else:
a = PAForm(eq.lhs, pre_assembly_parameters = pre_assembly_parameters["bilinear_forms"])
cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
linear_solver = linear_solver_cache.linear_solver(eq.lhs,
linear_solver_parameters, pre_assembly_parameters["bilinear_forms"],
static = a.is_static() and static_bcs,
bcs = bcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
else:
assert(eq_lhs_rank == 1)
a = PAForm(eq.lhs, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
linear_solver = None
return a, linear_solver
def assemble_rhs():
L = PAForm(eq.rhs, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
cache_info("Pre-assembled RHS terms in solve for %s : %i" % (x.name(), L.n_pre_assembled()))
cache_info("Non-pre-assembled RHS terms in solve for %s: %i" % (x.name(), L.n_non_pre_assembled()))
return L
if len(args) == 0:
a, linear_solver = assemble_lhs()
L = assemble_rhs()
else:
a, linear_solver = self.__a, self.__linear_solver
L = self.__L
lhs_cs = ufl.algorithms.extract_coefficients(eq.lhs)
rhs_cs = ufl.algorithms.extract_coefficients(eq.rhs)
for dep in args:
if dep in lhs_cs:
a, linear_solver = assemble_lhs()
break
for dep in args:
if dep in rhs_cs:
L = assemble_rhs()
break
elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
J, hbcs = self.J(), self.hbcs()
def assemble_lhs():
a = PAForm(J, pre_assembly_parameters = pre_assembly_parameters["bilinear_forms"])
cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
linear_solver = linear_solver_cache.linear_solver(J,
linear_solver_parameters, pre_assembly_parameters["bilinear_forms"],
static = False,
bcs = hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
return a, linear_solver
def assemble_rhs():
L = -eq.lhs
if not is_zero_rhs(eq.rhs):
L += eq.rhs
L = PAForm(L, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
cache_info("Pre-assembled RHS terms in solve for %s : %i" % (x.name(), L.n_pre_assembled()))
cache_info("Non-pre-assembled RHS terms in solve for %s: %i" % (x.name(), L.n_non_pre_assembled()))
return L
if len(args) == 0:
a, linear_solver = assemble_lhs()
L = assemble_rhs()
else:
a, linear_solver = self.__a, self.__linear_solver
L = self.__L
lhs_cs = set(ufl.algorithms.extract_coefficients(J))
rhs_cs = set(ufl.algorithms.extract_coefficients(eq.lhs))
if not is_zero_rhs(eq.rhs):
rhs_cs.update(ufl.algorithms.extract_coefficients(eq.rhs))
for dep in args:
if dep in lhs_cs:
a, linear_solver = assemble_lhs()
break
for dep in args:
if dep in rhs_cs:
L = assemble_rhs()
break
self.__dx = x.vector().copy()
self.__a, self.__L, self.__linear_solver = a, L, linear_solver
return
def dependencies(self, non_symbolic = False):
"""
Return equation dependencies. If non_symbolic is true, also return any
other dependencies which could alter the result of a solve, such as the
initial guess.
"""
if not non_symbolic:
return EquationSolver.dependencies(self, non_symbolic = False)
elif not self.__initial_guess is None:
deps = copy.copy(EquationSolver.dependencies(self, non_symbolic = True))
deps.add(self.__initial_guess)
return deps
else:
return EquationSolver.dependencies(self, non_symbolic = True)
def linear_solver(self):
"""
Return the linear solver.
"""
return self.__linear_solver
def solve(self):
"""
Solve the equation
"""
x, pre_assembly_parameters = self.x(), self.pre_assembly_parameters()
if not self.__initial_guess is None and not self.__initial_guess is x:
x.assign(self.__initial_guess)
if self.is_linear():
bcs, linear_solver = self.bcs(), self.linear_solver()
if isinstance(self.__a, dolfin.GenericMatrix):
L = assemble(self.__L, copy = len(bcs) > 0)
enforce_bcs(L, bcs)
linear_solver.solve(x.vector(), L)
elif self.__a.rank() == 2:
a = assemble(self.__a, copy = len(bcs) > 0)
L = assemble(self.__L, copy = len(bcs) > 0)
apply_bcs(a, bcs, L = L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(a)
linear_solver.solve(x.vector(), L)
else:
assert(self.__a.rank() == 1)
assert(linear_solver is None)
a = assemble(self.__a, copy = False)
L = assemble(self.__L, copy = False)
assert(L.local_range() == a.local_range())
x.vector().set_local(L.array() / a.array())
x.vector().apply("insert")
enforce_bcs(x.vector(), bcs)
elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
# Newton solver, intended to have near identical behaviour to the Newton
# solver supplied with DOLFIN. See
# http://fenicsproject.org/documentation/tutorial/nonlinear.html for
# further details.
default_parameters = dolfin.NewtonSolver.default_parameters()
solver_parameters = self.solver_parameters()
if "newton_solver" in solver_parameters:
parameters = solver_parameters["newton_solver"]
else:
parameters = {}
linear_solver = self.linear_solver()
atol = default_parameters["absolute_tolerance"]
rtol = default_parameters["relative_tolerance"]
max_its = default_parameters["maximum_iterations"]
omega = default_parameters["relaxation_parameter"]
err = default_parameters["error_on_nonconvergence"]
r_def = default_parameters["convergence_criterion"]
for key in parameters.keys():
if key == "absolute_tolerance":
atol = parameters[key]
elif key == "convergence_criterion":
r_def = parameters[key]
elif key == "error_on_nonconvergence":
err = parameters[key]
elif key == "maximum_iterations":
max_its = parameters[key]
elif key == "relative_tolerance":
rtol = parameters[key]
elif key == "relaxation_parameter":
omega = parameters[key]
elif key in ["linear_solver", "preconditioner", "lu_solver", "krylov_solver"]:
pass
elif key in ["method", "report"]:
raise NotImplementedException("Unsupported Newton solver parameter: %s" % key)
else:
raise ParameterException("Unexpected Newton solver parameter: %s" % key)
eq, bcs, hbcs = self.eq(), self.bcs(), self.hbcs()
a, L = self.__a, self.__L
x_name = x.name()
x = x.vector()
enforce_bcs(x, bcs)
dx = self.__dx
if not isinstance(linear_solver, dolfin.GenericLUSolver):
dx.zero()
if r_def == "residual":
l_L = assemble(L, copy = len(hbcs) > 0)
enforce_bcs(l_L, hbcs)
r_0 = l_L.norm("l2")
it = 0
if r_0 >= atol:
l_a = assemble(a, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it += 1
atol = max(atol, r_0 * rtol)
while it < max_its:
l_L = assemble(L, copy = len(hbcs) > 0)
enforce_bcs(l_L, hbcs)
r = l_L.norm("l2")
if r < atol:
break
l_a = assemble(a, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it += 1
elif r_def == "incremental":
l_a = assemble(a, copy = len(hbcs) > 0)
l_L = assemble(L, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it = 1
r_0 = dx.norm("l2")
if r_0 >= atol:
atol = max(atol, rtol * r_0)
while it < max_its:
l_a = assemble(a, copy = len(hbcs) > 0)
l_L = assemble(L, copy = len(hbcs) > 0)
apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
linear_solver.set_operator(l_a)
linear_solver.solve(dx, l_L)
x.axpy(omega, dx)
it += 1
if dx.norm("l2") < atol:
break
else:
raise ParameterException("Invalid convergence criterion: %s" % r_def)
if it == max_its:
if err:
raise StateException("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
else:
dolfin.warning("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
# dolfin.info("Newton solve for %s converged after %i iterations" % (x_name, it))
else:
problem = dolfin.NonlinearVariationalProblem(self.eq().lhs - self.eq().rhs, x, bcs = self.bcs(), J = self.J())
nl_solver = dolfin.NonlinearVariationalSolver(problem)
nl_solver.parameters.update(self.solver_parameters())
nl_solver.solve()
return
def pa_solve(*args, **kwargs):
"""
Instantiate a PAEquationSolver using the supplied arguments and call its solve
method.
"""
PAEquationSolver(*args, **kwargs).solve()
return
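# --- Illustrative usage sketch (not part of the original module) ---
# pa_solve accepts the same arguments as dolfin.solve (see the PAEquationSolver
# docstring); the mesh, space and forms below are hypothetical and only show the
# intended calling pattern for a linear solve.
if __name__ == "__main__":
    mesh = dolfin.UnitSquareMesh(16, 16)
    space = dolfin.FunctionSpace(mesh, "CG", 1)
    test, trial = dolfin.TestFunction(space), dolfin.TrialFunction(space)
    u = dolfin.Function(space)
    bc = dolfin.DirichletBC(space, 0.0, "on_boundary")
    eq = dolfin.inner(dolfin.grad(test), dolfin.grad(trial)) * dolfin.dx \
        == dolfin.Constant(1.0) * test * dolfin.dx
    pa_solve(eq, u, bc, solver_parameters = {"linear_solver": "lu"})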
| lgpl-3.0 | 5,529,168,736,928,071,000 | 45.619658 | 189 | 0.534788 | false |
trnewman/VT-USRP-daughterboard-drivers_python | gnuradio-core/src/lib/filter/generate_gr_fir_sysconfig.py | 1 | 3066 | #!/bin/env python
# -*- python -*-
#
# Copyright 2003 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from generate_utils import *
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_h ():
out = open_and_log_name ('gr_fir_sysconfig.h', 'w')
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifndef INCLUDED_GR_FIR_SYSCONFIG_H
#define INCLUDED_GR_FIR_SYSCONFIG_H
#include <gr_types.h>
''')
# for sig in fir_signatures:
# out.write ('class gr_fir_' + sig + ';\n')
out.write ('#include <gr_fir_util.h>\n')
out.write (
'''
/*!
* \\brief abstract base class for configuring the automatic selection of the
* fastest gr_fir for your platform.
*
* This is used internally by gr_fir_util.
*/
class gr_fir_sysconfig {
public:
virtual ~gr_fir_sysconfig ();
''')
for sig in fir_signatures:
out.write ((' virtual gr_fir_%s *create_gr_fir_%s (const std::vector<%s> &taps) = 0;\n' %
(sig, sig, tap_type (sig))))
out.write ('\n')
for sig in fir_signatures:
out.write ((' virtual void get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info) = 0;\n' %
(sig, sig)))
out.write (
'''
};
/*
* This returns the single instance of the appropriate derived class.
* This function must be defined only once in the system, and should be defined
* in the platform specific code.
*/
gr_fir_sysconfig *gr_fir_sysconfig_singleton ();
#endif /* INCLUDED_GR_FIR_SYSCONFIG_H */
''')
out.close ()
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_cc ():
out = open_and_log_name ('gr_fir_sysconfig.cc', 'w')
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <gr_fir_sysconfig.h>
gr_fir_sysconfig::~gr_fir_sysconfig ()
{
}
''')
out.close ()
# ----------------------------------------------------------------
def generate ():
make_gr_fir_sysconfig_h ()
make_gr_fir_sysconfig_cc ()
if __name__ == '__main__':
generate ()
| gpl-3.0 | 2,011,145,555,511,358,200 | 23.141732 | 98 | 0.616765 | false |
Hiestaa/3D-Lsystem | Vector.py | 1 | 1792 | class Vector:
"""represente un vecteur 3d"""
def __init__(self, arg = (0, 0, 0)):
self.x = float(arg[0])
self.y = float(arg[1])
self.z = float(arg[2])
def set(self, val):
if isinstance(val, self.__class__):
self.x = val.x
self.y = val.y
self.z = val.z
else:
self.x = val[0]
self.y = val[1]
self.z = val[2]
return self;
def toString(self):
return "(" + str(self.x) + ", " + str(self.y) + ", " + str(self.z) + ")"
def __mul__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x * other.x, self.y * other.y, self.z * other.z))
else:
return Vector((self.x * other, self.y * other, self.z * other))
def __rmul__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x * other.x, self.y * other.y, self.z * other.z))
else:
return Vector((self.x * other, self.y * other, self.z * other))
def __imul__(self, other):
if isinstance(other, self.__class__):
self.x *= other.x
self.y *= other.y
self.z *= other.z
else:
self.x *= other
self.y *= other
self.z *= other
return self
def __add__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x + other.x, self.y + other.y, self.z + other.z))
else:
return Vector((self.x + other, self.y + other, self.z + other))
def __radd__(self, other):
if isinstance(other, self.__class__):
return Vector((self.x + other.x, self.y + other.y, self.z + other.z))
else:
return Vector((self.x + other, self.y + other, self.z + other))
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other
self.y += other
self.z += other
return self
def toTuple(self):
return (self.x, self.y, self.z) | mit | -1,565,978,637,445,939,500 | 24.985507 | 74 | 0.582589 | false |
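# --- Illustrative usage sketch for the Vector class above (not part of the original file) ---
# Vector coerces its components to float and supports scalar and element-wise arithmetic:
#
#   >>> v, w = Vector((1, 2, 3)), Vector((4, 5, 6))
#   >>> (v + w).toString()
#   '(5.0, 7.0, 9.0)'
#   >>> (v * 2).toTuple()
#   (2.0, 4.0, 6.0)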
SymbiFlow/prjxray | minitests/litex/uart_ddr/arty/scripts/arty.py | 1 | 4274 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
# This file is Copyright (c) 2015-2020 Florent Kermarrec <[email protected]>
# License: BSD
import argparse
from migen import *
from litex_boards.platforms import arty
from litex.build.xilinx import VivadoProgrammer
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.init import get_sdram_phy_py_header
from litedram.modules import MT41K128M16
from litedram.phy import s7ddrphy
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
# # #
self.submodules.pll = pll = S7PLL(speedgrade=-1)
self.comb += pll.reset.eq(~platform.request("cpu_reset"))
pll.register_clkin(platform.request("clk100"), 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4 * sys_clk_freq)
pll.create_clkout(self.cd_sys4x_dqs, 4 * sys_clk_freq, phase=90)
pll.create_clkout(self.cd_clk200, 200e6)
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCSDRAM):
def __init__(self):
platform = arty.Platform()
sys_clk_freq = int(50e6)
# SoCSDRAM ---------------------------------------------------------------------------------
SoCSDRAM.__init__(
self,
platform,
clk_freq=sys_clk_freq,
ident="Minimal Arty DDR3 Design for tests with Project X-Ray",
ident_version=True,
cpu_type=None,
l2_size=16,
uart_name="bridge")
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.A7DDRPHY(
platform.request("ddram"),
memtype="DDR3",
nphases=4,
sys_clk_freq=sys_clk_freq)
self.add_csr("ddrphy")
sdram_module = MT41K128M16(sys_clk_freq, "1:4")
self.register_sdram(
self.ddrphy,
geom_settings=sdram_module.geom_settings,
timing_settings=sdram_module.timing_settings)
def generate_sdram_phy_py_header(self):
f = open("sdram_init.py", "w")
f.write(
get_sdram_phy_py_header(
self.sdram.controller.settings.phy,
self.sdram.controller.settings.timing))
f.close()
# Load ---------------------------------------------------------------------------------------------
def load():
prog = VivadoProgrammer()
prog.load_bitstream("build/gateware/top.bit")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description="Minimal Arty DDR3 Design for tests with Project X-Ray")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
args = parser.parse_args()
if args.load:
load()
soc = BaseSoC()
builder = Builder(soc, output_dir="build", csr_csv="csr.csv")
builder.build(run=args.build)
soc.generate_sdram_phy_py_header()
if __name__ == "__main__":
main()
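# --- Illustrative note (not part of the original script) ---
# Typical invocations, based on the argparse flags defined above:
#
#   python arty.py --build    # elaborate the SoC and run the Vivado toolchain
#   python arty.py --load     # program build/gateware/top.bit onto the board
#
# Both paths also regenerate sdram_init.py next to the script via
# generate_sdram_phy_py_header().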
| isc | -8,789,639,174,733,908,000 | 32.920635 | 100 | 0.535564 | false |
kapil1garg/eecs338-chris-jones | show_query.py | 1 | 3440 | import json
import elastic
from operator import itemgetter
from default_query import DefaultQuery
class ShowQuery(DefaultQuery):
"""
Handles ES queries related to shows
"""
def __init__(self):
DefaultQuery.__init__(self)
def generate_response_best_show(self, query, annotated_query):
# find document id with max polarity
payload = {
'_source': ['documentSentiment.polarity'],
'query': {
'bool': {
'must': [{
'match': {
'Full text:': p
}}
for p in annotated_query.shows]
}
}
}
r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
polarities = [(i['_id'], i['_source']['documentSentiment']['polarity']) for i in r]
id_max_polarity = max(polarities, key=itemgetter(1))[0]
# return sentence from document id that contains show in a sentence
payload = {
'_source': ['sentences.content', 'Full text:', 'ProQ:'],
'query': {
'bool': {
'must': [{
'ids': {
'values': [id_max_polarity]
}},
{'nested': {
'path': 'sentences',
'query': {
'bool': {
'must': [{'match': {'sentences.content': p}} for p in annotated_query.shows]
}
},
'inner_hits': {}
}}]
}
}
}
r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
r = [(i['inner_hits']['sentences']['hits'], i['_source']['ProQ:'], i['_source']['Full text:']) for i in r]
return self.format_response(r[0])
def generate_response_person_in_show(self, query, annotated_query):
match_queries = [{
'match': {
'Full text:': show
}
}
for show in annotated_query.shows
]
match_queries.append({
'nested': {
'path': 'sentences',
'query': {
'bool': {
'must': [{
'match': {
'sentences.content': p
}
}
for p in annotated_query.people
]
}
},
'inner_hits': {}
}
})
payload = {
'_source': ['sentences.content', 'Full text:', 'ProQ:'],
'query': {
'bool': {
'must': match_queries
}
}
}
r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))
print r
r = r['hits']['hits']
r = [(i['inner_hits']['sentences']['hits'], i['_source']['ProQ:'], i['_source']['Full text:']) for i in r]
return self.format_response(r[0])
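# --- Illustrative usage sketch (not part of the original module) ---
# The handlers expect an annotated query exposing `.shows` and `.people` lists and
# a reachable Elasticsearch index; the class and values below are hypothetical.
if __name__ == '__main__':
    class FakeAnnotatedQuery(object):
        shows = ['Hamilton']
        people = ['Lin-Manuel Miranda']
    sq = ShowQuery()
    print sq.generate_response_person_in_show('Who appears in Hamilton?',
                                              FakeAnnotatedQuery())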
| mit | 2,222,587,426,113,204,000 | 34.102041 | 117 | 0.386628 | false |
jakevdp/altair | altair/utils/server.py | 1 | 4035 | """
A Simple server used to show altair graphics from a prompt or script.
This is adapted from the mpld3 package; see
https://github.com/mpld3/mpld3/blob/master/mpld3/_server.py
"""
import sys
import threading
import webbrowser
import socket
import itertools
import random
from ._py3k_compat import server, IO
JUPYTER_WARNING = """
Note: if you're in the Jupyter notebook, Chart.serve() is not the best
way to view plots. Consider using Chart.display().
You must interrupt the kernel to cancel this command.
"""
# Mock server used for testing
class MockRequest(object):
def makefile(self, *args, **kwargs):
return IO(b"GET /")
def sendall(self, response):
pass
class MockServer(object):
def __init__(self, ip_port, Handler):
Handler(MockRequest(), ip_port[0], self)
def serve_forever(self):
pass
def server_close(self):
pass
def generate_handler(html, files=None):
if files is None:
files = {}
class MyHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html.encode())
elif self.path in files:
content_type, content = files[self.path]
self.send_response(200)
self.send_header("Content-type", content_type)
self.end_headers()
self.wfile.write(content.encode())
else:
self.send_error(404)
return MyHandler
def find_open_port(ip, port, n=50):
"""Find an open port near the specified port"""
ports = itertools.chain((port + i for i in range(n)),
                            (port + random.randint(-2 * n, 2 * n) for i in range(n)))
for port in ports:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((ip, port))
s.close()
if result != 0:
return port
raise ValueError("no open ports found")
def serve(html, ip='127.0.0.1', port=8888, n_retries=50, files=None,
jupyter_warning=True, open_browser=True, http_server=None):
"""Start a server serving the given HTML, and (optionally) open a browser
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port is in use.
files : dictionary (optional)
dictionary of extra content to serve
jupyter_warning : bool (optional)
if True (default), then print a warning if this is used within Jupyter
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
"""
port = find_open_port(ip, port, n_retries)
Handler = generate_handler(html, files)
if http_server is None:
srvr = server.HTTPServer((ip, port), Handler)
else:
srvr = http_server((ip, port), Handler)
if jupyter_warning:
try:
__IPYTHON__ # noqa
except:
pass
else:
print(JUPYTER_WARNING)
# Start the server
print("Serving to http://{}:{}/ [Ctrl-C to exit]".format(ip, port))
sys.stdout.flush()
if open_browser:
# Use a thread to open a web browser pointing to the server
b = lambda: webbrowser.open('http://{}:{}'.format(ip, port))
threading.Thread(target=b).start()
try:
srvr.serve_forever()
except (KeyboardInterrupt, SystemExit):
print("\nstopping Server...")
srvr.server_close()
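# --- Illustrative usage sketch (not part of the original module) ---
# serve() blocks until interrupted; passing http_server=MockServer exercises the
# same code path without opening a real socket or a browser.
if __name__ == '__main__':
    html = "<html><body><h1>hello altair</h1></body></html>"
    serve(html, open_browser=False, http_server=MockServer)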
| bsd-3-clause | 4,777,088,075,087,904,000 | 29.11194 | 78 | 0.603965 | false |
CWDoherty/Baseball | Scripts/hashtags.py | 1 | 1999 | '''
Copyright (c) 2015 Chris Doherty, Oliver Nabavian
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import mysql.connector, re
config = {
'user': 'root',
'password': 'isles40',
'host': '127.0.0.1',
'database': 'baseballdb'
}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(buffered=True)
tweets = ("SELECT message, user_id, tweet_id FROM Tweet")
cursor.execute(tweets)
tweet_list = []
count = 0
for c in cursor:
if '#' in c[0]:
tweet_list.append(c)
find_tags = re.compile("\S*#(?:\S+)")
all_tag = []
for t in tweet_list:
tags = re.findall(find_tags, t[0])
if(len(tags) > 0):
all_tag.append([tags, t[1], t[2]])
insert = ("INSERT INTO Hashtag(tag, user_id, tweet_id) VALUES (%s, %s, %s)")
query = []
for a in all_tag:
for x in a[0]:
temp = [x, a[1], a[2]]
query.append(temp)
print query
for x in range(len(query)):
try:
cursor.execute(insert, query[x])
cnx.commit()
except:
# Duplicate entries will not make it into the database
continue
cursor.close()
cnx.close()
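# --- Illustrative note (not part of the original script) ---
# The find_tags pattern keeps every whitespace-delimited token that contains a '#',
# e.g. for one hypothetical tweet:
#
#   >>> re.findall("\S*#(?:\S+)", "Walk-off win! #Mets #postseason")
#   ['#Mets', '#postseason']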
| mit | -4,705,462,326,985,644,000 | 26.013514 | 77 | 0.722361 | false |
karesansui/karesansui | karesansui/gadget/hostby1staticroute.py | 1 | 6419 | # -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
import web
import simplejson as json
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.db.access.machine import findbyhost1
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_CHAR, CHECK_MIN, CHECK_MAX, CHECK_ONLYSPACE, \
CHECK_UNIQUE
from karesansui.lib.utils import is_param, is_empty, preprint_r, \
base64_encode, get_ifconfig_info
from karesansui.lib.networkaddress import NetworkAddress
from karesansui.lib.parser.staticroute import staticrouteParser as Parser
from karesansui.lib.conf import read_conf, write_conf
def validates_staticroute(obj):
checker = Checker()
check = True
_ = obj._
checker.errors = []
if not is_param(obj.input, 'target'):
check = False
checker.add_error(_('Specify target address for the route.'))
else:
check = checker.check_ipaddr(
_('Target'),
obj.input.target,
CHECK_EMPTY | CHECK_VALID,
) and check
if not is_param(obj.input, 'gateway'):
check = False
checker.add_error(_('Specify gateway address for the route.'))
else:
check = checker.check_ipaddr(
_('Gateway'),
obj.input.gateway,
CHECK_VALID,
) and check
obj.view.alert = checker.errors
return check
class HostBy1StaticRoute(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
host = findbyhost1(self.orm, host_id)
self.view.host_id = host_id
# unremovable entries
excludes = {
"device": ["^peth","^virbr","^sit","^xenbr","^lo","^br"],
"ipaddr": ["^0\.0\.0\.0$", "^169\.254\.0\.0$"],
}
devices = []
phydev_regex = re.compile(r"^eth[0-9]+")
for dev,dev_info in get_ifconfig_info().iteritems():
if phydev_regex.match(dev):
try:
if dev_info['ipaddr'] is not None:
devices.append(dev)
net = NetworkAddress("%s/%s" % (dev_info['ipaddr'],dev_info['mask'],))
excludes['ipaddr'].append(net.network)
except:
pass
self.view.devices = devices
parser = Parser()
status = parser.do_status()
routes = {}
for _k,_v in status.iteritems():
for _k2,_v2 in _v.iteritems():
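                # status maps device (_k) -> target network (_k2) -> route info;
                # each row id encodes "target@device" with base64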
name = base64_encode("%s@%s" % (_k2,_k,))
routes[name] = {}
routes[name]['name'] = name
routes[name]['device'] = _k
routes[name]['gateway'] = _v2['gateway']
routes[name]['flags'] = _v2['flags']
routes[name]['ref'] = _v2['ref']
routes[name]['use'] = _v2['use']
net = NetworkAddress(_k2)
routes[name]['ipaddr'] = net.ipaddr
routes[name]['netlen'] = net.netlen
routes[name]['netmask'] = net.netmask
removable = True
for _ex_key,_ex_val in excludes.iteritems():
ex_regex = "|".join(_ex_val)
mm = re.search(ex_regex,routes[name][_ex_key])
if mm:
removable = False
routes[name]['removable'] = removable
self.view.routes = routes
if self.is_mode_input():
pass
return True
@auth
def _POST(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
host = findbyhost1(self.orm, host_id)
if not validates_staticroute(self):
return web.badrequest(self.view.alert)
modules = ["staticroute"]
dop = read_conf(modules, self, host)
if dop is False:
return web.internalerror('Internal Server Error. (Timeout)')
target = self.input.target
net = NetworkAddress(target)
ipaddr = net.ipaddr
netmask = net.netmask
netlen = net.netlen
network = net.network
target = "%s/%s" % (ipaddr,netlen,)
gateway = self.input.gateway
device = self.input.device
dop.set("staticroute", [device,target], gateway)
from karesansui.lib.parser.staticroute import PARSER_COMMAND_ROUTE
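        # the assembled 'route add' command is passed below as a post-command,
        # to be executed after the configuration has been written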
if net.netlen == 32:
command = "%s add -host %s gw %s dev %s" % (PARSER_COMMAND_ROUTE,ipaddr,gateway,device,)
command = "%s add -host %s dev %s" % (PARSER_COMMAND_ROUTE,ipaddr,device,)
else:
command = "%s add -net %s netmask %s gw %s dev %s" % (PARSER_COMMAND_ROUTE,network,netmask,gateway,device,)
extra_args = {"post-command": command}
retval = write_conf(dop, self, host, extra_args=extra_args)
if retval is False:
return web.internalerror('Internal Server Error. (Adding Task)')
return web.accepted(url=web.ctx.path)
urls = (
'/host/(\d+)/staticroute[/]?(\.html|\.part|\.json)?$', HostBy1StaticRoute,
)
| mit | 270,860,576,896,063,070 | 33.326203 | 119 | 0.581087 | false |
dlu-ch/dlb | test/dlb_contrib/test_git.py | 1 | 21925 | # SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <[email protected]>
import testenv # also sets up module search paths
import dlb.di
import dlb.fs
import dlb.ex
import dlb_contrib.generic
import dlb_contrib.git
import dlb_contrib.sh
import os.path
import tempfile
import subprocess
import re
import unittest
class PrepareGitRepo(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = """
git init
git config user.email "[email protected]"
git config user.name "dlu-ch"
git add .dlbroot/o
echo .dlbroot/ > .gitignore
echo x > x
git add x .gitignore
git commit -m 'Initial commit'
echo x >> x
git commit -a -m 'Enlarge x'
git tag -a v1.2.3c4 -m 'Release'
echo x >> x
git commit -a -m 'Enlarge x even further'
mkdir d
echo y > d/y
git add d/y
echo z > d/z
git add d/z
echo a > 'a -> b'
git add 'a -> b'
git commit -m 'Add files'
git mv x 'y -> z'
git mv 'a -> b' c
git mv d e
git mv e/y why
echo u > e/u
"""
# each annotated tag starting with 'v' followed by a decimal digit must match this (after 'v'):
VERSION_REGEX = re.compile(
r'^'
r'(?P<major>0|[1-9][0-9]*)\.(?P<minor>0|[1-9][0-9]*)\.(?P<micro>0|[1-9][0-9]*)'
r'((?P<post>[abc])(?P<post_number>0|[1-9][0-9]*))?'
r'$')
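# e.g. '1.2.3' matches with post=None; '1.2.3c4' matches with post='c', post_number='4'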
class ModificationsFromStatusTest(unittest.TestCase):
def test_branch_header(self):
lines = [
'# branch.oid b5fb8c02a485f9f7a5d4aee95848bf9c9d2b0f7f',
'# branch.head "äüä"',
'# branch.upstream origin/master',
'# branch.ab +12 -3'
]
_, _, branch_refname, upstream_branch_refname, before_upstream, behind_upstream = \
dlb_contrib.git.modifications_from_status(lines)
self.assertEqual('refs/heads/"äüä"', branch_refname)
self.assertEqual('refs/remotes/origin/master', upstream_branch_refname)
self.assertEqual((12, 3), (before_upstream, behind_upstream))
lines = [
'# branch.oid b5fb8c02a485f9f7a5d4aee95848bf9c9d2b0f7f',
'# branch.head (detached)'
]
_, _, branch_refname, upstream_branch_refname, before_upstream, behind_upstream = \
dlb_contrib.git.modifications_from_status(lines)
self.assertEqual('refs/heads/(detached)', branch_refname) # is ambiguous
self.assertIsNone(upstream_branch_refname)
self.assertIsNone(before_upstream)
self.assertIsNone(behind_upstream)
def test_single_non_header_line(self):
line = (
'1 .M N... 100644 100644 100644 '
'd8755f8b2ede3dc58822895fa85e0e51c8f20dda d8755f8b2ede3dc58822895fa85e0e51c8f20dda jöö/herzig'
)
self.assertEqual({dlb.fs.Path('jöö/herzig'): (' M', None)},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'1 A. N... 000000 100644 100644 '
'0000000000000000000000000000000000000000 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 "a\\tb\\nc\\"\'d "'
)
self.assertEqual({dlb.fs.Path('a\tb\nc"\'d '): ('A ', None)},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'2 R. N... 100644 100644 100644 '
'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 a\tb'
)
self.assertEqual({dlb.fs.Path('b'): ('R ', dlb.fs.Path('a'))},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'2 R. N... 100644 100644 100644 '
'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 "a\\"b"\ta -> b'
)
self.assertEqual({dlb.fs.Path('a -> b'): ('R ', dlb.fs.Path('a"b'))},
dlb_contrib.git.modifications_from_status([line])[0])
line = (
'2 R. N... 100644 100644 100644 '
'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 '
'a\t"a\\tb\\nc\\"\'d "'
)
self.assertEqual({dlb.fs.Path('a\tb\nc"\'d '): ('R ', dlb.fs.Path('a'))},
dlb_contrib.git.modifications_from_status([line])[0])
self.assertEqual({dlb.fs.Path('a')},
dlb_contrib.git.modifications_from_status(['? a'])[1])
self.assertEqual({dlb.fs.Path('a\tb\nc"\'d ')},
dlb_contrib.git.modifications_from_status(['? "a\\tb\\nc\\"\'d "'])[1])
def test_fails_on_invalid_line(self):
with self.assertRaises(ValueError):
dlb_contrib.git.modifications_from_status(['# branch.ab +0'])
with self.assertRaises(ValueError):
dlb_contrib.git.modifications_from_status(['1 A.'])
with self.assertRaises(ValueError):
dlb_contrib.git.modifications_from_status(['2 R.'])
class CheckRefNameTest(unittest.TestCase):
def test_empty_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('')
self.assertEqual(str(cm.exception), 'refname component must not be empty')
def test_single_slashes_are_valid(self):
dlb_contrib.git.check_refname('a/b/c')
def test_consecutive_slashes_are_valid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a//b')
self.assertEqual(str(cm.exception), 'refname component must not be empty')
def test_single_dot_in_the_middle_is_valid(self):
dlb_contrib.git.check_refname('a/b.c')
def test_at_at_certain_position_is_valid(self):
dlb_contrib.git.check_refname('a/{@}/b')
def test_single_at_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a/@/b')
self.assertEqual(str(cm.exception), "refname component must not be '@'")
def test_at_followed_by_brace_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a@{b')
self.assertEqual(str(cm.exception), "refname component must not contain '@{'")
def test_double_dot_in_the_middle_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a/b..c')
self.assertEqual(str(cm.exception), "refname component must not contain '..'")
def test_control_character_is_invalid(self):
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a\0b')
self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a\nb')
self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
with self.assertRaises(ValueError) as cm:
dlb_contrib.git.check_refname('a\x7Fb')
self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
class DescribeWorkingDirectory(dlb_contrib.git.GitDescribeWorkingDirectory):
SHORTENED_COMMIT_HASH_LENGTH = 8 # number of characters of the SHA1 commit hash in the *wd_version*
# working directory version
# examples: '1.2.3', '1.2.3c4-dev5+deadbeef?'
wd_version = dlb.ex.output.Object(explicit=False)
# tuple of the version according to the version tag
version_components = dlb.ex.output.Object(explicit=False)
async def redo(self, result, context):
await super().redo(result, context)
shortened_commit_hash_length = min(40, max(1, int(self.SHORTENED_COMMIT_HASH_LENGTH)))
version = result.tag_name[1:]
m = VERSION_REGEX.fullmatch(version)
if not m:
raise ValueError(f'annotated tag is not a valid version number: {result.tag_name!r}')
wd_version = version
if result.commit_number_from_tag_to_latest_commit:
wd_version += f'-dev{result.commit_number_from_tag_to_latest_commit}' \
f'+{result.latest_commit_hash[:shortened_commit_hash_length]}'
if result.has_changes_in_tracked_files:
wd_version += '?'
result.wd_version = wd_version
result.version_components = (
int(m.group('major')), int(m.group('minor')), int(m.group('micro')),
m.group('post'), None if m.group('post_number') is None else int(m.group('post_number'))
)
return True
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
@unittest.skipIf(not testenv.has_executable_in_path('sh'), 'requires sh in $PATH')
class GitDescribeWorkingDirectoryTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_line_output(self):
with dlb.ex.Context():
class AddLightWeightTag(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git tag v2' # light-weight tag does not affect 'git describe'
PrepareGitRepo().start().complete()
AddLightWeightTag().start().complete()
result = DescribeWorkingDirectory().start()
dlb.di.inform(f"version: {result.version_components!r}, wd version: {result.wd_version!r}")
dlb.di.inform(f"changed: {result.modification_by_file.keys()!r}")
self.assertEqual({
dlb.fs.Path('a -> b'): ('R ', dlb.fs.Path('c')),
dlb.fs.Path('d/y'): ('R ', dlb.fs.Path('why')),
dlb.fs.Path('d/z'): ('R ', dlb.fs.Path('e/z')),
dlb.fs.Path('x'): ('R ', dlb.fs.Path('y -> z'))
}, result.modification_by_file)
self.assertEqual({dlb.fs.Path('e/u')}, result.untracked_files)
self.assertEqual((1, 2, 3, 'c', 4), result.version_components)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev2\+[0-9a-f]{8}\?$')
self.assertEqual('refs/heads/master', result.branch_refname)
with dlb.ex.Context():
class CommitGitRepo(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git commit -a -m 0'
CommitGitRepo().start()
result = DescribeWorkingDirectory().start()
self.assertEqual({}, result.modification_by_file)
self.assertEqual({dlb.fs.Path('e/u')}, result.untracked_files)
self.assertEqual((1, 2, 3, 'c', 4), result.version_components)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')
with dlb.ex.Context():
class CheckoutBranch(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git checkout -f -b "(detached)"'
CheckoutBranch().start()
result = DescribeWorkingDirectory().start()
self.assertEqual('refs/heads/(detached)', result.branch_refname)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')
with dlb.ex.Context():
class CheckoutDetached(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = 'git checkout --detach'
CheckoutDetached().start()
result = DescribeWorkingDirectory().start()
self.assertIsNone(result.branch_refname)
self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')
def test_gitignore_can_hide_every_modification(self):
class PrepareRepoWithHiddenModifications(dlb_contrib.sh.ShScriptlet):
SCRIPTLET = """
git init
git config user.email "[email protected]"
git config user.name "dlu-ch"
echo x > x
git add x
git commit -m 'Initial commit'
git tag -a v0.0.0 -m 'Initial tag'
echo .gitignore > .gitignore
echo .dlbroot >> .gitignore
echo ignored >> .gitignore
touch ignored
"""
with dlb.ex.Context():
PrepareRepoWithHiddenModifications().start().complete()
result = DescribeWorkingDirectory().start()
self.assertEqual({}, result.modification_by_file)
class DefaultVersionTagTest(unittest.TestCase):
REGEX = re.compile(dlb_contrib.git.GitCheckTags.ANNOTATED_TAG_NAME_REGEX)
def test_fails_for_empty(self):
self.assertFalse(self.REGEX.fullmatch(''))
def test_fails_for_missing_v(self):
self.assertFalse(self.REGEX.fullmatch('1.2.3'))
def test_fails_for_leading_zero(self):
self.assertFalse(self.REGEX.fullmatch('v01.2.3'))
self.assertFalse(self.REGEX.fullmatch('v1.02.3'))
self.assertFalse(self.REGEX.fullmatch('v1.02.03'))
def test_matches_dotted_integers(self):
self.assertTrue(self.REGEX.fullmatch('v1'))
self.assertTrue(self.REGEX.fullmatch('v1.2'))
self.assertTrue(self.REGEX.fullmatch('v1.2.3'))
self.assertTrue(self.REGEX.fullmatch('v1.20.345.6789'))
self.assertTrue(self.REGEX.fullmatch('v0.0.0'))
def test_fails_without_trailing_decimal_digit(self):
self.assertFalse(self.REGEX.fullmatch('v1.2.3pre'))
def test_matches_dotted_integers_with_suffix(self):
self.assertTrue(self.REGEX.fullmatch('v1.2.3a4'))
self.assertTrue(self.REGEX.fullmatch('v1.2.3rc0'))
self.assertTrue(self.REGEX.fullmatch('v1.2.3patch747'))
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
@unittest.skipIf(not testenv.has_executable_in_path('sh'), 'requires sh in $PATH')
class GitCheckTagsTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_local_only(self):
class GitCheckTags(dlb_contrib.git.GitCheckTags):
REMOTE_NAME_TO_SYNC_CHECK = ''
class GitCheckTags2(GitCheckTags):
LIGHTWEIGHT_TAG_NAME_REGEX = 'latest_.*'
with dlb.ex.Context():
PrepareGitRepo().start().complete()
subprocess.check_output(['git', 'tag', '-a', 'v2.0.0', '-m', 'Release'])
subprocess.check_output(['git', 'tag', 'vw'])
result = GitCheckTags().start()
self.assertEqual({'v1.2.3c4', 'v2.0.0'}, set(result.commit_by_annotated_tag_name))
self.assertEqual({'vw'}, set(result.commit_by_lightweight_tag_name))
with dlb.ex.Context():
output = subprocess.check_output(['git', 'rev-parse', 'v1.2.3c4^{}', 'v2.0.0^{}', 'vw'])
commit_hashes = output.decode().splitlines()
self.assertEqual({
'v1.2.3c4': commit_hashes[0],
'v2.0.0': commit_hashes[1]
}, result.commit_by_annotated_tag_name)
self.assertEqual({
'vw': commit_hashes[2]
}, result.commit_by_lightweight_tag_name)
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', 'v2'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "name of lightweight tag does match 'ANNOTATED_TAG_NAME_REGEX': 'v2'"
self.assertEqual(msg, str(cm.exception))
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', '-d', 'v2'])
subprocess.check_output(['git', 'tag', '-a', 'v_3.0', '-m', 'Release'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "name of annotated tag does not match 'ANNOTATED_TAG_NAME_REGEX': 'v_3.0'"
self.assertEqual(msg, str(cm.exception))
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', '-d', 'v_3.0'])
with self.assertRaises(ValueError) as cm:
GitCheckTags2().start().complete()
msg = "name of lightweight tag does not match 'LIGHTWEIGHT_TAG_NAME_REGEX': 'vw'"
self.assertEqual(msg, str(cm.exception))
def test_remote_too(self):
class GitCheckTags(dlb_contrib.git.GitCheckTags):
pass
class GitCheckTags2(GitCheckTags):
DO_SYNC_CHECK_LIGHTWEIGHT_TAGS = True
origin_repo_dir = os.path.abspath(tempfile.mkdtemp())
with testenv.DirectoryChanger(origin_repo_dir):
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'config', 'user.email', '[email protected]'])
subprocess.check_output(['git', 'config', 'user.name', 'user.name'])
subprocess.check_output(['touch', 'x'])
subprocess.check_output(['git', 'add', 'x'])
subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
subprocess.check_output(['git', 'tag', '-a', 'v1.2.3c4', '-m', 'Release'])
subprocess.check_output(['touch', 'y'])
subprocess.check_output(['git', 'add', 'y'])
subprocess.check_output(['git', 'commit', '-m', 'Add y'])
subprocess.check_output(['git', 'tag', '-a', 'v2.0.0', '-m', 'Release'])
subprocess.check_output(['git', 'tag', '-a', 'v2.0.1', '-m', 'Release'])
subprocess.check_output(['git', 'tag', 'vm'])
subprocess.check_output(['git', 'tag', 'v'])
subprocess.check_output(['git', 'tag', 'w'])
subprocess.check_output(['git', 'init'])
subprocess.check_output(['touch', 'x'])
subprocess.check_output(['git', 'add', 'x'])
subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
subprocess.check_output(['git', 'remote', 'add', 'origin', origin_repo_dir])
subprocess.check_output(['git', 'fetch'])
subprocess.check_output(['git', 'fetch', '--tags'])
with dlb.ex.Context():
GitCheckTags().start()
with dlb.ex.Context():
subprocess.check_output(['git', 'tag', '-d', 'vm'])
subprocess.check_output(['git', 'tag', '-d', 'v'])
GitCheckTags().start() # do not sync lightweight tags by default
with self.assertRaises(ValueError) as cm:
GitCheckTags2().start().complete()
msg = "remote tags missing locally: 'v', 'vm'"
self.assertEqual(msg, str(cm.exception))
subprocess.check_output(['git', 'tag', '-d', 'v1.2.3c4'])
subprocess.check_output(['git', 'tag', '-d', 'v2.0.1'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "remote tags missing locally: 'v1.2.3c4', 'v2.0.1'"
self.assertEqual(msg, str(cm.exception))
subprocess.check_output(['git', 'tag', '-a', 'v1.2.3c4', '-m', 'Release']) # different commit
subprocess.check_output(['git', 'tag', '-a', 'v2.0.1', '-m', 'Release']) # different commit
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "tags for different commits locally and remotely: 'v1.2.3c4', 'v2.0.1'"
self.assertEqual(msg, str(cm.exception))
subprocess.check_output(['git', 'tag', '-a', 'v3.0.0', '-m', 'Release'])
subprocess.check_output(['git', 'tag', '-a', 'v3.0.1', '-m', 'Release'])
with self.assertRaises(ValueError) as cm:
GitCheckTags().start().complete()
msg = "local tags missing on remotely: 'v3.0.0', 'v3.0.1'"
self.assertEqual(msg, str(cm.exception))
def test_example(self):
origin_repo_dir = os.path.abspath(tempfile.mkdtemp())
with testenv.DirectoryChanger(origin_repo_dir):
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'config', 'user.email', '[email protected]'])
subprocess.check_output(['git', 'config', 'user.name', 'user.name'])
subprocess.check_output(['touch', 'x'])
subprocess.check_output(['git', 'add', 'x'])
subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
subprocess.check_output(['git', 'tag', '-a', 'v1.2.3', '-m', 'Release'])
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'remote', 'add', 'origin', origin_repo_dir])
subprocess.check_output(['git', 'fetch'])
subprocess.check_output(['git', 'fetch', '--tags'])
with dlb.ex.Context():
class GitCheckTags(dlb_contrib.git.GitCheckTags):
ANNOTATED_TAG_NAME_REGEX = r'v(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*)){2}' # e.g. 'v1.23.0'
version_tag_names = set(GitCheckTags().start().commit_by_annotated_tag_name)
            self.assertEqual({'v1.2.3'}, version_tag_names)
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
class VersionTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_version_is_string_with_dot(self):
# noinspection PyPep8Naming
Tools = [
dlb_contrib.git.GitDescribeWorkingDirectory,
dlb_contrib.git.GitCheckTags
]
class QueryVersion(dlb_contrib.generic.VersionQuery):
VERSION_PARAMETERS_BY_EXECUTABLE = {
Tool.EXECUTABLE: Tool.VERSION_PARAMETERS
for Tool in Tools
}
with dlb.ex.Context():
version_by_path = QueryVersion().start().version_by_path
self.assertEqual(len(QueryVersion.VERSION_PARAMETERS_BY_EXECUTABLE), len(version_by_path))
for Tool in Tools:
path = dlb.ex.Context.active.helper[Tool.EXECUTABLE]
version = version_by_path[path]
self.assertIsInstance(version, str)
self.assertGreaterEqual(version.count('.'), 2)
| gpl-3.0 | 9,056,215,447,418,423,000 | 42.39604 | 116 | 0.592015 | false |
LCOGT/whatsup | whatsup/urls.py | 1 | 1040 | """
WhatsUP: astronomical object suggestions for Las Cumbres Observatory Global Telescope Network
Copyright (C) 2014-2015 LCOGT
urls.py
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from django.urls import include, path
from . import views
urlpatterns = [
path('', views.api_root, name='apiroot'),
path('target/',views.TargetDetailView.as_view(), name="api_target"),
path('search/v2/', views.TargetListView.as_view(), name="api_v2_search"),
path('search/', views.TargetListView.as_view(), name="api_search"),
path('range/', views.TargetListRangeView.as_view(), name="api_range"),
]
| gpl-3.0 | -5,789,981,566,066,620,000 | 36.142857 | 93 | 0.749038 | false |
holytortoise/abwreservierung | src/reservierung/views.py | 1 | 19052 | from django.shortcuts import render
from django.views.generic import TemplateView, ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.views.generic.dates import WeekArchiveView
from django.urls import reverse_lazy, reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django import forms as d_forms
import datetime
from . import forms
from . import models
# Create your views here.
class ReservierungList(ListView):
queryset = models.Reservierung.objects.order_by('anfangsDatum','anfangsZeit')
context_object_name = 'reservierungen'
class ReservierungUpdate(LoginRequiredMixin, UpdateView):
login_url = 'account:login'
redirect_field_name = 'redirect_to'
model = models.Reservierung
fields = ['reserviert_für','reservierterRaum', 'reservierungsGrund', 'anfangsDatum',
'endDatum', 'anfangsZeit', 'endZeit']
class ReservierungDelete(LoginRequiredMixin, DeleteView):
login_url = 'account:login'
redirect_field_name = 'redirect_to'
model = models.Reservierung
success_url = reverse_lazy('reservierung:reservierung-list')
template_name = 'reservierung/reservierung_delete.html'
class ReservierungDetail(DetailView):
model = models.Reservierung
context_object_name = 'reservierung'
template_name = 'reservierung/reservierung_detail.html'
# View for displaying the reservations of the current week
def index(request):
"""
    This function renders the table for the current week on the index page
    and allows stepping through the weeks.
"""
current_week = datetime.date.today().isocalendar()[1]
current_year = datetime.date.today().isocalendar()[0]
is_week = None
if request.method == 'POST':
jahr = int(request.POST['jahr'])
woche = int(request.POST['woche'])
        # If the "next week" button was pressed, increment the week by 1
if request.POST.__contains__('next_week'):
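            # December 28th always falls in the last ISO week of the year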
if woche == datetime.date(jahr, 12, 28).isocalendar()[1]:
woche = 1
jahr = jahr + 1
else:
woche = woche + 1
        # If the "previous week" button was pressed, decrement the week by 1
if request.POST.__contains__('last_week'):
if woche == 1:
jahr = jahr -1
woche = datetime.date(jahr,12,28).isocalendar()[1]
else:
woche = woche - 1
else:
jahr = datetime.date.today().isocalendar()[0]
woche = datetime.date.today().isocalendar()[1]
    # True if the displayed week equals the current week
if woche == current_week and jahr == current_year:
is_week = True
if woche != current_week or jahr != current_year:
is_week = False
    # Build the dates for the selected week
datum = str(jahr)+'-W'+str(woche)
r = datetime.datetime.strptime(datum + '-0', "%Y-W%W-%w")
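    # start/end: Monday and Sunday of the selected week, formatted as 'dd.mm' for the page header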
start = r - datetime.timedelta(days=r.weekday())
end = start + datetime.timedelta(days=6)
start = start.strftime('%d.%m')
end = end.strftime('%d.%m')
rooms = models.Raum.objects.all()
rooms_return = []
for room in rooms:
room_return = []
reservierungen = models.Reservierung.objects.filter(
reservierterRaum=room).order_by('anfangsDatum')
for reservierung in reservierungen:
if reservierung.anfangsDatum.isocalendar()[1] < woche and woche < reservierung.endDatum.isocalendar()[1]:
room_return.append(reservierung)
if ((reservierung.anfangsDatum.isocalendar()[1] == woche and reservierung.anfangsDatum.isocalendar()[0] == jahr)
or (reservierung.endDatum.isocalendar()[1] == woche and reservierung.endDatum.isocalendar()[0] == jahr)):
room_return.append(reservierung)
if len(room_return) != 0:
rooms_return.append(room_return)
if len(rooms_return) == 0:
rooms_return = None
context_dict = {'rooms_return':rooms_return,'reserv':reservierungen,
'woche':woche,'jahr':jahr,'current_week':current_week,
'current_year':current_year,'is_week':is_week,'start':start,'end':end}
return render(request, 'index.html', context_dict)
# View for creating reservations
@login_required(login_url='account:login')
def reservierung_form(request):
"""
    This function handles new reservations.
    It checks whether the room is available for the requested period.
    If it is, a new reservation is created and the user is redirected to the
    index page. If not, the user is offered alternative rooms that are free
    at the requested time.
"""
nutzer = request.user
free_rooms = None
reserv = None
moeglich = False
if request.method == 'POST':
form = forms.ReservierungForm(data=request.POST)
if form.is_valid():
free_rooms = []
reservierungen = models.Reservierung.objects.filter(
reservierterRaum=form.cleaned_data.get("reservierterRaum"))
if reservierungen.exists():
for reservierung in reservierungen:
if reservierung.täglich:
                        # does form.anfangsDatum fall within an existing
                        # reservation?
if reservierung.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < reservierung.endDatum:
                            # is the existing reservation daily?
if form.cleaned_data.get("täglich"):
                                # does r.endZeit end before f.anfangsZeit, or
                                # does r.anfangsZeit start after f.endZeit?
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit") or reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
                                    # if so, the reservation is possible
moeglich = True
else:
moeglich = False
reserv = reservierung
break
else:
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
moeglich = True
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
moeglich = True
else:
                                    # existing reservation is all-day,
                                    # so not possible
moeglich = False
reserv = reservierung
break
else:
                            # does f.anfangsDatum lie after r.endDatum?
if reservierung.endDatum < form.cleaned_data.get("anfangsDatum"):
moeglich = True
                            # do r.endDatum and f.anfangsDatum fall on the
                            # same day?
elif reservierung.endDatum == form.cleaned_data.get("anfangsDatum"):
                                # does r.endZeit end before f.anfangsZeit?
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
                                    # reservation possible
moeglich = True
                                # does r.anfangsZeit start after f.endZeit?
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
                                    # reservation possible
moeglich = True
else:
                                    # reservation not possible
moeglich = False
reserv = reservierung
break
                            # are r.anfangsDatum and f.endDatum on the same day?
elif reservierung.anfangsDatum == form.cleaned_data.get("endDatum"):
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
                                    # reservation possible
moeglich = True
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
                                    # reservation possible
moeglich = True
else:
moeglich = False
reserv = reservierung
break
else:
if reservierung.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < reservierung.endDatum:
                            # show an error message
                            # show the available rooms
                            # show the conflicting reservation
moeglich = False
reserv = reservierung
break
else:
                            # the existing reservation ends before the new
                            # one begins
if reservierung.endDatum < form.cleaned_data.get("anfangsDatum"):
moeglich = True
                            # the existing reservation ends on the same day
                            # the new one begins
elif reservierung.endDatum == form.cleaned_data.get("anfangsDatum"):
                                # existing end time is before or equal to the
                                # new start time
if reservierung.endZeit <= form.cleaned_data.get("anfangsZeit"):
moeglich = True
elif reservierung.anfangsZeit >= form.cleaned_data.get("endZeit"):
moeglich = True
else:
moeglich = False
reserv = reservierung
break
elif reservierung.anfangsDatum > form.cleaned_data.get("endDatum"):
moeglich = True
elif reservierung.anfangsDatum == form.cleaned_data.get("endDatum"):
if reservierung.anfangsZeit > form.cleaned_data.get("endZeit"):
moeglich = True
else:
moeglich = False
reserv = reservierung
break
else:
moeglich = True
if moeglich:
reserv = models.Reservierung()
reserv.reserviert_von = request.user
if form.cleaned_data.get("reserviertFür") == "":
reserv.reserviert_für = request.user.last_name
else:
reserv.reserviert_für = form.cleaned_data.get("reserviertFür")
reserv.reservierterRaum = models.Raum.objects.get(
id=form.cleaned_data.get("reservierterRaum"))
reserv.reservierungsGrund = form.cleaned_data.get(
"reservierungsGrund")
reserv.anfangsDatum = form.cleaned_data.get("anfangsDatum")
reserv.endDatum = form.cleaned_data.get("endDatum")
reserv.anfangsZeit = form.cleaned_data.get("anfangsZeit")
reserv.endZeit = form.cleaned_data.get("endZeit")
reserv.täglich = form.cleaned_data.get("täglich")
reserv.save()
return HttpResponseRedirect(reverse('reservierung:index'))
else:
# return free rooms
                # check the remaining reservations
rooms = models.Raum.objects.exclude(
id=form.cleaned_data.get("reservierterRaum"))
if rooms.exists():
for room in rooms:
room_reservs = models.Reservierung.objects.filter(
reservierterRaum=room)
                        # do any reservations exist for this room?
if room_reservs.exists():
                            # for every reservation of this room
free_room = False
for room_reserv in room_reservs:
                                # does the new reservation fall within the
                                # period of an existing reservation?
if form.cleaned_data.get("täglich"):
if room_reserv.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < room_reserv.endDatum:
if room_reserv.täglich:
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit") or room_reserv.anfangsZeit > form.cleaned_data.get("endZeit"):
free_room = True
else:
free_room = False
break
else:
free_room = False
break
else:
if room_reserv.endDatum < form.cleaned_data.get("anfangsDatum"):
free_room = True
elif room_reserv.endDatum == form.cleaned_data.get("anfangsDatum"):
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit"):
free_room = True
elif room_reserv.anfangsZeit >= form.cleaned_data.get("endZeit"):
free_room = True
else:
free_room = False
break
elif room_reserv.anfangsDatum == form.cleaned_data.get("endDatum"):
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit"):
free_room = True
elif room_reserv.anfangsZeit >= form.cleaned_data.get("endZeit"):
free_room = True
else:
free_room = False
break
else:
if room_reserv.anfangsDatum < form.cleaned_data.get("anfangsDatum") and form.cleaned_data.get("anfangsDatum") < room_reserv.endDatum:
                                        # yes, so the room is not free
free_room = False
break
else:
                                        # no, so the room may be free;
                                        # check whether anfangsDatum is after
                                        # or on endDatum
if room_reserv.endDatum < form.cleaned_data.get("anfangsDatum"):
                                            # room free
free_room = True
elif room_reserv.endDatum == form.cleaned_data.get("anfangsDatum"):
                                            # same day
if room_reserv.endZeit <= form.cleaned_data.get("anfangsZeit"):
                                                # room free
free_room = True
else:
                                                # room is not free
free_room = False
break
elif room_reserv.anfangsDatum > form.cleaned_data.get("endDatum"):
                                            # room free
free_room = True
elif room_reserv.anfangsDatum == form.cleaned_data.get("endDatum"):
if room_reserv.anfangsZeit > form.cleaned_data.get("endZeit"):
                                                # room free
free_room = True
else:
                                                # room not free
free_room = False
break
if free_room:
free_rooms.append(room)
else:
free_rooms.append(room)
else:
free_rooms = models.Raum.objects.all()
else:
form = forms.ReservierungForm()
return render(request, 'reservierung/reservierung_form.html', {'form': form, 'reserv': reserv, 'free_rooms': free_rooms, })
# View showing all reservations of the logged-in user
@login_required(login_url='account:login')
def reservierung_user(request):
user = request.user
rooms = models.Raum.objects.all()
rooms_return = []
for room in rooms:
room_return = []
reservierungen = models.Reservierung.objects.filter(
reservierterRaum=room).order_by('anfangsDatum')
for reservierung in reservierungen:
if reservierung.reserviert_von == user:
room_return.append(reservierung)
rooms_return.append(room_return)
return render(request, 'reservierung/reservierung_user.html', {'user': user, 'rooms_return': rooms_return, })
| mit | 3,023,080,012,261,107,700 | 52.847025 | 169 | 0.476378 | false |
alexhersh/calico | calico/common.py | 1 | 22270 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
calico.common
~~~~~~~~~~~~
Calico common utilities.
"""
import errno
import logging
import logging.handlers
import os
import re
import sys
from types import StringTypes
import netaddr
import netaddr.core
from netaddr.strategy import eui48
_log = logging.getLogger(__name__)
AGENT_TYPE_CALICO = 'Calico agent'
FORMAT_STRING = '%(asctime)s [%(levelname)s][%(process)s/%(thread)d] %(name)s %(lineno)d: %(message)s'
# Used "tid", which we swap for the greenlet ID, instead of "thread"
FORMAT_STRING_GEVENT = '%(asctime)s [%(levelname)s][%(process)s/%(tid)d] %(name)s %(lineno)d: %(message)s'
# This format string deliberately uses two different styles of format
# specifier. The %()s form is used by the logging module: the {} form is used
# by the code in this module. This allows us to dynamically generate the format
# string used by the logger.
SYSLOG_FORMAT_STRING = '{excname}[%(process)s]: %(module)s@%(lineno)d %(message)s'
# White-list for the --protocol match criteria. We allow the guaranteed
# string shortcuts as well as int/string versions of the raw IDs. We disallow
# 0 because the kernel cannot match on it directly.
KERNEL_PROTOCOLS = set(["tcp", "udp", "icmp", "icmpv6", "sctp", "udplite"])
KERNEL_PROTOCOLS.update(xrange(1, 256))
KERNEL_PROTOCOLS.update(intern(str(p)) for p in xrange(1, 256))
# Protocols that support a port match in iptables. We allow the name and
# protocol number.
KERNEL_PORT_PROTOCOLS = set([
"tcp", 6, "6",
"udp", 17, "17",
"udplite", 136, "136",
"sctp", 132, "132",
"dccp", 33, "33",
])
# Valid keys for a rule JSON dict.
KNOWN_RULE_KEYS = set([
"action",
"protocol",
"src_net",
"src_tag",
"src_ports",
"dst_net",
"dst_tag",
"dst_ports",
"icmp_type",
"icmp_code",
"ip_version",
])
# Regex that matches only names with valid characters in them. The list of
# valid characters is the same for endpoints, profiles, and tags.
VALID_ID_RE = re.compile('^[a-zA-Z0-9_\.\-]+$')
VALID_LINUX_IFACE_NAME_RE = re.compile(r'^[a-zA-Z0-9_]{1,15}$')
# Not that thorough: we don't care if it's a valid CIDR, only that it doesn't
# have anything malicious in it.
VALID_IPAM_POOL_ID_RE = re.compile(r'^[0-9\.:a-fA-F\-]{1,43}$')
EXPECTED_IPAM_POOL_KEYS = set(["cidr", "masquerade"])
def validate_port(port):
"""
Validates that a port is valid. Returns true if valid, false if not.
"""
try:
port_int = int(port)
if port_int <= 0 or port_int > 65535:
return False
else:
return True
except ValueError:
return False
def validate_ip_addr(addr, version=None):
"""
Validates that an IP address is valid. Returns true if valid, false if
not. Version can be "4", "6", None for "IPv4", "IPv6", or "either"
respectively.
"""
if version == 4:
return netaddr.valid_ipv4(addr)
elif version == 6:
return netaddr.valid_ipv6(addr)
else:
return netaddr.valid_ipv4(addr) or netaddr.valid_ipv6(addr)
def canonicalise_ip(addr, version):
if addr is None:
return None
ip = netaddr.IPAddress(addr, version=version)
return intern(str(ip))
def validate_cidr(cidr, version):
"""
Validates that a CIDR is valid. Returns true if valid, false if
not. Version can be "4", "6", None for "IPv4", "IPv6", or "either"
respectively.
"""
try:
ip = netaddr.IPNetwork(cidr, version=version)
return True
except (netaddr.core.AddrFormatError, ValueError, TypeError):
return False
def canonicalise_cidr(cidr, version):
if cidr is None:
return None
nw = netaddr.IPNetwork(cidr, version=version)
return intern(str(nw))
def canonicalise_mac(mac):
# Use the Unix dialect, which uses ':' for its separator instead of
# '-'. This fits best with what iptables is expecting.
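    # e.g. "AA-BB-CC-DD-EE-FF" -> "aa:bb:cc:dd:ee:ff"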
eui = netaddr.EUI(mac, dialect=eui48.mac_unix)
return str(eui)
def mkdir_p(path):
"""http://stackoverflow.com/a/600612/190597 (tzot)"""
try:
os.makedirs(path, exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def default_logging(gevent_in_use=True, syslog_executable_name=None):
"""
Sets up the Calico default logging, with default severities.
Our default logging consists of:
- setting the log level of the root logger to DEBUG (a safe initial value)
- attaching a SysLog handler with no formatter (log to syslog), ERROR level
only
- attaching a StreamHandler with the Calico formatter, to log to stdout,
with ERROR level
This default explicitly excludes adding logging to file. This is because
working out what file to log to requires reading the configuration file,
and doing that may cause errors that we want to log! To add a file logger,
call :meth:`complete_logging() <calico.common.complete_logging>` after
this function has been called.
"""
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
executable_name = syslog_executable_name or os.path.basename(sys.argv[0])
syslog_format = SYSLOG_FORMAT_STRING.format(excname=executable_name)
syslog_formatter = logging.Formatter(syslog_format)
if os.path.exists("/dev/log"):
syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')
else:
# Probably unit tests running on windows.
syslog_handler = logging.handlers.SysLogHandler()
syslog_handler.setLevel(logging.ERROR)
syslog_handler.setFormatter(syslog_formatter)
root_logger.addHandler(syslog_handler)
format_string = FORMAT_STRING_GEVENT if gevent_in_use else FORMAT_STRING
file_formatter = logging.Formatter(format_string)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.ERROR)
stream_handler.setFormatter(file_formatter)
if gevent_in_use:
from geventutils import GreenletFilter
stream_handler.addFilter(GreenletFilter())
root_logger.addHandler(stream_handler)
def complete_logging(logfile=None,
file_level=logging.DEBUG,
syslog_level=logging.ERROR,
stream_level=logging.ERROR,
gevent_in_use=True):
"""
Updates the logging configuration based on learned configuration.
The purpose of this function is to update the previously set logging
configuration such that we can start logging to file. This is done in a
separate step to the initial logging configuration in order to ensure that
logging is available as early in execution as possible, i.e. before the
config file has been parsed.
This function must only be called once, after
:meth:`default_logging() <calico.common.default_logging>`
has been called.
The xyz_level parameters may be a valid logging level DEBUG/INFO/... or
None to disable that log entirely. Note: the config module supports
using the string "none" in the configuration to disable logging.
"""
root_logger = logging.getLogger()
# If default_logging got called already, we'll have some loggers in place.
# Update their levels.
file_handler = None
for handler in root_logger.handlers[:]:
if isinstance(handler, logging.handlers.SysLogHandler):
if syslog_level is None:
root_logger.removeHandler(handler)
else:
handler.setLevel(syslog_level)
elif isinstance(handler, logging.StreamHandler):
if stream_level is None:
root_logger.removeHandler(handler)
else:
handler.setLevel(stream_level)
elif isinstance(handler, logging.handlers.WatchedFileHandler):
file_handler = handler
if file_level is None:
root_logger.removeHandler(handler)
else:
handler.setLevel(file_level)
# If we've been given a log file, log to file as well.
if logfile and file_level is not None:
if not file_handler:
mkdir_p(os.path.dirname(logfile))
format_string = (FORMAT_STRING_GEVENT if gevent_in_use
else FORMAT_STRING)
formatter = logging.Formatter(format_string)
file_handler = logging.handlers.WatchedFileHandler(logfile)
if gevent_in_use:
from geventutils import GreenletFilter
file_handler.addFilter(GreenletFilter())
file_handler.setLevel(file_level)
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
# Optimization: disable all logging below the minimum level that we care
# about. The global "disable" setting is the first thing that gets checked
# in the logging framework so it's the fastest way to disable logging.
levels = [file_level, syslog_level, stream_level]
# Map None to something greater than the highest logging level.
levels = [l if l is not None else logging.CRITICAL + 1 for l in levels]
min_log_level = min(levels)
logging.disable(min_log_level - 1)
_log.info("Logging initialized")
class ValidationFailed(Exception):
"""
Class used for data validation exceptions.
"""
pass
def validate_endpoint(config, combined_id, endpoint):
"""
Ensures that the supplied endpoint is valid. Once this routine has returned
successfully, we know that all required fields are present and have valid
values.
Has the side-effect of putting IP and MAC addresses in canonical form in
the input dict.
:param config: configuration structure
:param combined_id: EndpointId object
:param endpoint: endpoint dictionary as read from etcd
:raises ValidationFailed
"""
issues = []
if not isinstance(endpoint, dict):
raise ValidationFailed("Expected endpoint to be a dict.")
if not VALID_ID_RE.match(combined_id.endpoint):
issues.append("Invalid endpoint ID '%r'." % combined_id.endpoint)
if "state" not in endpoint:
issues.append("Missing 'state' field.")
elif endpoint["state"] not in ("active", "inactive"):
issues.append("Expected 'state' to be one of active/inactive.")
for field in ["name", "mac"]:
if field not in endpoint:
issues.append("Missing '%s' field." % field)
elif not isinstance(endpoint[field], StringTypes):
issues.append("Expected '%s' to be a string; got %r." %
(field, endpoint[field]))
elif field == "mac":
if not netaddr.valid_mac(endpoint.get("mac")):
issues.append("Invalid MAC address")
else:
endpoint["mac"] = canonicalise_mac(endpoint.get("mac"))
if "profile_id" in endpoint:
if "profile_ids" not in endpoint:
endpoint["profile_ids"] = [endpoint["profile_id"]]
del endpoint["profile_id"]
if "profile_ids" not in endpoint:
issues.append("Missing 'profile_id(s)' field.")
else:
for value in endpoint["profile_ids"]:
if not isinstance(value, StringTypes):
issues.append("Expected profile IDs to be strings.")
break
if not VALID_ID_RE.match(value):
issues.append("Invalid profile ID '%r'." % value)
if ("name" in endpoint and isinstance(endpoint['name'], StringTypes)
and combined_id.host == config.HOSTNAME
and not endpoint["name"].startswith(config.IFACE_PREFIX)):
# Only test the interface for local endpoints - remote hosts may have
# a different interface prefix.
issues.append("Interface %r does not start with %r." %
(endpoint["name"], config.IFACE_PREFIX))
for version in (4, 6):
nets = "ipv%d_nets" % version
if nets not in endpoint:
endpoint[nets] = []
else:
canonical_nws = []
nets_list = endpoint.get(nets, [])
if not isinstance(nets_list, list):
issues.append("%s should be a list" % nets)
else:
for ip in nets_list:
if not validate_cidr(ip, version):
issues.append("IP address %r is not a valid "
"IPv%d CIDR." % (ip, version))
break
else:
canonical_nws.append(canonicalise_cidr(ip, version))
endpoint[nets] = canonical_nws
gw_key = "ipv%d_gateway" % version
try:
gw_str = endpoint[gw_key]
if gw_str is not None and not validate_ip_addr(gw_str,
version):
issues.append("%s is not a valid IPv%d gateway address." %
(gw_key, version))
else:
endpoint[gw_key] = canonicalise_ip(gw_str, version)
except KeyError:
pass
if issues:
raise ValidationFailed(" ".join(issues))
def validate_rules(profile_id, rules):
"""
Ensures that the supplied rules are valid. Once this routine has returned
successfully, we know that all required fields are present and have valid
values.
:param profile_id: Profile ID from etcd
:param rules: rules list as read from etcd
:raises ValidationFailed
"""
issues = []
if not isinstance(rules, dict):
raise ValidationFailed("Expected rules to be a dict.")
if not VALID_ID_RE.match(profile_id):
issues.append("Invalid profile_id '%r'." % profile_id)
for dirn in ("inbound_rules", "outbound_rules"):
if dirn not in rules:
issues.append("No %s in rules." % dirn)
continue
if not isinstance(rules[dirn], list):
issues.append("Expected rules[%s] to be a list." % dirn)
continue
for rule in rules[dirn]:
if not isinstance(rule, dict):
issues.append("Rules should be dicts.")
break
for key, value in rule.items():
if value is None:
del rule[key]
# Absolutely all fields are optional, but some have valid and
# invalid values.
protocol = rule.get('protocol')
if protocol is not None and protocol not in KERNEL_PROTOCOLS:
issues.append("Invalid protocol %s in rule %s" %
(protocol, rule))
elif protocol is not None:
protocol = intern(str(protocol))
rule['protocol'] = str(protocol)
ip_version = rule.get('ip_version')
if ip_version is not None and ip_version not in (4, 6):
# Bad IP version prevents further validation
issues.append("Invalid ip_version in rule %s." % rule)
continue
if ip_version == 4 and protocol == "icmpv6":
issues.append("Using icmpv6 with IPv4 in rule %s." % rule)
if ip_version == 6 and protocol == "icmp":
issues.append("Using icmp with IPv6 in rule %s." % rule)
for tag_type in ('src_tag', 'dst_tag'):
tag = rule.get(tag_type)
if tag is None:
continue
if not VALID_ID_RE.match(tag):
issues.append("Invalid %s '%r'." % (tag_type, tag))
for key in ("src_net", "dst_net"):
network = rule.get(key)
if (network is not None and
not validate_cidr(rule[key], ip_version)):
issues.append("Invalid CIDR (version %s) in rule %s." %
(ip_version, rule))
elif network is not None:
rule[key] = canonicalise_cidr(network, ip_version)
for key in ("src_ports", "dst_ports"):
ports = rule.get(key)
if (ports is not None and
not isinstance(ports, list)):
issues.append("Expected ports to be a list in rule %s."
% rule)
continue
if ports is not None:
if protocol not in KERNEL_PORT_PROTOCOLS:
issues.append("%s is not allowed for protocol %s in "
"rule %s" % (key, protocol, rule))
for port in ports:
error = validate_rule_port(port)
if error:
issues.append("Invalid port %s (%s) in rule %s." %
(port, error, rule))
action = rule.get('action')
if (action is not None and
action not in ("allow", "deny")):
issues.append("Invalid action in rule %s." % rule)
icmp_type = rule.get('icmp_type')
if icmp_type is not None:
if not isinstance(icmp_type, int):
issues.append("ICMP type is not an integer in rule %s." %
rule)
elif not 0 <= icmp_type <= 255:
issues.append("ICMP type is out of range in rule %s." %
rule)
icmp_code = rule.get("icmp_code")
if icmp_code is not None:
if not isinstance(icmp_code, int):
issues.append("ICMP code is not an integer in rule %s." %
rule)
elif not 0 <= icmp_code <= 255:
issues.append("ICMP code is out of range.")
if icmp_type is None:
# TODO: ICMP code without ICMP type not supported by iptables
# Firewall against that for now.
issues.append("ICMP code specified without ICMP type.")
unknown_keys = set(rule.keys()) - KNOWN_RULE_KEYS
if unknown_keys:
issues.append("Rule contains unknown keys: %s." % unknown_keys)
if issues:
raise ValidationFailed(" ".join(issues))
def validate_rule_port(port):
"""
Validates that any value in a port list really is valid.
Valid values are an integer port, or a string range separated by a colon.
:param port: the port, which is validated for type
:returns: None or an error string if invalid
"""
if isinstance(port, int):
if port < 0 or port > 65535:
return "integer out of range"
return None
# If not an integer, must be format N:M, i.e. a port range.
try:
fields = port.split(":")
except AttributeError:
return "neither integer nor string"
if not len(fields) == 2:
return "range unparseable"
try:
start = int(fields.pop(0))
end = int(fields.pop(0))
except ValueError:
return "range invalid"
if start >= end or start < 0 or end > 65535:
return "range invalid"
return None
def validate_tags(profile_id, tags):
"""
Ensures that the supplied tags are valid. Once this routine has returned
successfully, we know that all required fields are present and have valid
values.
:param profile_id: profile_id as read from etcd
:param tags: tag set as read from etcd
:raises ValidationFailed
"""
issues = []
if not VALID_ID_RE.match(profile_id):
issues.append("Invalid profile_id '%r'." % profile_id)
if not isinstance(tags, list):
issues.append("Expected tags to be a list.")
else:
for tag in tags:
if not isinstance(tag, StringTypes):
issues.append("Expected tag '%s' to be a string." % tag)
break
if not VALID_ID_RE.match(tag):
issues.append("Invalid tag '%r'." % tag)
if issues:
raise ValidationFailed(" ".join(issues))
def validate_ipam_pool(pool_id, pool, ip_version):
"""
Validates and canonicalises an IPAM pool dict. Removes any fields that
it doesn't know about.
Modifies the dict in-place.
"""
if not isinstance(pool, dict):
raise ValidationFailed("Pool should be a dict")
# Remove any keys that we're not expecting. Stops unvalidated data from
# slipping through. We ignore other keys since this structure is used
# by calicoctl for its own purposes too.
keys_to_remove = set()
for key in pool:
if key not in EXPECTED_IPAM_POOL_KEYS:
keys_to_remove.add(key)
for key in keys_to_remove:
pool.pop(key)
issues = []
if "cidr" not in pool:
# CIDR is mandatory.
issues.append("'cidr' field is missing")
else:
cidr = pool["cidr"]
if cidr is None or not validate_cidr(cidr, ip_version):
issues.append("Invalid CIDR: %r" % cidr)
else:
pool["cidr"] = canonicalise_cidr(cidr, ip_version)
if not isinstance(pool.get("masquerade", False), bool):
issues.append("Invalid 'masquerade' field: %r" % pool["masquerade"])
if not VALID_IPAM_POOL_ID_RE.match(pool_id):
issues.append("Invalid pool ID: %r" % pool)
if issues:
raise ValidationFailed(','.join(issues))
| apache-2.0 | 5,901,292,691,446,355,000 | 35.094003 | 106 | 0.598563 | false |
sternb0t/django-pandas | django_pandas/io.py | 1 | 3578 | import pandas as pd
from .utils import update_with_verbose
import django
def to_fields(qs, fieldnames):
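    # Resolve each field name (possibly spanning relations via '__') on the
    # queryset's model to the corresponding model field instance.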
for fieldname in fieldnames:
model = qs.model
for fieldname_part in fieldname.split('__'):
try:
field = model._meta.get_field(fieldname_part)
except django.db.models.fields.FieldDoesNotExist:
rels = model._meta.get_all_related_objects_with_model()
for relobj, _ in rels:
if relobj.get_accessor_name() == fieldname_part:
field = relobj.field
model = field.model
break
else:
if hasattr(field, "one_to_many") and field.one_to_many:
model = field.related_model
elif field.get_internal_type() in ('ForeignKey', 'OneToOneField', 'ManyToManyField'):
model = field.rel.to
yield field
def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
verbose=True):
"""
Returns a dataframe from a QuerySet
Optionally specify the field names/columns to utilize and
a field as the index
Parameters
----------
qs: The Django QuerySet.
fieldnames: The model field names to use in creating the frame.
You can span a relationship in the usual Django way
by using double underscores to specify a related field
in another model
index_col: specify the field to use for the index. If the index
field is not in the field list it will be appended
coerce_float : boolean, default False
Attempt to convert values to non-string, non-numeric data (like
decimal.Decimal) to floating point, useful for SQL result sets
verbose: boolean If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields else use
the primary keys values.
The human readable version of the foreign key field is
defined in the ``__unicode__`` or ``__str__``
methods of the related class definition
"""
if fieldnames:
if index_col is not None and index_col not in fieldnames:
# Add it to the field names if not already there
fieldnames = tuple(fieldnames) + (index_col,)
fields = to_fields(qs, fieldnames)
elif isinstance(qs, django.db.models.query.ValuesQuerySet):
if django.VERSION < (1, 8):
annotation_field_names = qs.aggregate_names
else:
annotation_field_names = qs.annotation_names
fieldnames = qs.field_names + annotation_field_names + qs.extra_names
fields = [qs.model._meta.get_field(f) for f in qs.field_names] + \
[None] * (len(annotation_field_names) + len(qs.extra_names))
else:
fields = qs.model._meta.fields
fieldnames = [f.name for f in fields]
if isinstance(qs, django.db.models.query.ValuesQuerySet):
recs = list(qs)
else:
recs = list(qs.values_list(*fieldnames))
df = pd.DataFrame.from_records(recs, columns=fieldnames,
coerce_float=coerce_float)
if verbose:
update_with_verbose(df, fieldnames, fields)
if index_col is not None:
df.set_index(index_col, inplace=True)
return df
| bsd-3-clause | 5,211,177,889,404,389,000 | 36.663158 | 101 | 0.602571 | false |
Urumasi/Flask-Bones | app/data/models/oauth.py | 1 | 5170 | from flask_login import UserMixin
from app.extensions import cache, bcrypt
import bcrypt as bcr
from .. import db
from ..mixins import CRUDMixin
import datetime
from rauth import OAuth1Service, OAuth2Service
from flask import current_app, url_for, request, redirect, session
class Oauth(CRUDMixin, UserMixin, db.Model):
__tablename__ = 'oauth'
id = db.Column(db.Integer, primary_key=True)
#user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
#oauth = db.relationship("Oauth", back_populates="users")
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
social_id = db.Column(db.String(64), nullable=False, unique=True)
nickname = db.Column(db.String(64), nullable=True)
email = db.Column(db.String(64), nullable=True)
jmeno = db.Column(db.String(128), nullable=False)
prijmeni = db.Column(db.String(128), nullable=False)
profile_url = db.Column(db.String(128), nullable=False)
image_url = db.Column(db.String(128), nullable=False)
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('public.oauth_callback', provider=self.provider_name,
_external=True)
@classmethod
    def get_provider(cls, provider_name):
        if cls.providers is None:
            cls.providers = {}
            for provider_class in cls.__subclasses__():
                provider = provider_class()
                cls.providers[provider.provider_name] = provider
        return cls.providers[provider_name]
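# Hedged usage sketch (not part of this module): views typically resolve a
# provider by name and delegate to it. The blueprint wiring below is
# illustrative; only the ``OAuthSignIn.get_provider`` call and the
# ``public.oauth_callback`` endpoint name come from the code above.
#
#     @public.route('/authorize/<provider>')
#     def oauth_authorize(provider):
#         return OAuthSignIn.get_provider(provider).authorize()
#
#     @public.route('/callback/<provider>')
#     def oauth_callback(provider):
#         oauth = OAuthSignIn.get_provider(provider)
#         social_id, nickname, email, jmeno, prijmeni, profile_url, image_url = oauth.callback()
#         # ... look up or create the matching Oauth row and log the user in ...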
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name='facebook',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://graph.facebook.com/oauth/authorize',
access_token_url='https://graph.facebook.com/oauth/access_token',
base_url='https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None, None, None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me?fields=id,email,name').json()
profile_url = "http://facebook.com/profile.php?id=%s" % me['id']
image_url = "http://graph.facebook.com/%s/picture" % me['id']
return (
'facebook$' + me['id'],
me.get('email').split('@')[0] if me.get('email') is not None else "anon"+me['id'],
me.get('email'),
me['name'].split(' ')[0],
me['name'].split(' ')[1],
profile_url,
image_url
)
class TwitterSignIn(OAuthSignIn):
def __init__(self):
super(TwitterSignIn, self).__init__('twitter')
self.service = OAuth1Service(
name='twitter',
consumer_key=self.consumer_id,
consumer_secret=self.consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
authorize_url='https://api.twitter.com/oauth/authorize',
access_token_url='https://api.twitter.com/oauth/access_token',
base_url='https://api.twitter.com/1.1/'
)
def authorize(self):
request_token = self.service.get_request_token(
params={'oauth_callback': self.get_callback_url()}
)
session['request_token'] = request_token
return redirect(self.service.get_authorize_url(request_token[0]))
def callback(self):
request_token = session.pop('request_token')
if 'oauth_verifier' not in request.args:
return None, None, None, None, None, None, None
oauth_session = self.service.get_auth_session(
request_token[0],
request_token[1],
data={'oauth_verifier': request.args['oauth_verifier']}
)
me = oauth_session.get('account/verify_credentials.json').json()
social_id = 'twitter$' + str(me.get('id'))
username = me.get('screen_name')
name = me.get('name').split(' ')
return (
social_id,
username,
None,
name[0],
            name[1] if len(name) > 1 else '',
#'@%s' % me.get('screen_name') - display name (@Atheloses)
"http://twitter.com/%s" % me.get('screen_name'),
me.get('profile_image_url')
) | mit | -6,923,497,179,262,909,000 | 37.303704 | 94 | 0.589555 | false |
ResEl-TB/stages | search/tests.py | 1 | 2810 | from django.test import TestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from post.models import Zone, Duree, Domaine, TypeContrat, Annonce
from .forms import SearchForm
class SearchForms(TestCase):
def setUp(self):
zone = Zone.objects.create(nom='île-de-france')
duree = Duree.objects.create(duree='6 mois')
domain = Domaine.objects.create(nom='Télécoms')
contract = TypeContrat.objects.create(type_contrat='stage')
for i in range(5):
Annonce.objects.create(
zone=zone,
duree=duree,
type_de_contrat=contract
).domain.add(domain)
def test_search_empty(self):
form = SearchForm(data={})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.build_queryset()), 5)
def test_search_not_empty(self):
form = SearchForm(data={'nom_entreprise': 'thales'})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.build_queryset()), 0)
def test_search_zone(self):
form = SearchForm(data={'zone': 1})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.build_queryset()), 5)
def test_search_domain(self):
form = SearchForm(data={'domain': [1]})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.build_queryset()), 5)
class SearchViews(TestCase):
def setUp(self):
User.objects.create_user('john', '[email protected]', 'password')
zone = Zone.objects.create(nom='île-de-france')
duree = Duree.objects.create(duree='6 mois')
domain = Domaine.objects.create(nom='Télécoms')
contract = TypeContrat.objects.create(type_contrat='stage')
for i in range(5):
Annonce.objects.create(
zone=zone,
duree=duree,
type_de_contrat=contract
).domain.add(domain)
def test_index_login(self):
response = self.client.get(reverse('search:index'))
self.assertEqual(response.status_code, 302)
def test_index_normal(self):
self.client.login(username='john', password='password')
response = self.client.get(reverse('search:index'))
self.assertTrue(response.status_code, 200)
self.assertTemplateUsed(response, 'search/index.html')
self.assertEqual(len(response.context.get('object_list')), 5)
def test_index_search(self):
self.client.login(username='john', password='password')
response = self.client.get(reverse('search:index'), {'zone': '1'})
self.assertTrue(response.status_code, 200)
self.assertTemplateUsed(response, 'search/index.html')
self.assertEqual(len(response.context.get('object_list')), 5) | gpl-2.0 | -17,203,318,458,574,098 | 38.507042 | 74 | 0.634451 | false |
elainenaomi/sciwonc-dataflow-examples | sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/python2.7/dist-packages/Pegasus/monitoring/notifications.py | 1 | 34263 | """
Class for managing notifications in pegasus-monitord.
"""
##
# Copyright 2007-2011 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Import Python modules
import os
import sys
import math
import time
import shlex
import signal
import logging
import tempfile
import subprocess
from Pegasus.tools import utils
NOTIFICATION_FILE = "monitord-notifications.log" # filename for writing the output of notification scripts
WAIT_CHILD_FINISH = 5 # in seconds
logger = logging.getLogger(__name__)
class Notifications:
"""
This object contains all functions needed for managing
notifications and starting notification scripts.
"""
def __init__(self, notification_file_prefix,
max_parallel_notifications=10, notifications_timeout=0):
"""
        This function initializes the Notifications class.
"""
self._active_notifications = []
self._pending_notifications = []
self._max_parallel_notifications = max_parallel_notifications
self._notifications_timeout = notifications_timeout
self._notifications_fn = os.path.join(notification_file_prefix, NOTIFICATION_FILE)
self._notifications_log = None
self._notifications = {}
# Open notifications' log file
try:
self._notifications_log = open(self._notifications_fn, 'a')
except IOError:
logger.critical("cannot create notifications' log file... exiting...")
sys.exit(1)
def has_pending_notifications(self):
"""
This function returns True if we have pending notifications.
"""
return len(self._pending_notifications) > 0
def has_active_notifications(self):
"""
This function returns True if we have active notifications.
"""
return len(self._active_notifications) > 0
def terminate_notification(self, my_entry):
"""
This function terminates a notification process, and cleans up its
output/error files.
"""
my_p = my_entry["subp"]
my_pid = my_entry["pid"]
my_notification = my_entry["notification"]
my_out_fn = my_entry["out_fn"]
my_err_fn = my_entry["err_fn"]
my_out_fd = my_entry["out_fd"]
my_err_fd = my_entry["err_fd"]
my_action = my_entry["action"]
my_p.poll()
# If process hasn't finished...
if my_p.returncode is None:
# Send SIGTERM first...
try:
os.kill(my_pid, signal.SIGTERM)
except OSError:
logger.info("error sending SIGTERM to notification script...")
# Wait for child to finish
logger.warning("waiting for notification process to finish: %s - %s"
% (my_notification, my_action))
time.sleep(WAIT_CHILD_FINISH)
my_p.poll()
if my_p.returncode is None:
# Send SIGKILL now...
logger.warning("killing notification process to finish: %s - %s"
% (my_notification, my_action))
try:
os.kill(my_pid, signal.SIGKILL)
except OSError:
logger.info("error sending SIGKILL to notification script...")
# Finally, clean up files...
try:
os.unlink(my_out_fn)
os.unlink(my_err_fn)
except OSError:
# No error here...
pass
logger.warning("notification terminated: %s - %s" % (my_notification, my_action))
def service_notifications(self):
"""
        This function services notifications. It checks the notifications
in the active list to see if they have finished. If so, it copies
the stdout/stderr from these notifications to the
monitord-notifications.log file. For notifications in the
pending_notifications list, it starts the notification scripts,
unless there are already too many notifications running in the
system.
"""
logger.info("active notifications %d, pending notifications: %d"
% (len(self._active_notifications), len(self._pending_notifications)))
# Step 1: Look at existing notifications
if len(self._active_notifications) > 0:
# We have active notifications, let's check on their statuses
my_notif_index = 0
while my_notif_index < len(self._active_notifications):
my_active_notif = self._active_notifications[my_notif_index]
# Get subprocess object
my_active_p = my_active_notif["subp"]
my_status = my_active_p.poll()
if my_status is not None:
# Process finished notification
my_finished_out_fn = my_active_notif["out_fn"]
my_finished_err_fn = my_active_notif["err_fn"]
my_finished_out_fd = my_active_notif["out_fd"]
my_finished_err_fd = my_active_notif["err_fd"]
my_finished_notification = my_active_notif["notification"]
my_finished_action = my_active_notif["action"]
my_finished_notification_params = my_active_notif["params"]
# Close out/err files, if not already closed...
try:
my_finished_out_fd.close()
except IOError:
logger.warning("error closing stdout file for notification %s... continuing..."
% (my_finished_notification))
try:
my_finished_err_fd.close()
except IOError:
logger.warning("error closing stderr file for notification %s... continuing..."
% (my_finished_notification))
if self._notifications_log is not None:
if logger.isEnabledFor(logging.INFO):
self._notifications_log.write("%s\n" % ('-' * 80))
self._notifications_log.write("Notification time : %s\n" % (utils.isodate()))
self._notifications_log.write("Notification event : %s\n" % (my_finished_notification))
self._notifications_log.write("Notification action: %s\n" % (my_finished_action))
self._notifications_log.write("Notification status: %s\n" % (my_status))
self._notifications_log.write("\n")
self._notifications_log.write("Notification environment\n")
for k in my_finished_notification_params:
self._notifications_log.write("%s : %s\n" % (k, my_finished_notification_params[k]))
self._notifications_log.write("\n")
self._notifications_log.write("stdout:\n")
try:
my_f = open(my_finished_out_fn, 'r')
for line in my_f:
self._notifications_log.write(line)
except IOError:
logger.warning("error processing notification stdout file: %s. continuing..."
% (my_finished_out_fn))
else:
my_f.close()
self._notifications_log.write("\n")
self._notifications_log.write("stderr:\n")
try:
my_f = open(my_finished_err_fn, 'r')
for line in my_f:
self._notifications_log.write(line)
except IOError:
logger.warning("error processing notification stderr file: %s. continuing..."
% (my_finished_err_fn))
else:
my_f.close()
self._notifications_log.write("\n")
self._notifications_log.write("\n")
else:
# Only log a one-liner so we can debug things later if we need to
self._notifications_log.write("%s - %s - %s - %s\n" % (utils.isodate(),
my_finished_notification,
my_finished_action,
my_status))
else:
logger.critical("notifications' output log file not initialized... exiting...")
sys.exit(1)
# Now, delete output and error files
try:
os.unlink(my_finished_out_fn)
except OSError:
logger.warning("error deleting notification stdout file: %s. continuing..."
% (my_finished_out_fn))
try:
os.unlink(my_finished_err_fn)
except OSError:
logger.warning("error deleting notification stderr file: %s. continuing..."
% (my_finished_err_fn))
# Delete this notification from our list
my_deleted_entry = self._active_notifications.pop(my_notif_index)
else:
# Process still going... leave it...
my_notif_index = my_notif_index + 1
# Step 2: Look at our notification queue
while len(self._pending_notifications) > 0:
# Ok we have notifications to service...
# print "pending notifications: %s" % (len(self._pending_notifications))
logger.debug("pending notifications: %s" % (len(self._pending_notifications)))
# Check if we have reached the maximum number of concurrent notifications
if len(self._active_notifications) > self._max_parallel_notifications:
# print "reaching maximum number of concurrent notifications... waiting until next cycle..."
logger.info("reaching maximum number of concurrent notifications... waiting until next cycle...")
break
# Get first notification from the list
try:
my_action, my_env = self._pending_notifications.pop(0)
except IndexError:
logger.error("error processing notification list... exiting!")
sys.exit(1)
# Merge default environment with notification-specific environment
my_complete_env = os.environ.copy()
my_complete_env.update(my_env)
try:
my_notification = "%s - %s" % (my_env["PEGASUS_JOBID"], my_env["PEGASUS_EVENT"])
except KeyError:
logger.warning("notification missing PEGASUS_JOBID or PEGASUS_EVENT... skipping...")
continue
# Split arguments
my_args = shlex.split(my_action)
# Create output and error files for the notification script to use
try:
my_temp_out = tempfile.mkstemp(prefix="notification-", suffix="-out.log", dir="/tmp")
my_temp_err = tempfile.mkstemp(prefix="notification-", suffix="-err.log", dir="/tmp")
os.close(my_temp_out[0])
os.close(my_temp_err[0])
my_out_fn = my_temp_out[1]
my_err_fn = my_temp_err[1]
except OSError:
logger.warning("cannot create temp files for notification: %s... skipping..." % (my_notification))
continue
# Open output and error files for the notification script
try:
my_f_out = open(my_out_fn, 'w')
my_f_err = open(my_err_fn, 'w')
except IOError:
logger.warning("cannot open temp files for notification: %s... skipping..." % (my_notification))
try:
os.unlink(my_out_fn)
os.unlink(my_err_fn)
except OSError:
# No error here...
pass
continue
# Ok, here we go...
try:
my_p = subprocess.Popen(my_args, stdout=my_f_out, stderr=my_f_err, env=my_complete_env)
except OSError:
logger.warning("cannot start notification executable: %s... skipping..." % (my_notification))
try:
my_f_out.close()
my_f_err.close()
os.unlink(my_out_fn)
os.unlink(my_err_fn)
except OSError:
logger.warning("found problem cleaning up notification: %s... skipping..." % (my_notification))
continue
# Clean up ok, just continue
continue
except:
logger.warning("problem starting notification: %s... skipping..." % (my_notification))
try:
my_f_out.close()
my_f_err.close()
os.unlink(my_out_fn)
os.unlink(my_err_fn)
except OSError:
logger.warning("found problem cleaning up notification: %s... skipping..." % (my_notification))
continue
# Clean up ok, just continue
continue
# Let's keep everything we need for the future
my_started_notification = {}
my_started_notification["pid"] = my_p.pid
my_started_notification["subp"] = my_p
my_started_notification["env"] = my_complete_env
my_started_notification["params"] = my_env
my_started_notification["args"] = my_args
my_started_notification["action"] = my_action
my_started_notification["out_fd"] = my_f_out
my_started_notification["err_fd"] = my_f_err
my_started_notification["out_fn"] = my_out_fn
my_started_notification["err_fn"] = my_err_fn
my_started_notification["notification"] = my_notification
my_started_notification["time"] = time.time()
# Add to the active list, and done!
self._active_notifications.append(my_started_notification)
logger.info("started notification for: %s" % (my_notification))
# Step 3: Check if any notifications ran over the allowed time
if self._notifications_timeout > 0:
# Only go through the list if a timeout was specified
# Get current time
now = int(math.floor(time.time()))
# Go through our list
my_index = 0
while my_index < len(self._active_notifications):
my_entry = self._active_notifications[my_index]
my_exp_time = my_entry["time"] + self._notifications_timeout
# Check if notification has expired
if my_exp_time < now:
# Notification has expired... kill it...
logger.warning("notification expired... terminating it...")
self.terminate_notification(my_entry)
# Delete this notification from our list
my_deleted_entry = self._active_notifications.pop(my_index)
else:
# Notification hasn't expired yet, move to next one...
my_index = my_index + 1
def finish_notifications(self):
"""
This function flushes all notifications, and closes the
notifications' log file. It also logs all pending (but not yet
issued) notifications.
"""
# Take care of active notifications
if len(self._active_notifications) > 0:
for my_entry in self._active_notifications:
self.terminate_notification(my_entry)
# Take care of pending notifications
if len(self._pending_notifications) > 0:
for my_action, my_env in self._pending_notifications:
try:
my_notification = "%s - %s" % (my_env["PEGASUS_JOBID"], my_env["PEGASUS_EVENT"])
except KeyError:
logger.warning("notification missing PEGASUS_JOBID or PEGASUS_EVENT... skipping...")
continue
logger.warning("pending notification skipped: %s - %s" % (my_notification, my_action))
# Close notifications' log file
if self._notifications_log is not None:
try:
self._notifications_log.close()
except IOError:
logger.warning("error closing notifications' log file...")
self._notifications_log = None
def read_notification_file(self, notify_file, wf_uuid):
"""
This function reads the notification file, parsing all
notifications and creating our list of events to track.
It returns the number of notifications read from the
notifications' file.
"""
if notify_file is None:
return 0
logger.info("loading notifications from %s" % (notify_file))
# Open file
try:
NOTIFY = open(notify_file, "r")
except IOError:
logger.warning("cannot load notification file %s, continuing without notifications" % (notify_file))
return 0
# Start with empty dictionaries for the three types of notifications
my_notifications_read = 0
my_notifications = {"workflow" : {},
"job" : {},
"invocation": {}}
# For workflow and job notifications, we have a dict(workflow_id|job_id, dict(cond, [actions]))
# For invocation notifications, we have a dict(job_id, dict(inv_id, dict(cond, [actions])))
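        # Illustrative format sketch (inferred from the parsing below; the ids
        # and actions are placeholders, comments and blank lines are skipped):
        #   workflow   <wf_uuid>                 on_error    /bin/notify-admin
        #   job        <exec_job_id>             all         /bin/mail-user
        #   invocation <exec_job_id> <task_id>   on_success  /bin/log-success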
# Process notifications
for line in NOTIFY:
line = line.strip()
# Skip blank lines
if len(line) == 0:
continue
# Skip comments
if line.startswith("#"):
continue
# Check if we split it in 4 or 5 pieces
if line.lower().startswith("invocation"):
# This is an invocation notification, split and get all pieces
my_entry = line.split(None, 4)
if len(my_entry) != 5:
logger.warning("cannot parse notification: %s, skipping..." % (line))
continue
my_type = my_entry[0].lower()
my_id = my_entry[1]
try:
my_inv = int(my_entry[2])
except ValueError:
logger.warning("cannot parse notification: %s, skipping..." % (line))
continue
my_condition = my_entry[3]
my_action = my_entry[4]
else:
# This is a workflow/job notification, split and get all pieces
my_entry = line.split(None, 3)
if len(my_entry) != 4:
logger.warning("cannot parse notification: %s, skipping..." % (line))
continue
my_type = my_entry[0].lower()
my_id = my_entry[1]
my_condition = my_entry[2]
my_action = my_entry[3]
# Pick the right dictionary, depending on event type
if my_type == "workflow":
my_dict = my_notifications["workflow"]
if my_id != wf_uuid:
logger.warning("workflow notification has id %s, our id is %s, skipping..."
% (my_id, wf_uuid))
continue
elif my_type == "job" or my_type == "daxjob" or my_type == "dagjob":
my_dict = my_notifications["job"]
elif my_type == "invocation":
my_dict = my_notifications["invocation"]
else:
logger.warning("unknown notification type: %s, skipping..." % (line))
continue
logger.debug("loading notification: %s" % (line))
my_notifications_read = my_notifications_read + 1
# Make sure id is in dictionary
if not my_id in my_dict:
my_dict[my_id] = {}
# For invocations, one extra level...
if my_type == "invocation":
my_dict = my_dict[my_id]
if not my_inv in my_dict:
my_dict[my_inv] = {}
# Now add the notification condition, action pair
if not my_condition in my_dict[my_inv]:
# No actions, start with the list
my_dict[my_inv][my_condition] = [my_action]
else:
# We already have an action(s), let's add the new one to the list
my_dict[my_inv][my_condition].append(my_action)
else:
# Now add the notification condition, action pair
if not my_condition in my_dict[my_id]:
my_dict[my_id][my_condition] = [my_action]
else:
my_dict[my_id][my_condition].append(my_action)
# Save our notifications for later use...
if wf_uuid in self._notifications:
logger.debug("reloaded notifications for workflow %s" % (wf_uuid))
self._notifications[wf_uuid] = my_notifications
# Close file
try:
NOTIFY.close()
except IOError:
pass
# Return number of notifications read
logger.debug("loaded %d notifications for workflow %s" % (my_notifications_read, wf_uuid))
return my_notifications_read
def process_workflow_notifications(self, wf, state):
"""
This function takes care of processing workflow-level notifications.
"""
# Check if we have notifications for this workflow
if not wf._wf_uuid in self._notifications:
return
# Get the notifications' dictionary for this workflow id
wf_notifications = self._notifications[wf._wf_uuid]
if "workflow" in wf_notifications:
my_dict = wf_notifications["workflow"]
if len(my_dict) == 0:
# No workflow notifications
return
else:
logger.warning("notification structure missing workflow entry...")
return
        # Our workflow must be in there...
if wf._wf_uuid in my_dict:
my_notifications = my_dict[wf._wf_uuid]
else:
logger.warning("notification has mismatching workflow id: %s different from %s" %
(wf._wf_uuid, str(my_dict)))
return
# Sanity check the state...
if state != "start" and state != "end":
logger.warning("unknown workflow state %s, continuing..." % (state))
return
# Now, match the workflow state to the conditions in the notifications...
for k in my_notifications:
# Look up the actions for this notification now
my_actions = my_notifications[k]
if state == "start":
if k != "start" and k != "all":
continue
# Change k == 'all' to 'start'
k = "start"
if state == "end":
if k == "on_error":
if wf._dagman_exit_code == 0:
continue
elif k == "on_success":
if wf._dagman_exit_code != 0:
continue
elif k != "at_end" and k != "all":
continue
if k == "all":
k = "at_end"
# Ok, we have a match!
for action in my_actions:
# Create dictionary with needed environment variables
my_env = {}
my_env["PEGASUS_EVENT"] = k
my_env["PEGASUS_EVENT_TIMESTAMP"] = str(wf._current_timestamp)
my_env["PEGASUS_EVENT_TIMESTAMP_ISO"] = utils.isodate(wf._current_timestamp)
my_env["PEGASUS_SUBMIT_DIR"] = wf._original_submit_dir
my_env["PEGASUS_STDOUT"] = wf._out_file
my_env["PEGASUS_JOBID"] = wf._wf_uuid
my_env["PEGASUS_WFID"] = ((wf._dax_label or "unknown") +
"-" + (wf._dax_index or "unknown"))
if state == "end":
# Workflow status is already in plain format, no need for conversion
my_env["PEGASUS_STATUS"] = str(wf._dagman_exit_code)
# Done, queue the notification
self._pending_notifications.append((action, my_env))
# print "WORKFLOW NOTIFICATION ---> ", action, my_env
def process_job_notifications(self, wf, state, job, status):
"""
This function takes care of processing job-level notifications.
"""
# Check if we have notifications for this workflow
if not wf._wf_uuid in self._notifications:
return
# Get the notifications' dictionary for this workflow id
wf_notifications = self._notifications[wf._wf_uuid]
if "job" in wf_notifications:
my_dict = wf_notifications["job"]
else:
logger.warning("notification structure missing job entry...")
return
# Check if we have notifications for this job
if not job._exec_job_id in my_dict:
return
my_notifications = my_dict[job._exec_job_id]
if job._exec_job_id in wf._job_info:
if wf._job_info[job._exec_job_id][3] is None:
job_has_post_script = False
else:
job_has_post_script = True
else:
logger.warning("cannot find job %s in job_info database... skipping notification..." % (job._exec_job_id))
return
# Now, match the job state to the conditions in the notifications...
for k in my_notifications:
# Look up the actions for this notification now
my_actions = my_notifications[k]
if state == "EXECUTE":
if k != "start" and k != "all":
continue
# Change k to "start"
k = "start"
my_status = None
elif state == "JOB_SUCCESS":
if job_has_post_script:
# Wait till postscript...
continue
if k == "start" or k == "on_error":
continue
if k == "all":
k = "at_end"
my_status = "0"
elif state == "POST_SCRIPT_SUCCESS":
if k == "start" or k == "on_error":
continue
if k == "all":
k = "at_end"
my_status = "0"
elif state == "JOB_FAILURE":
if job_has_post_script:
# Wait till postscript...
continue
if k == "start" or k == "on_success":
continue
if k == "all":
k = "at_end"
my_status = status
elif state == "POST_SCRIPT_FAILURE":
if k == "start" or k == "on_success":
continue
if k == "all":
k = "at_end"
my_status = status
else:
# We are in some other state...
continue
my_output = os.path.join(wf._original_submit_dir, job._output_file)
my_error = os.path.join(wf._original_submit_dir, job._error_file)
# Use the rotated file names if at the end of the job
if k != "start":
my_output = my_output + ".%03d" % (job._job_output_counter)
my_error = my_error + ".%03d" % (job._job_output_counter)
# Ok, we have a match!
for action in my_actions:
# Create dictionary with needed environment variables
my_env = {}
my_env["PEGASUS_EVENT"] = k
my_env["PEGASUS_EVENT_TIMESTAMP"] = str(wf._current_timestamp)
my_env["PEGASUS_EVENT_TIMESTAMP_ISO"] = utils.isodate(wf._current_timestamp)
my_env["PEGASUS_SUBMIT_DIR"] = wf._original_submit_dir
my_env["PEGASUS_JOBID"] = job._exec_job_id
my_env["PEGASUS_WFID"] = ((wf._dax_label or "unknown") +
"-" + (wf._dax_index or "unknown"))
my_env["PEGASUS_STDOUT"] = my_output
my_env["PEGASUS_STDERR"] = my_error
if my_status is not None:
my_env["PEGASUS_STATUS"] = str(my_status)
# Done, queue the notification
self._pending_notifications.append((action, my_env))
# print "JOB NOTIFICATION ---> ", action, my_env
def process_invocation_notifications(self, wf, job, task_id, record=None):
"""
This function takes care of processing invocation-level notifications.
"""
if record is None:
record = {}
# Check if we have notifications for this workflow
if not wf._wf_uuid in self._notifications:
return
# Get the notifications' dictionary for this workflow id
wf_notifications = self._notifications[wf._wf_uuid]
if "invocation" in wf_notifications:
my_dict = wf_notifications["invocation"]
else:
logger.warning("notification structure missing invocation entry...")
return
# Check if we have notifications for this job
if not job._exec_job_id in my_dict:
return
# Advance to the task dictionary
my_dict = my_dict[job._exec_job_id]
# Check if we have notifications for this invocation
if not task_id in my_dict:
return
my_notifications = my_dict[task_id]
# Now, match the invocation state to the condition in the notification
for k in my_notifications:
# Look up the actions for this notification now
my_actions = my_notifications[k]
if "raw" in record:
my_status = record["raw"]
else:
my_status = job._main_job_exitcode
# Convert exitcode to int
try:
my_status = int(my_status)
except ValueError:
pass
# Now, compare to the notification condition(s)
if my_status == 0:
if k == "on_error":
continue
if my_status != 0:
if k == "on_success":
continue
if k == "all":
k = "at_end"
# Here, we always use the rotated file names as the invocation has already finished...
my_output = os.path.join(wf._original_submit_dir, job._output_file) + ".%03d" % (job._job_output_counter)
my_error = os.path.join(wf._original_submit_dir, job._error_file) + ".%03d" % (job._job_output_counter)
# Ok, we have a match!
for action in my_actions:
# Create dictionary with needed environment variables
my_env = {}
my_env["PEGASUS_EVENT"] = k
my_env["PEGASUS_EVENT_TIMESTAMP"] = str(wf._current_timestamp)
my_env["PEGASUS_EVENT_TIMESTAMP_ISO"] = utils.isodate(wf._current_timestamp)
my_env["PEGASUS_SUBMIT_DIR"] = wf._original_submit_dir
my_env["PEGASUS_JOBID"] = job._exec_job_id
my_env["PEGASUS_INVID"] = str(task_id)
my_env["PEGASUS_WFID"] = ((wf._dax_label or "unknown") +
"-" + (wf._dax_index or "unknown"))
my_env["PEGASUS_STDOUT"] = my_output
my_env["PEGASUS_STDERR"] = my_error
if k != "start":
# Convert raw exitcode into human-parseable format
my_env["PEGASUS_STATUS"] = str(utils.raw_to_regular(my_status))
# Done, queue the notification
self._pending_notifications.append((action, my_env))
# print "INVOCATION NOTIFICATION ---> ", action, my_env
def remove_notifications(self, wf_uuid):
"""
This function removes the notifications for workflow wf_uuid
from our _notifications dictionary.
"""
# Check if we have notifications for this workflow
if not wf_uuid in self._notifications:
return
logger.debug("deleting notifications for workflow %s..." % (wf_uuid))
# Delete them from our dictionary
del self._notifications[wf_uuid]
| gpl-3.0 | 1,688,029,379,072,213,500 | 43.096525 | 118 | 0.516038 | false |
amolenaar/gaphor | gaphor/tests/testcase.py | 1 | 5292 | """
Basic test case for Gaphor tests.
Everything is about services so the TestCase can define it's required
services and start off.
"""
import logging
import unittest
from io import StringIO
from typing import Type, TypeVar
from gaphas.aspect import ConnectionSink
from gaphas.aspect import Connector as ConnectorAspect
# For DiagramItemConnector aspect:
import gaphor.diagram.diagramtools # noqa
from gaphor import UML
from gaphor.application import Session
from gaphor.diagram.connectors import Connector
from gaphor.diagram.grouping import Group
T = TypeVar("T")
log = logging.getLogger("Gaphor")
log.setLevel(logging.WARNING)
class TestCase(unittest.TestCase):
services = [
"event_manager",
"component_registry",
"element_factory",
"element_dispatcher",
"modeling_language",
"sanitizer",
]
def setUp(self):
self.session = Session(services=self.services)
self.element_factory = self.session.get_service("element_factory")
self.modeling_language = self.session.get_service("modeling_language")
assert len(list(self.element_factory.select())) == 0, list(
self.element_factory.select()
)
self.diagram = self.element_factory.create(UML.Diagram)
assert len(list(self.element_factory.select())) == 1, list(
self.element_factory.select()
)
def tearDown(self):
self.element_factory.shutdown()
self.session.shutdown()
def get_service(self, name):
return self.session.get_service(name)
def create(self, item_cls: Type[T], subject_cls=None, subject=None) -> T:
"""
Create an item with specified subject.
"""
if subject_cls is not None:
subject = self.element_factory.create(subject_cls)
item = self.diagram.create(item_cls, subject=subject)
self.diagram.canvas.update()
return item
def allow(self, line, handle, item, port=None):
"""
Glue line's handle to an item.
If port is not provided, then first port is used.
"""
if port is None and len(item.ports()) > 0:
port = item.ports()[0]
adapter = Connector(item, line)
return adapter.allow(handle, port)
def connect(self, line, handle, item, port=None):
"""
Connect line's handle to an item.
If port is not provided, then first port is used.
"""
canvas = line.canvas
assert canvas is item.canvas
if port is None and len(item.ports()) > 0:
port = item.ports()[0]
sink = ConnectionSink(item, port)
connector = ConnectorAspect(line, handle)
connector.connect(sink)
cinfo = canvas.get_connection(handle)
assert cinfo.connected is item
assert cinfo.port is port
def disconnect(self, line, handle):
"""
Disconnect line's handle.
"""
canvas = self.diagram.canvas
# disconnection on adapter level is performed due to callback, so
# no adapter look up here
canvas.disconnect_item(line, handle)
assert not canvas.get_connection(handle)
def get_connected(self, handle):
"""
Get item connected to line via handle.
"""
cinfo = self.diagram.canvas.get_connection(handle)
if cinfo:
return cinfo.connected
return None
def get_connection(self, handle):
"""
Get connection information.
"""
return self.diagram.canvas.get_connection(handle)
def can_group(self, parent, item):
"""
Check if an item can be grouped by parent.
"""
adapter = Group(parent, item)
return adapter.can_contain()
def group(self, parent, item):
"""
Group item within a parent.
"""
self.diagram.canvas.reparent(item, parent)
adapter = Group(parent, item)
adapter.group()
def ungroup(self, parent, item):
"""
Remove item from a parent.
"""
adapter = Group(parent, item)
adapter.ungroup()
self.diagram.canvas.reparent(item, None)
def kindof(self, cls):
"""
Find UML metaclass instances using element factory.
"""
return self.element_factory.lselect(cls)
def save(self):
"""
Save diagram into string.
"""
from gaphor.storage import storage
from gaphor.storage.xmlwriter import XMLWriter
f = StringIO()
storage.save(XMLWriter(f), factory=self.element_factory)
data = f.getvalue()
f.close()
self.element_factory.flush()
assert not list(self.element_factory.select())
assert not list(self.element_factory.lselect())
return data
def load(self, data):
"""
Load data from specified string. Update ``TestCase.diagram``
attribute to hold new loaded diagram.
"""
from gaphor.storage import storage
f = StringIO(data)
storage.load(
f, factory=self.element_factory, modeling_language=self.modeling_language
)
f.close()
self.diagram = self.element_factory.lselect(UML.Diagram)[0]
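# Hedged usage sketch (not part of the framework): a concrete test module
# would subclass TestCase and use the helpers above. The item classes and the
# ``line.head`` handle attribute are illustrative placeholders.
#
#     class ConnectorTestCase(TestCase):
#         def test_glue(self):
#             node = self.create(SomeNodeItem, UML.Class)
#             line = self.create(SomeLineItem)
#             self.connect(line, line.head, node)
#             assert self.get_connected(line.head) is node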
| lgpl-2.1 | 5,051,515,213,022,079,000 | 27.605405 | 85 | 0.612245 | false |
xorpaul/shinken | test/test_poller_addition.py | 1 | 10776 | #!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class GoodArbiter(ArbiterLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def have_conf(self, i):
return True
def do_not_run(self):
pass
class GoodScheduler(SchedulerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def have_conf(self, i):
return True
def put_conf(self, conf):
return True
class BadScheduler(SchedulerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
def have_conf(self, i):
return False
class GoodPoller(PollerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def put_conf(self, conf):
return True
class BadPoller(PollerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
class GoodReactionner(ReactionnerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def put_conf(self, conf):
return True
class BadReactionner(ReactionnerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
class GoodBroker(BrokerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def put_conf(self, conf):
return True
class BadBroker(BrokerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
class TestPollerAddition(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/nagios_dispatcher.cfg')
def test_simple_dispatch_and_addition(self):
print "The dispatcher", self.dispatcher
# dummy for the arbiter
for a in self.conf.arbiters:
a.__class__ = GoodArbiter
print "Preparing schedulers"
scheduler1 = self.conf.schedulers.find_by_name('scheduler-all-1')
self.assert_(scheduler1 is not None)
scheduler1.__class__ = GoodScheduler
scheduler2 = self.conf.schedulers.find_by_name('scheduler-all-2')
self.assert_(scheduler2 is not None)
scheduler2.__class__ = BadScheduler
print "Preparing pollers"
poller1 = self.conf.pollers.find_by_name('poller-all-1')
self.assert_(poller1 is not None)
poller1.__class__ = GoodPoller
poller2 = self.conf.pollers.find_by_name('poller-all-2')
self.assert_(poller2 is not None)
poller2.__class__ = BadPoller
print "Preparing reactionners"
reactionner1 = self.conf.reactionners.find_by_name('reactionner-all-1')
self.assert_(reactionner1 is not None)
reactionner1.__class__ = GoodReactionner
reactionner2 = self.conf.reactionners.find_by_name('reactionner-all-2')
self.assert_(reactionner2 is not None)
reactionner2.__class__ = BadReactionner
print "Preparing brokers"
broker1 = self.conf.brokers.find_by_name('broker-all-1')
self.assert_(broker1 is not None)
broker1.__class__ = GoodBroker
broker2 = self.conf.brokers.find_by_name('broker-all-2')
self.assert_(broker2 is not None)
broker2.__class__ = BadBroker
# Ping all elements. Should have 1 as OK, 2 as
# one bad attempt (3 max)
self.dispatcher.check_alive()
# Check good values
self.assert_(scheduler1.alive == True)
self.assert_(scheduler1.attempt == 0)
self.assert_(scheduler1.reachable == True)
# still alive, just unreach
self.assert_(scheduler2.alive == True)
self.assert_(scheduler2.attempt == 1)
self.assert_(scheduler2.reachable == False)
# and others satellites too
self.assert_(poller1.alive == True)
self.assert_(poller1.attempt == 0)
self.assert_(poller1.reachable == True)
# still alive, just unreach
self.assert_(poller2.alive == True)
self.assert_(poller2.attempt == 1)
self.assert_(poller2.reachable == False)
# and others satellites too
self.assert_(reactionner1.alive == True)
self.assert_(reactionner1.attempt == 0)
self.assert_(reactionner1.reachable == True)
# still alive, just unreach
self.assert_(reactionner2.alive == True)
self.assert_(reactionner2.attempt == 1)
self.assert_(reactionner2.reachable == False)
# and others satellites too
self.assert_(broker1.alive == True)
self.assert_(broker1.attempt == 0)
self.assert_(broker1.reachable == True)
# still alive, just unreach
self.assert_(broker2.alive == True)
self.assert_(broker2.attempt == 1)
self.assert_(broker2.reachable == False)
time.sleep(60)
        ### Now add another attempt, still alive, but attempt=2/3
self.dispatcher.check_alive()
# Check good values
self.assert_(scheduler1.alive == True)
self.assert_(scheduler1.attempt == 0)
self.assert_(scheduler1.reachable == True)
# still alive, just unreach
self.assert_(scheduler2.alive == True)
self.assert_(scheduler2.attempt == 2)
self.assert_(scheduler2.reachable == False)
# and others satellites too
self.assert_(poller1.alive == True)
self.assert_(poller1.attempt == 0)
self.assert_(poller1.reachable == True)
# still alive, just unreach
self.assert_(poller2.alive == True)
self.assert_(poller2.attempt == 2)
self.assert_(poller2.reachable == False)
# and others satellites too
self.assert_(reactionner1.alive == True)
self.assert_(reactionner1.attempt == 0)
self.assert_(reactionner1.reachable == True)
# still alive, just unreach
self.assert_(reactionner2.alive == True)
self.assert_(reactionner2.attempt == 2)
self.assert_(reactionner2.reachable == False)
# and others satellites too
self.assert_(broker1.alive == True)
self.assert_(broker1.attempt == 0)
self.assert_(broker1.reachable == True)
# still alive, just unreach
self.assert_(broker2.alive == True)
self.assert_(broker2.attempt == 2)
self.assert_(broker2.reachable == False)
time.sleep(60)
### Now we get BAD, We go DEAD for N2!
self.dispatcher.check_alive()
# Check good values
self.assert_(scheduler1.alive == True)
self.assert_(scheduler1.attempt == 0)
self.assert_(scheduler1.reachable == True)
# still alive, just unreach
self.assert_(scheduler2.alive == False)
self.assert_(scheduler2.attempt == 3)
self.assert_(scheduler2.reachable == False)
# and others satellites too
self.assert_(poller1.alive == True)
self.assert_(poller1.attempt == 0)
self.assert_(poller1.reachable == True)
# still alive, just unreach
self.assert_(poller2.alive == False)
self.assert_(poller2.attempt == 3)
self.assert_(poller2.reachable == False)
# and others satellites too
self.assert_(reactionner1.alive == True)
self.assert_(reactionner1.attempt == 0)
self.assert_(reactionner1.reachable == True)
# still alive, just unreach
self.assert_(reactionner2.alive == False)
self.assert_(reactionner2.attempt == 3)
self.assert_(reactionner2.reachable == False)
# and others satellites too
self.assert_(broker1.alive == True)
self.assert_(broker1.attempt == 0)
self.assert_(broker1.reachable == True)
# still alive, just unreach
self.assert_(broker2.alive == False)
self.assert_(broker2.attempt == 3)
self.assert_(broker2.reachable == False)
# Now we check how we should dispatch confs
self.dispatcher.check_dispatch()
# the conf should not be in a good shape
self.assert_(self.dispatcher.dispatch_ok == False)
# Now we really dispatch them!
self.dispatcher.dispatch()
self.assert_(self.any_log_match('Dispatch OK of conf in scheduler scheduler-all-1'))
self.assert_(self.any_log_match('Dispatch OK of configuration 0 to reactionner reactionner-all-1'))
self.assert_(self.any_log_match('Dispatch OK of configuration 0 to poller poller-all-1'))
self.assert_(self.any_log_match('Dispatch OK of configuration 0 to broker broker-all-1'))
self.clear_logs()
# And look if we really dispatch conf as we should
for r in self.conf.realms:
for cfg in r.confs.values():
self.assert_(cfg.is_assigned == True)
self.assert_(cfg.assigned_to == scheduler1)
cmd = "[%lu] ADD_SIMPLE_POLLER;All;newpoller;localhost;7771" % int(time.time())
ext_cmd = ExternalCommand(cmd)
self.external_command_dispatcher.resolve_command(ext_cmd)
# Look for the poller now
newpoller = self.conf.pollers.find_by_name('newpoller')
self.assert_(newpoller is not None)
newpoller.__class__ = GoodPoller
        ### What now with our new poller object?
self.dispatcher.check_alive()
# Check good values
self.assert_(newpoller.alive == True)
self.assert_(newpoller.attempt == 0)
self.assert_(newpoller.reachable == True)
# Now we check how we should dispatch confs
self.dispatcher.check_bad_dispatch()
self.dispatcher.dispatch()
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -7,498,535,806,700,508,000 | 32.780564 | 107 | 0.62964 | false |
fintech-circle/edx-platform | lms/djangoapps/instructor_task/tests/test_tasks.py | 1 | 23409 | """
Unit tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
from functools import partial
import json
from uuid import uuid4
from celery.states import SUCCESS, FAILURE
import ddt
from django.utils.translation import ugettext_noop
from mock import Mock, MagicMock, patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import i4xEncoder
from courseware.models import StudentModule
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.exceptions import ItemNotFoundError
from lms.djangoapps.instructor_task.exceptions import UpdateProblemModuleStateError
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.tests.test_base import InstructorTaskModuleTestCase
from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
from lms.djangoapps.instructor_task.tasks import (
rescore_problem,
reset_problem_attempts,
delete_problem_state,
generate_certificates,
export_ora2_data,
)
from lms.djangoapps.instructor_task.tasks_helper.misc import upload_ora2_data
PROBLEM_URL_NAME = "test_urlname"
class TestTaskFailure(Exception):
"""
An example exception to indicate failure of a mocked task.
"""
pass
class TestInstructorTasks(InstructorTaskModuleTestCase):
"""
Ensure tasks behave as expected.
"""
def setUp(self):
super(TestInstructorTasks, self).setUp()
self.initialize_course()
self.instructor = self.create_instructor('instructor')
self.location = self.problem_location(PROBLEM_URL_NAME)
def _create_input_entry(self, student_ident=None, use_problem_url=True, course_id=None, only_if_higher=False):
"""Creates a InstructorTask entry for testing."""
task_id = str(uuid4())
task_input = {'only_if_higher': only_if_higher}
if use_problem_url:
task_input['problem_url'] = self.location
if student_ident is not None:
task_input['student'] = student_ident
course_id = course_id or self.course.id
instructor_task = InstructorTaskFactory.create(course_id=course_id,
requester=self.instructor,
task_input=json.dumps(task_input, cls=i4xEncoder),
task_key='dummy value',
task_id=task_id)
return instructor_task
def _get_xmodule_instance_args(self):
"""
Calculate dummy values for parameters needed for instantiating xmodule instances.
"""
return {
'xqueue_callback_url_prefix': 'dummy_value',
'request_info': {
'username': 'dummy_username',
'user_id': 'dummy_id',
},
}
def _run_task_with_mock_celery(self, task_class, entry_id, task_id, expected_failure_message=None):
"""Submit a task and mock how celery provides a current_task."""
self.current_task = Mock()
self.current_task.request = Mock()
self.current_task.request.id = task_id
self.current_task.update_state = Mock()
if expected_failure_message is not None:
self.current_task.update_state.side_effect = TestTaskFailure(expected_failure_message)
task_args = [entry_id, self._get_xmodule_instance_args()]
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_get_task:
mock_get_task.return_value = self.current_task
return task_class.apply(task_args, task_id=task_id).get()
def _test_missing_current_task(self, task_class):
"""Check that a task_class fails when celery doesn't provide a current_task."""
task_entry = self._create_input_entry()
with self.assertRaises(ValueError):
task_class(task_entry.id, self._get_xmodule_instance_args())
def _test_undefined_course(self, task_class):
"""Run with celery, but with no course defined."""
task_entry = self._create_input_entry(course_id="bogus/course/id")
with self.assertRaises(ItemNotFoundError):
self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
def _test_undefined_problem(self, task_class):
"""Run with celery, but no problem defined."""
task_entry = self._create_input_entry()
with self.assertRaises(ItemNotFoundError):
self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
def _test_run_with_task(self, task_class, action_name, expected_num_succeeded,
expected_num_skipped=0, expected_attempted=0, expected_total=0):
"""Run a task and check the number of StudentModules processed."""
task_entry = self._create_input_entry()
status = self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
expected_attempted = expected_attempted \
if expected_attempted else expected_num_succeeded + expected_num_skipped
expected_total = expected_total \
if expected_total else expected_num_succeeded + expected_num_skipped
# check return value
self.assertEquals(status.get('attempted'), expected_attempted)
self.assertEquals(status.get('succeeded'), expected_num_succeeded)
self.assertEquals(status.get('skipped'), expected_num_skipped)
self.assertEquals(status.get('total'), expected_total)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater(status.get('duration_ms'), 0)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(json.loads(entry.task_output), status)
self.assertEquals(entry.task_state, SUCCESS)
def _test_run_with_no_state(self, task_class, action_name):
"""Run with no StudentModules defined for the current problem."""
self.define_option_problem(PROBLEM_URL_NAME)
self._test_run_with_task(task_class, action_name, 0)
def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):
"""Create students, a problem, and StudentModule objects for testing"""
self.define_option_problem(PROBLEM_URL_NAME)
students = [
UserFactory.create(username='robot%d' % i, email='robot+test+%[email protected]' % i)
for i in xrange(num_students)
]
for student in students:
CourseEnrollmentFactory.create(course_id=self.course.id, user=student)
StudentModuleFactory.create(course_id=self.course.id,
module_state_key=self.location,
student=student,
grade=grade,
max_grade=max_grade,
state=state)
return students
def _assert_num_attempts(self, students, num_attempts):
"""Check the number attempts for all students is the same"""
for student in students:
module = StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.location)
state = json.loads(module.state)
self.assertEquals(state['attempts'], num_attempts)
def _test_run_with_failure(self, task_class, expected_message):
"""Run a task and trigger an artificial failure with the given message."""
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
with self.assertRaises(TestTaskFailure):
self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'TestTaskFailure')
self.assertEquals(output['message'], expected_message)
def _test_run_with_long_error_msg(self, task_class):
"""
Run with an error message that is so long it will require
truncation (as well as the jettisoning of the traceback).
"""
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
expected_message = "x" * 1500
with self.assertRaises(TestTaskFailure):
self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
self.assertGreater(1023, len(entry.task_output))
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'TestTaskFailure')
self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + "...")
self.assertNotIn('traceback', output)
def _test_run_with_short_error_msg(self, task_class):
"""
Run with an error message that is short enough to fit
in the output, but long enough that the traceback won't.
Confirm that the traceback is truncated.
"""
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
expected_message = "x" * 900
with self.assertRaises(TestTaskFailure):
self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
self.assertGreater(1023, len(entry.task_output))
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'TestTaskFailure')
self.assertEquals(output['message'], expected_message)
self.assertEquals(output['traceback'][-3:], "...")
@attr(shard=3)
@ddt.ddt
class TestRescoreInstructorTask(TestInstructorTasks):
"""Tests problem-rescoring instructor task."""
def assert_task_output(self, output, **expected_output):
"""
Check & compare output of the task
"""
self.assertEqual(output.get('total'), expected_output.get('total'))
self.assertEqual(output.get('attempted'), expected_output.get('attempted'))
self.assertEqual(output.get('succeeded'), expected_output.get('succeeded'))
self.assertEqual(output.get('skipped'), expected_output.get('skipped'))
self.assertEqual(output.get('failed'), expected_output.get('failed'))
self.assertEqual(output.get('action_name'), expected_output.get('action_name'))
self.assertGreater(output.get('duration_ms'), expected_output.get('duration_ms', 0))
def get_task_output(self, task_id):
"""Get and load instructor task output"""
entry = InstructorTask.objects.get(id=task_id)
return json.loads(entry.task_output)
def test_rescore_missing_current_task(self):
self._test_missing_current_task(rescore_problem)
def test_rescore_undefined_course(self):
self._test_undefined_course(rescore_problem)
def test_rescore_undefined_problem(self):
self._test_undefined_problem(rescore_problem)
def test_rescore_with_no_state(self):
self._test_run_with_no_state(rescore_problem, 'rescored')
def test_rescore_with_failure(self):
self._test_run_with_failure(rescore_problem, 'We expected this to fail')
def test_rescore_with_long_error_msg(self):
self._test_run_with_long_error_msg(rescore_problem)
def test_rescore_with_short_error_msg(self):
self._test_run_with_short_error_msg(rescore_problem)
def test_rescoring_unrescorable(self):
input_state = json.dumps({'done': True})
num_students = 1
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = MagicMock()
del mock_instance.rescore_problem
del mock_instance.rescore
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
with self.assertRaises(UpdateProblemModuleStateError):
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
# check values stored in table:
entry = InstructorTask.objects.get(id=task_entry.id)
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], "UpdateProblemModuleStateError")
self.assertEquals(output['message'], "Specified problem does not support rescoring.")
self.assertGreater(len(output['traceback']), 0)
def test_rescoring_unaccessable(self):
"""
        Tests that rescoring a problem in a course for all students fails if a
        user has answered a problem to which the user does not have access.
"""
input_state = json.dumps({'done': True})
num_students = 1
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal', return_value=None):
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=0,
skipped=0,
failed=num_students,
action_name='rescored'
)
def test_rescoring_success(self):
"""
        Tests that rescoring a problem in a course for all students succeeds.
"""
mock_instance = MagicMock()
getattr(mock_instance, 'rescore').return_value = None
mock_instance.has_submitted_answer.return_value = True
del mock_instance.done # old CAPA code used to use this value so we delete it here to be sure
num_students = 10
self._create_students_with_state(num_students)
task_entry = self._create_input_entry()
with patch(
'lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal'
) as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=num_students,
skipped=0,
failed=0,
action_name='rescored'
)
@attr(shard=3)
class TestResetAttemptsInstructorTask(TestInstructorTasks):
"""Tests instructor task that resets problem attempts."""
def test_reset_missing_current_task(self):
self._test_missing_current_task(reset_problem_attempts)
def test_reset_undefined_course(self):
self._test_undefined_course(reset_problem_attempts)
def test_reset_undefined_problem(self):
self._test_undefined_problem(reset_problem_attempts)
def test_reset_with_no_state(self):
self._test_run_with_no_state(reset_problem_attempts, 'reset')
def test_reset_with_failure(self):
self._test_run_with_failure(reset_problem_attempts, 'We expected this to fail')
def test_reset_with_long_error_msg(self):
self._test_run_with_long_error_msg(reset_problem_attempts)
def test_reset_with_short_error_msg(self):
self._test_run_with_short_error_msg(reset_problem_attempts)
def test_reset_with_some_state(self):
initial_attempts = 3
input_state = json.dumps({'attempts': initial_attempts})
num_students = 10
students = self._create_students_with_state(num_students, input_state)
# check that entries were set correctly
self._assert_num_attempts(students, initial_attempts)
# run the task
self._test_run_with_task(reset_problem_attempts, 'reset', num_students)
# check that entries were reset
self._assert_num_attempts(students, 0)
def test_reset_with_zero_attempts(self):
initial_attempts = 0
input_state = json.dumps({'attempts': initial_attempts})
num_students = 10
students = self._create_students_with_state(num_students, input_state)
# check that entries were set correctly
self._assert_num_attempts(students, initial_attempts)
# run the task
self._test_run_with_task(reset_problem_attempts, 'reset', 0, expected_num_skipped=num_students)
# check that entries were reset
self._assert_num_attempts(students, 0)
def _test_reset_with_student(self, use_email):
"""Run a reset task for one student, with several StudentModules for the problem defined."""
num_students = 10
initial_attempts = 3
input_state = json.dumps({'attempts': initial_attempts})
students = self._create_students_with_state(num_students, input_state)
# check that entries were set correctly
for student in students:
module = StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.location)
state = json.loads(module.state)
self.assertEquals(state['attempts'], initial_attempts)
if use_email:
student_ident = students[3].email
else:
student_ident = students[3].username
task_entry = self._create_input_entry(student_ident)
status = self._run_task_with_mock_celery(reset_problem_attempts, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(status.get('attempted'), 1)
self.assertEquals(status.get('succeeded'), 1)
self.assertEquals(status.get('total'), 1)
self.assertEquals(status.get('action_name'), 'reset')
self.assertGreater(status.get('duration_ms'), 0)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(json.loads(entry.task_output), status)
self.assertEquals(entry.task_state, SUCCESS)
# check that the correct entry was reset
for index, student in enumerate(students):
module = StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.location)
state = json.loads(module.state)
if index == 3:
self.assertEquals(state['attempts'], 0)
else:
self.assertEquals(state['attempts'], initial_attempts)
def test_reset_with_student_username(self):
self._test_reset_with_student(False)
def test_reset_with_student_email(self):
self._test_reset_with_student(True)
@attr(shard=3)
class TestDeleteStateInstructorTask(TestInstructorTasks):
"""Tests instructor task that deletes problem state."""
def test_delete_missing_current_task(self):
self._test_missing_current_task(delete_problem_state)
def test_delete_undefined_course(self):
self._test_undefined_course(delete_problem_state)
def test_delete_undefined_problem(self):
self._test_undefined_problem(delete_problem_state)
def test_delete_with_no_state(self):
self._test_run_with_no_state(delete_problem_state, 'deleted')
def test_delete_with_failure(self):
self._test_run_with_failure(delete_problem_state, 'We expected this to fail')
def test_delete_with_long_error_msg(self):
self._test_run_with_long_error_msg(delete_problem_state)
def test_delete_with_short_error_msg(self):
self._test_run_with_short_error_msg(delete_problem_state)
def test_delete_with_some_state(self):
# This will create StudentModule entries -- we don't have to worry about
# the state inside them.
num_students = 10
students = self._create_students_with_state(num_students)
# check that entries were created correctly
for student in students:
StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.location)
self._test_run_with_task(delete_problem_state, 'deleted', num_students)
# confirm that no state can be found anymore:
for student in students:
with self.assertRaises(StudentModule.DoesNotExist):
StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.location)
class TestCertificateGenerationInstructorTask(TestInstructorTasks):
"""Tests instructor task that generates student certificates."""
def test_generate_certificates_missing_current_task(self):
"""
        Test that an error is raised when the certificate generation task is run without a current task
"""
self._test_missing_current_task(generate_certificates)
def test_generate_certificates_task_run(self):
"""
        Test that the certificate generation task runs without any errors
"""
self._test_run_with_task(
generate_certificates,
'certificates generated',
0,
0,
expected_attempted=1,
expected_total=1
)
class TestOra2ResponsesInstructorTask(TestInstructorTasks):
"""Tests instructor task that fetches ora2 response data."""
def test_ora2_missing_current_task(self):
self._test_missing_current_task(export_ora2_data)
def test_ora2_with_failure(self):
self._test_run_with_failure(export_ora2_data, 'We expected this to fail')
def test_ora2_with_long_error_msg(self):
self._test_run_with_long_error_msg(export_ora2_data)
def test_ora2_with_short_error_msg(self):
self._test_run_with_short_error_msg(export_ora2_data)
def test_ora2_runs_task(self):
task_entry = self._create_input_entry()
task_xmodule_args = self._get_xmodule_instance_args()
with patch('lms.djangoapps.instructor_task.tasks.run_main_task') as mock_main_task:
export_ora2_data(task_entry.id, task_xmodule_args)
action_name = ugettext_noop('generated')
task_fn = partial(upload_ora2_data, task_xmodule_args)
            # `assert_called_once_with_args` is not a real Mock method (the misspelled
            # assert passes silently), so verify the recorded call explicitly instead.
            self.assertEqual(mock_main_task.call_count, 1)
            called_args = mock_main_task.call_args[0]
            self.assertEqual(called_args[0], task_entry.id)
            self.assertEqual(called_args[2], action_name)
            self.assertEqual(getattr(called_args[1], 'func', None), task_fn.func)
| agpl-3.0 | -2,608,810,769,016,951,000 | 43.167925 | 133 | 0.644538 | false |
asajeffrey/servo | tests/wpt/web-platform-tests/tools/lint/rules.py | 3 | 15945 | from __future__ import unicode_literals
import abc
import inspect
import os
import re
import six
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from typing import Any, List, Match, Optional, Pattern, Text, Tuple, cast
Error = Tuple[Text, Text, Text, Optional[int]]
def collapse(text):
# type: (Text) -> Text
return inspect.cleandoc(str(text)).replace("\n", " ")
class Rule(six.with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def name(self):
# type: () -> Text
pass
@abc.abstractproperty
def description(self):
# type: () -> Text
pass
to_fix = None # type: Optional[Text]
@classmethod
def error(cls, path, context=(), line_no=None):
# type: (Text, Tuple[Any, ...], Optional[int]) -> Error
if MYPY:
name = cast(Text, cls.name)
description = cast(Text, cls.description)
else:
name = cls.name
description = cls.description
description = description % context
return (name, description, path, line_no)
class MissingLink(Rule):
name = "MISSING-LINK"
description = "Testcase file must have a link to a spec"
to_fix = """
Ensure that there is a `<link rel="help" href="[url]">` for the spec.
`MISSING-LINK` is designed to ensure that the CSS build tool can find
the tests. Note that the CSS build system is primarily used by
[test.csswg.org/](http://test.csswg.org/), which doesn't use
`wptserve`, so `*.any.js` and similar tests won't work there; stick
with the `.html` equivalent.
"""
class PathLength(Rule):
name = "PATH LENGTH"
description = "/%s longer than maximum path length (%d > 150)"
to_fix = "use shorter filename to rename the test file"
class FileType(Rule):
name = "FILE TYPE"
description = "/%s is an unsupported file type (%s)"
class WorkerCollision(Rule):
name = "WORKER COLLISION"
description = collapse("""
path ends with %s which collides with generated tests from %s files
""")
class GitIgnoreFile(Rule):
name = "GITIGNORE"
description = ".gitignore found outside the root"
class MojomJSFile(Rule):
name = "MOJOM-JS"
description = "Don't check *.mojom.js files into WPT"
to_fix = """
Check if the file is already included in mojojs.zip:
https://source.chromium.org/chromium/chromium/src/+/master:chrome/tools/build/linux/FILES.cfg
If yes, use `loadMojoResources` from `resources/test-only-api.js` to load
it; if not, contact [email protected] for adding new files
to mojojs.zip.
"""
class AhemCopy(Rule):
name = "AHEM COPY"
description = "Don't add extra copies of Ahem, use /fonts/Ahem.ttf"
class AhemSystemFont(Rule):
name = "AHEM SYSTEM FONT"
description = "Don't use Ahem as a system font, use /fonts/ahem.css"
# TODO: Add tests for this rule
class IgnoredPath(Rule):
name = "IGNORED PATH"
description = collapse("""
%s matches an ignore filter in .gitignore - please add a .gitignore
exception
""")
class CSSCollidingTestName(Rule):
name = "CSS-COLLIDING-TEST-NAME"
description = "The filename %s in the %s testsuite is shared by: %s"
class CSSCollidingRefName(Rule):
name = "CSS-COLLIDING-REF-NAME"
description = "The filename %s is shared by: %s"
class CSSCollidingSupportName(Rule):
name = "CSS-COLLIDING-SUPPORT-NAME"
description = "The filename %s is shared by: %s"
class SupportWrongDir(Rule):
name = "SUPPORT-WRONG-DIR"
description = "Support file not in support directory"
class ParseFailed(Rule):
name = "PARSE-FAILED"
description = "Unable to parse file"
to_fix = """
examine the file to find the causes of any parse errors, and fix them.
"""
class ContentManual(Rule):
name = "CONTENT-MANUAL"
description = "Manual test whose filename doesn't end in '-manual'"
class ContentVisual(Rule):
name = "CONTENT-VISUAL"
description = "Visual test whose filename doesn't end in '-visual'"
class AbsoluteUrlRef(Rule):
name = "ABSOLUTE-URL-REF"
description = collapse("""
Reference test with a reference file specified via an absolute URL:
'%s'
""")
class SameFileRef(Rule):
name = "SAME-FILE-REF"
description = "Reference test which points at itself as a reference"
class NonexistentRef(Rule):
name = "NON-EXISTENT-REF"
description = collapse("""
Reference test with a non-existent '%s' relationship reference: '%s'
""")
class MultipleTimeout(Rule):
name = "MULTIPLE-TIMEOUT"
description = "More than one meta name='timeout'"
to_fix = """
ensure each test file has only one instance of a `<meta
name="timeout"...>` element
"""
class InvalidTimeout(Rule):
name = "INVALID-TIMEOUT"
description = collapse("""
Test file with `<meta name='timeout'...>` element that has a `content`
attribute whose value is not `long`: %s
""")
to_fix = "replace the value of the `content` attribute with `long`"
class MultipleTestharness(Rule):
name = "MULTIPLE-TESTHARNESS"
description = "More than one `<script src='/resources/testharness.js'>`"
to_fix = """
ensure each test has only one `<script
    src='/resources/testharness.js'>` instance
"""
class MissingTestharnessReport(Rule):
name = "MISSING-TESTHARNESSREPORT"
description = "Missing `<script src='/resources/testharnessreport.js'>`"
to_fix = """
ensure each test file contains `<script
src='/resources/testharnessreport.js'>`
"""
class MultipleTestharnessReport(Rule):
name = "MULTIPLE-TESTHARNESSREPORT"
description = "More than one `<script src='/resources/testharnessreport.js'>`"
class VariantMissing(Rule):
name = "VARIANT-MISSING"
description = collapse("""
Test file with a `<meta name='variant'...>` element that's missing a
`content` attribute
""")
to_fix = """
add a `content` attribute with an appropriate value to the `<meta
name='variant'...>` element
"""
class MalformedVariant(Rule):
name = "MALFORMED-VARIANT"
description = collapse("""
%s `<meta name=variant>` 'content' attribute must be the empty string
or start with '?' or '#'
""")
class LateTimeout(Rule):
name = "LATE-TIMEOUT"
description = "`<meta name=timeout>` seen after testharness.js script"
description = collapse("""
Test file with `<meta name='timeout'...>` element after `<script
src='/resources/testharnessreport.js'>` element
""")
to_fix = """
move the `<meta name="timeout"...>` element to precede the `script`
element.
"""
class EarlyTestharnessReport(Rule):
name = "EARLY-TESTHARNESSREPORT"
description = collapse("""
Test file has an instance of
`<script src='/resources/testharnessreport.js'>` prior to
`<script src='/resources/testharness.js'>`
""")
to_fix = "flip the order"
class EarlyTestdriverVendor(Rule):
name = "EARLY-TESTDRIVER-VENDOR"
description = collapse("""
Test file has an instance of
`<script src='/resources/testdriver-vendor.js'>` prior to
`<script src='/resources/testdriver.js'>`
""")
to_fix = "flip the order"
class MultipleTestdriver(Rule):
name = "MULTIPLE-TESTDRIVER"
description = "More than one `<script src='/resources/testdriver.js'>`"
class MissingTestdriverVendor(Rule):
name = "MISSING-TESTDRIVER-VENDOR"
description = "Missing `<script src='/resources/testdriver-vendor.js'>`"
class MultipleTestdriverVendor(Rule):
name = "MULTIPLE-TESTDRIVER-VENDOR"
description = "More than one `<script src='/resources/testdriver-vendor.js'>`"
class TestharnessPath(Rule):
name = "TESTHARNESS-PATH"
description = "testharness.js script seen with incorrect path"
class TestharnessReportPath(Rule):
name = "TESTHARNESSREPORT-PATH"
description = "testharnessreport.js script seen with incorrect path"
class TestdriverPath(Rule):
name = "TESTDRIVER-PATH"
description = "testdriver.js script seen with incorrect path"
class TestdriverVendorPath(Rule):
name = "TESTDRIVER-VENDOR-PATH"
description = "testdriver-vendor.js script seen with incorrect path"
class OpenNoMode(Rule):
name = "OPEN-NO-MODE"
description = "File opened without providing an explicit mode (note: binary files must be read with 'b' in the mode flags)"
class UnknownGlobalMetadata(Rule):
name = "UNKNOWN-GLOBAL-METADATA"
description = "Unexpected value for global metadata"
class BrokenGlobalMetadata(Rule):
name = "BROKEN-GLOBAL-METADATA"
description = "Invalid global metadata: %s"
class UnknownTimeoutMetadata(Rule):
name = "UNKNOWN-TIMEOUT-METADATA"
description = "Unexpected value for timeout metadata"
class UnknownMetadata(Rule):
name = "UNKNOWN-METADATA"
description = "Unexpected kind of metadata"
class StrayMetadata(Rule):
name = "STRAY-METADATA"
description = "Metadata comments should start the file"
class IndentedMetadata(Rule):
name = "INDENTED-METADATA"
description = "Metadata comments should start the line"
class BrokenMetadata(Rule):
name = "BROKEN-METADATA"
description = "Metadata comment is not formatted correctly"
class TestharnessInOtherType(Rule):
name = "TESTHARNESS-IN-OTHER-TYPE"
description = "testharness.js included in a %s test"
class DuplicateBasenamePath(Rule):
name = "DUPLICATE-BASENAME-PATH"
description = collapse("""
File has identical basename path (path excluding extension) as
other file(s) (found extensions: %s)
""")
to_fix = "rename files so they have unique basename paths"
class TentativeDirectoryName(Rule):
name = "TENTATIVE-DIRECTORY-NAME"
description = "Directories for tentative tests must be named exactly 'tentative'"
to_fix = "rename directory to be called 'tentative'"
class Regexp(six.with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def pattern(self):
# type: () -> bytes
pass
@abc.abstractproperty
def name(self):
# type: () -> Text
pass
@abc.abstractproperty
def description(self):
# type: () -> Text
pass
file_extensions = None # type: Optional[List[Text]]
def __init__(self):
# type: () -> None
self._re = re.compile(self.pattern) # type: Pattern[bytes]
def applies(self, path):
# type: (Text) -> bool
return (self.file_extensions is None or
os.path.splitext(path)[1] in self.file_extensions)
def search(self, line):
# type: (bytes) -> Optional[Match[bytes]]
return self._re.search(line)
class TabsRegexp(Regexp):
pattern = b"^\t"
name = "INDENT TABS"
description = "Test-file line starts with one or more tab characters"
to_fix = "use spaces to replace any tab characters at beginning of lines"
class CRRegexp(Regexp):
pattern = b"\r$"
name = "CR AT EOL"
description = "Test-file line ends with CR (U+000D) character"
to_fix = """
reformat file so each line just has LF (U+000A) line ending (standard,
cross-platform "Unix" line endings instead of, e.g., DOS line endings).
"""
class SetTimeoutRegexp(Regexp):
pattern = br"setTimeout\s*\("
name = "SET TIMEOUT"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "setTimeout used"
to_fix = """
replace all `setTimeout(...)` calls with `step_timeout(...)` calls
"""
class W3CTestOrgRegexp(Regexp):
pattern = br"w3c\-test\.org"
name = "W3C-TEST.ORG"
description = "Test-file line has the string `w3c-test.org`"
to_fix = """
either replace the `w3c-test.org` string with the expression
`{{host}}:{{ports[http][0]}}` or a generic hostname like `example.org`
"""
class WebPlatformTestRegexp(Regexp):
pattern = br"web\-platform\.test"
name = "WEB-PLATFORM.TEST"
description = "Internal web-platform.test domain used"
to_fix = """
use [server-side substitution](https://web-platform-tests.org/writing-tests/server-pipes.html#sub),
along with the [`.sub` filename-flag](https://web-platform-tests.org/writing-tests/file-names.html#test-features),
to replace web-platform.test with `{{domains[]}}`
"""
class Webidl2Regexp(Regexp):
pattern = br"webidl2\.js"
name = "WEBIDL2.JS"
description = "Legacy webidl2.js script used"
class ConsoleRegexp(Regexp):
pattern = br"console\.[a-zA-Z]+\s*\("
name = "CONSOLE"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Test-file line has a `console.*(...)` call"
to_fix = """
remove the `console.*(...)` call (and in some cases, consider adding an
`assert_*` of some kind in place of it)
"""
class GenerateTestsRegexp(Regexp):
pattern = br"generate_tests\s*\("
name = "GENERATE_TESTS"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Test file line has a generate_tests call"
to_fix = "remove the call and call `test()` a number of times instead"
class PrintRegexp(Regexp):
pattern = br"print(?:\s|\s*\()"
name = "PRINT STATEMENT"
file_extensions = [".py"]
description = collapse("""
A server-side python support file contains a `print` statement
""")
to_fix = """
remove the `print` statement or replace it with something else that
achieves the intended effect (e.g., a logging call)
"""
class LayoutTestsRegexp(Regexp):
pattern = br"(eventSender|testRunner|internals)\."
name = "LAYOUTTESTS APIS"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "eventSender/testRunner/internals used; these are LayoutTests-specific APIs (WebKit/Blink)"
class MissingDepsRegexp(Regexp):
pattern = br"[^\w]/gen/"
name = "MISSING DEPENDENCY"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Chromium-specific content referenced"
to_fix = "Reimplement the test to use well-documented testing interfaces"
class SpecialPowersRegexp(Regexp):
pattern = b"SpecialPowers"
name = "SPECIALPOWERS API"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "SpecialPowers used; this is gecko-specific and not supported in wpt"
class TrailingWhitespaceRegexp(Regexp):
name = "TRAILING WHITESPACE"
description = "Whitespace at EOL"
pattern = b"[ \t\f\v]$"
to_fix = """Remove trailing whitespace from all lines in the file."""
class AssertThrowsRegexp(Regexp):
pattern = br"[^.]assert_throws\("
name = "ASSERT_THROWS"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Test-file line has an `assert_throws(...)` call"
to_fix = """Replace with `assert_throws_dom` or `assert_throws_js` or `assert_throws_exactly`"""
class PromiseRejectsRegexp(Regexp):
pattern = br"promise_rejects\("
name = "PROMISE_REJECTS"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Test-file line has a `promise_rejects(...)` call"
to_fix = """Replace with promise_rejects_dom or promise_rejects_js or `promise_rejects_exactly`"""
class AssertPreconditionRegexp(Regexp):
pattern = br"[^.]assert_precondition\("
name = "ASSERT-PRECONDITION"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Test-file line has an `assert_precondition(...)` call"
to_fix = """Replace with `assert_implements` or `assert_implements_optional`"""
| mpl-2.0 | 8,087,864,830,403,340,000 | 29.084906 | 127 | 0.64804 | false |
GuillaumeDD/AdventOfCode2016 | day08.py | 1 | 5304 | from __future__ import print_function # print utilities without systematic '\n' at EOL
import io
import re
# --- Day 8: Two-Factor Authentication ---
#
# You come across a door implementing what you can only assume is an
# implementation of two-factor authentication after a long game of
# requirements telephone.
#
# To get past the door, you first swipe a keycard (no problem; there was
# one on a nearby desk). Then, it displays a code on a little screen,
# and you type that code on a keypad. Then, presumably, the door
# unlocks.
#
# Unfortunately, the screen has been smashed. After a few minutes,
# you've taken everything apart and figured out how it works. Now you
# just have to work out what the screen would have displayed.
#
# The magnetic strip on the card you swiped encodes a series of
# instructions for the screen; these instructions are your puzzle
# input. The screen is 50 pixels wide and 6 pixels tall, all of which
# start off, and is capable of three somewhat peculiar operations:
#
# rect AxB turns on all of the pixels in a rectangle at the top-left of the screen which is A wide and B tall.
#
# rotate row y=A by B shifts all of the pixels in row A (0 is the top
# row) right by B pixels. Pixels that would fall off the right end
# appear at the left end of the row.
#
# rotate column x=A by B shifts all of the pixels in column A (0 is
# the left column) down by B pixels. Pixels that would fall off the
# bottom appear at the top of the column.
#
# For example, here is a simple sequence on a smaller screen:
#
# rect 3x2 creates a small rectangle in the top-left corner:
#
# ###....
# ###....
# .......
#
# rotate column x=1 by 1 rotates the second column down by one pixel:
#
# #.#....
# ###....
# .#.....
#
# rotate row y=0 by 4 rotates the top row right by four pixels:
#
# ....#.#
# ###....
# .#.....
#
# rotate column x=1 by 1 again rotates the second column down by one
# pixel, causing the bottom pixel to wrap back to the top:
#
# .#..#.#
# #.#....
# .#.....
#
# As you can see, this display technology is extremely powerful, and
# will soon dominate the tiny-code-displaying-screen market. That's what
# the advertisement on the back of the display tries to convince you,
# anyway.
#
# There seems to be an intermediate check of the voltage used by the
# display: after you swipe your card, if the screen did work, how many
# pixels should be lit?
#
# --- Part Two ---
#
# You notice that the screen is only capable of displaying capital
# letters; in the font it uses, each letter is 5 pixels wide and 6 tall.
#
# After you swipe your card, what code is the screen trying to display?
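# Implementation note (illustration only, not taken from the puzzle input): both
# rotations below are plain modular index shifts. For a hypothetical row of
# width 7, shifting right by 2 just reads each cell from (j - by) % width:
#
#     row = list('.######')                           # index 0 off, the rest on
#     shifted = [row[(j - 2) % 7] for j in range(7)]
#     # ''.join(shifted) == '##.####'
#
# apply_command() further down builds the new row/column explicitly with the
# same wrap-around arithmetic.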
pattern_rect = re.compile('rect ([0-9]+)x([0-9]+)')
pattern_rotate_row = re.compile('rotate row y=([0-9]+) by ([0-9]+)')
pattern_rotate_column = re.compile('rotate column x=([0-9]+) by ([0-9]+)')
# Light statuses
ON = '#'
OFF = '.'
# beware of this initialisation!
# -> every cell should be a different string
SCREEN_WIDTH = 50
SCREEN_HEIGHT = 6
SCREEN = [[OFF for _ in range(SCREEN_WIDTH)] for _ in range(SCREEN_HEIGHT)]
def print_screen():
for line in SCREEN:
for col in line:
print(col, end='')
print()
def switch(light_status):
if light_status == ON:
return OFF
else:
return ON
def nb_ON():
"""
Computes the number of 'ON' lights in SCREEN
"""
count = 0
for i in range(SCREEN_HEIGHT):
for j in range(SCREEN_WIDTH):
if SCREEN[i][j] == ON:
count += 1
return count
def apply_command(command_line):
"""
Apply a given command line on SCREEN
"""
global SCREEN
rect = pattern_rect.match(command_line)
if rect is not None:
# RECT command
width = int(rect.group(1))
height = int(rect.group(2))
for i in range(height):
for j in range(width):
                # per the puzzle spec, "rect AxB" turns pixels on (it does not toggle them)
                SCREEN[i][j] = ON
else:
# ROTATE ROW command
rotate_row = pattern_rotate_row.match(command_line)
if rotate_row is not None:
y = int(rotate_row.group(1))
by = int(rotate_row.group(2))
new_line = [OFF for _ in range(SCREEN_WIDTH)]
for j in range(SCREEN_WIDTH):
next_j = (j+by) % SCREEN_WIDTH
new_line[next_j] = SCREEN[y][j]
for j,light in enumerate(new_line):
SCREEN[y][j] = light
else:
# ROTATE COLUMN command
rotate_column = pattern_rotate_column.match(command_line)
if rotate_column is not None:
x = int(rotate_column.group(1))
by = int(rotate_column.group(2))
new_column = [OFF for _ in range(SCREEN_HEIGHT)]
for i in range(SCREEN_HEIGHT):
next_i = (i+by) % SCREEN_HEIGHT
new_column[next_i] = SCREEN[i][x]
for i,light in enumerate(new_column):
SCREEN[i][x] = light
else:
print('Unable to match command')
with io.open('inputs/day08.txt', 'r') as f:
for line in f:
command = line.strip()
apply_command(command)
print_screen()
print('Number of pixels lit: {}'.format(nb_ON()))
| gpl-3.0 | -4,303,923,578,873,608,000 | 30.760479 | 113 | 0.614065 | false |
nispc/ckanext-data_recommendation | ckanext/data_recommendation/plugin.py | 1 | 2619 | import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
from ckan.plugins.toolkit import asbool
import jieba
import jieba.analyse
from ckan.plugins.toolkit import request, c
import pylons.config as config
import opencc
class Data_RecommendationPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.ITemplateHelpers)
plugins.implements(plugins.IRoutes, inherit=True)
# IConfigurer
def update_config(self, config_):
toolkit.add_template_directory(config_, 'templates')
toolkit.add_public_directory(config_, 'public')
toolkit.add_resource('fanstatic', 'data_recommendation')
@classmethod
def related_pkgs(cls):
# Parameter
extractNum = int(config.get('ckan.data_recommended.extract_num', '5'))
byTag = asbool(config.get('ckan.data_recommended.by_tag', 'true'))
byTitle = asbool(config.get('ckan.data_recommended.by_title', 'true'))
# fetch pkg info
pkg_name = request.environ['PATH_INFO'].split('/')[-1]
pkg_title = toolkit.get_action('package_show')({}, {'id':pkg_name})['title']
pkg_title_s = opencc.convert(pkg_title, config='zhtw2zhcn_s.ini')
pkg_tags = [pkg_tag['name'] for pkg_tag in toolkit.get_action('package_show')({}, {'id':pkg_name})['tags']]
# related_tag_titles
related_tag_titles = set()
if byTag:
related_tag_titles.update(set(pkg_tags))
if byTitle:
tmp = jieba.analyse.extract_tags(pkg_title_s, topK=extractNum)
related_tag_titles.update(
set(
(opencc.convert(_, config='zhs2zhtw_vp.ini') for _ in tmp)
)
)
related_pkgs = {}
related_pkgs['results'] = dict()
for related_tag_title in related_tag_titles:
tmp = toolkit.get_action('package_search')({}, {'q': '"'+related_tag_title+'"', 'rows': 20})
related_pkg_results = tmp['results']
related_pkgs['results'][related_tag_title] = dict()
related_pkgs['results'][related_tag_title]['rows'] = tmp['count']
# filte the same title
related_pkg_results = [_ for _ in related_pkg_results if _['title'] != pkg_title]
related_pkgs['results'][related_tag_title]['result'] = related_pkg_results
# related_pkgs['results'][related_tag_title] = sorted(related_pkgs['results'][related_tag_title], key=lambda t: len(t))
return related_pkgs
def get_helpers(self):
return {'related_pkgs': self.related_pkgs} | agpl-3.0 | -2,626,176,818,953,822,000 | 38.104478 | 127 | 0.623902 | false |
tomlepaine/bottle-skeleton | app.py | 1 | 1076 |
import argparse
from bottle import get, run, response, static_file, redirect
from jinja2 import Environment, PackageLoader
import config
parser = argparse.ArgumentParser(prog=config.name,
description=config.description)
parser.add_argument('--port',
type=int,
default=8080,
help='Port where gui is running.')
args = parser.parse_args()
# Setup globals
PORT = args.port
ENV = Environment(loader=PackageLoader(config.package_name,
config.template_dir))
@get('/')
def index():
redirect('/hello-world')
@get('/hello-world')
def hello_world():
template = ENV.get_template('hello-world.html')
page = template.render()
return page
@get('/page')
def page():
template = ENV.get_template('not-implemented.html')
page = template.render()
return page
@get('/frame/<index:int>.jpeg')
def frame(index):
response.content_type = "image/jpeg"
return VIDEO.get_frame(index)
run(host='localhost', port=PORT)
| bsd-2-clause | 3,597,068,652,393,336,000 | 19.692308 | 64 | 0.620818 | false |
whiteclover/Breeze | breeze/chatcenter/room.py | 1 | 1148 | import time
import logging
LOGGER = logging.getLogger(__name__)
class Room(object):
def __init__(self, name):
self.name = name
self.peers = {}
def broadcast(self, msg):
if msg:
for peer in self.peers.values():
if peer != msg.user:
LOGGER.info('peer: %s', peer)
peer.send(msg)
def add_peer(self, peer):
if peer.uid in self.peers:
            raise Exception('peer %s is already in room %s' % (peer.uid, self.name))
peer.add_room(self)
self.peers[peer.uid] = peer
def remove_peer(self, peer):
peer.remove_room(self)
del self.peers[peer.uid]
class RoomManager(object):
def __init__(self):
self.rooms = {}
def add_room(self, room):
self.rooms[room.name] = room
def remove_room(self, room):
if room.name in self.rooms:
del self.rooms[room.name]
def add_peer_to_room(self, room_name, peer):
room = self.rooms.get(room_name)
if not room:
room = Room(room_name)
self.rooms[room_name] = room
room.add_peer(peer)
def remove_peer_from_room(self, room_name, peer):
room = self.rooms.get(room_name)
if room:
room.remove_peer(peer)
def broadcast(self, room_name, msg):
room = self.rooms.get(room_name)
if room:
room.broadcast(msg)
| gpl-2.0 | -523,038,310,736,264,300 | 19.5 | 50 | 0.658537 | false |
mhumeSF/ansible-container | setup.py | 1 | 3215 | import os
import sys
import shlex
import shutil
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from setuptools.command.sdist import sdist as SDistCommand
from pip.req import parse_requirements
import container
class PlaybookAsTests(TestCommand):
user_options = [('ansible-args=', None, "Extra ansible arguments")]
def initialize_options(self):
self.ansible_args = u''
TestCommand.initialize_options(self)
def run(self):
if sys.platform == 'darwin':
# Docker for Mac exports certain paths into the virtual machine
# actually running Docker. The default tempdir isn't one of them,
# but /tmp is.
os.environ['TMPDIR'] = '/tmp'
return TestCommand.run(self)
def run_tests(self):
import subprocess
p = subprocess.Popen(
['ansible-playbook'] +
shlex.split(self.ansible_args) +
['run_tests.yml'],
cwd=os.path.join(os.getcwd(), 'test'),
)
rc = p.wait()
sys.exit(rc)
class BundleConductorFiles(SDistCommand):
def run(self):
shutil.copyfile('./setup.py', 'container/docker/files/setup.py')
shutil.copyfile('./conductor-requirements.txt',
'container/docker/files/conductor-requirements.txt')
shutil.copyfile('./conductor-requirements.yml',
'container/docker/files/conductor-requirements.yml')
return SDistCommand.run(self)
if container.ENV == 'host':
install_reqs = parse_requirements('requirements.txt', session=False)
setup_kwargs = dict(
install_requires=[str(ir.req) for ir in install_reqs if ir.match_markers()],
tests_require=[
'ansible>=2.3.0',
'pytest>=3',
'docker>=2.1',
'jmespath>=0.9'
],
extras_require={
'docker': ['docker>=2.1'],
'docbuild': ['Sphinx>=1.5.0'],
'openshift': ['openshift==0.0.1'],
'k8s': ['openshift==0.0.1']
},
#dependency_links=[
# 'https://github.com/ansible/ansible/archive/devel.tar.gz#egg=ansible-2.4.0',
#],
cmdclass={'test': PlaybookAsTests,
'sdist': BundleConductorFiles},
entry_points={
'console_scripts': [
'ansible-container = container.cli:host_commandline']
}
)
else:
setup_kwargs = dict(
entry_points={
'console_scripts': ['conductor = container.cli:conductor_commandline']
},
)
setup(
name='ansible-container',
version=container.__version__,
packages=find_packages(include='container.*'),
include_package_data=True,
zip_safe=False,
url='https://github.com/ansible/ansible-container',
license='LGPLv3 (See LICENSE file for terms)',
author='Joshua "jag" Ginsberg, Chris Houseknecht, and others (See AUTHORS file for contributors)',
author_email='[email protected]',
description=('Ansible Container empowers you to orchestrate, build, run, and ship '
'Docker images built from Ansible playbooks.'),
**setup_kwargs
)
| lgpl-3.0 | 496,850,982,274,389,440 | 33.202128 | 102 | 0.602488 | false |
googleapis/python-dialogflow | tests/unit/gapic/dialogflow_v2beta1/test_entity_types.py | 1 | 136892 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.entity_types import EntityTypesAsyncClient
from google.cloud.dialogflow_v2beta1.services.entity_types import EntityTypesClient
from google.cloud.dialogflow_v2beta1.services.entity_types import pagers
from google.cloud.dialogflow_v2beta1.services.entity_types import transports
from google.cloud.dialogflow_v2beta1.services.entity_types.transports.base import (
_API_CORE_VERSION,
)
from google.cloud.dialogflow_v2beta1.services.entity_types.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.dialogflow_v2beta1.types import entity_type
from google.cloud.dialogflow_v2beta1.types import entity_type as gcd_entity_type
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert EntityTypesClient._get_default_mtls_endpoint(None) is None
assert (
EntityTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
EntityTypesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
EntityTypesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
EntityTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert EntityTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [EntityTypesClient, EntityTypesAsyncClient,])
def test_entity_types_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize("client_class", [EntityTypesClient, EntityTypesAsyncClient,])
def test_entity_types_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_entity_types_client_get_transport_class():
transport = EntityTypesClient.get_transport_class()
available_transports = [
transports.EntityTypesGrpcTransport,
]
assert transport in available_transports
transport = EntityTypesClient.get_transport_class("grpc")
assert transport == transports.EntityTypesGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EntityTypesClient, transports.EntityTypesGrpcTransport, "grpc"),
(
EntityTypesAsyncClient,
transports.EntityTypesGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
EntityTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EntityTypesClient)
)
@mock.patch.object(
EntityTypesAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EntityTypesAsyncClient),
)
def test_entity_types_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(EntityTypesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(EntityTypesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(EntityTypesClient, transports.EntityTypesGrpcTransport, "grpc", "true"),
(
EntityTypesAsyncClient,
transports.EntityTypesGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(EntityTypesClient, transports.EntityTypesGrpcTransport, "grpc", "false"),
(
EntityTypesAsyncClient,
transports.EntityTypesGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
EntityTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EntityTypesClient)
)
@mock.patch.object(
EntityTypesAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EntityTypesAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_entity_types_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EntityTypesClient, transports.EntityTypesGrpcTransport, "grpc"),
(
EntityTypesAsyncClient,
transports.EntityTypesGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_entity_types_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EntityTypesClient, transports.EntityTypesGrpcTransport, "grpc"),
(
EntityTypesAsyncClient,
transports.EntityTypesGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_entity_types_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_entity_types_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.entity_types.transports.EntityTypesGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = EntityTypesClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_list_entity_types(
transport: str = "grpc", request_type=entity_type.ListEntityTypesRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = entity_type.ListEntityTypesResponse(
next_page_token="next_page_token_value",
)
response = client.list_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.ListEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEntityTypesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_entity_types_from_dict():
test_list_entity_types(request_type=dict)
def test_list_entity_types_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
client.list_entity_types()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.ListEntityTypesRequest()
@pytest.mark.asyncio
async def test_list_entity_types_async(
transport: str = "grpc_asyncio", request_type=entity_type.ListEntityTypesRequest
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
entity_type.ListEntityTypesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.ListEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEntityTypesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_entity_types_async_from_dict():
await test_list_entity_types_async(request_type=dict)
def test_list_entity_types_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.ListEntityTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
call.return_value = entity_type.ListEntityTypesResponse()
client.list_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_entity_types_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.ListEntityTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
entity_type.ListEntityTypesResponse()
)
await client.list_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_entity_types_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = entity_type.ListEntityTypesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_entity_types(
parent="parent_value", language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].language_code == "language_code_value"
def test_list_entity_types_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_entity_types(
entity_type.ListEntityTypesRequest(),
parent="parent_value",
language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_list_entity_types_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
entity_type.ListEntityTypesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_entity_types(
parent="parent_value", language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].language_code == "language_code_value"
@pytest.mark.asyncio
async def test_list_entity_types_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_entity_types(
entity_type.ListEntityTypesRequest(),
parent="parent_value",
language_code="language_code_value",
)
def test_list_entity_types_pager():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
entity_type.ListEntityTypesResponse(
entity_types=[
entity_type.EntityType(),
entity_type.EntityType(),
entity_type.EntityType(),
],
next_page_token="abc",
),
entity_type.ListEntityTypesResponse(
entity_types=[], next_page_token="def",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(),], next_page_token="ghi",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(), entity_type.EntityType(),],
),
RuntimeError,
)
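        # Build the routing-header metadata that the pager is expected to
        # carry forward to each page request.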
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_entity_types(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, entity_type.EntityType) for i in results)
def test_list_entity_types_pages():
    client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
entity_type.ListEntityTypesResponse(
entity_types=[
entity_type.EntityType(),
entity_type.EntityType(),
entity_type.EntityType(),
],
next_page_token="abc",
),
entity_type.ListEntityTypesResponse(
entity_types=[], next_page_token="def",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(),], next_page_token="ghi",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(), entity_type.EntityType(),],
),
RuntimeError,
)
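        # Iterate the pages directly and check each page token against the
        # series configured above.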
pages = list(client.list_entity_types(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_entity_types_async_pager():
    client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
entity_type.ListEntityTypesResponse(
entity_types=[
entity_type.EntityType(),
entity_type.EntityType(),
entity_type.EntityType(),
],
next_page_token="abc",
),
entity_type.ListEntityTypesResponse(
entity_types=[], next_page_token="def",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(),], next_page_token="ghi",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(), entity_type.EntityType(),],
),
RuntimeError,
)
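        # Iterate the async pager and collect the entity types from every page.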
async_pager = await client.list_entity_types(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, entity_type.EntityType) for i in responses)
@pytest.mark.asyncio
async def test_list_entity_types_async_pages():
    client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_entity_types),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
entity_type.ListEntityTypesResponse(
entity_types=[
entity_type.EntityType(),
entity_type.EntityType(),
entity_type.EntityType(),
],
next_page_token="abc",
),
entity_type.ListEntityTypesResponse(
entity_types=[], next_page_token="def",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(),], next_page_token="ghi",
),
entity_type.ListEntityTypesResponse(
entity_types=[entity_type.EntityType(), entity_type.EntityType(),],
),
RuntimeError,
)
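        # Collect the pages from the async iterator and verify their tokens.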
pages = []
async for page_ in (await client.list_entity_types(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_get_entity_type(
transport: str = "grpc", request_type=entity_type.GetEntityTypeRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = entity_type.EntityType(
name="name_value",
display_name="display_name_value",
kind=entity_type.EntityType.Kind.KIND_MAP,
auto_expansion_mode=entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT,
enable_fuzzy_extraction=True,
)
response = client.get_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.GetEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, entity_type.EntityType)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.kind == entity_type.EntityType.Kind.KIND_MAP
assert (
response.auto_expansion_mode
== entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT
)
assert response.enable_fuzzy_extraction is True
def test_get_entity_type_from_dict():
test_get_entity_type(request_type=dict)
def test_get_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
client.get_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.GetEntityTypeRequest()
@pytest.mark.asyncio
async def test_get_entity_type_async(
transport: str = "grpc_asyncio", request_type=entity_type.GetEntityTypeRequest
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
entity_type.EntityType(
name="name_value",
display_name="display_name_value",
kind=entity_type.EntityType.Kind.KIND_MAP,
auto_expansion_mode=entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT,
enable_fuzzy_extraction=True,
)
)
response = await client.get_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.GetEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, entity_type.EntityType)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.kind == entity_type.EntityType.Kind.KIND_MAP
assert (
response.auto_expansion_mode
== entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT
)
assert response.enable_fuzzy_extraction is True
@pytest.mark.asyncio
async def test_get_entity_type_async_from_dict():
await test_get_entity_type_async(request_type=dict)
def test_get_entity_type_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.GetEntityTypeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
call.return_value = entity_type.EntityType()
client.get_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_entity_type_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.GetEntityTypeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
entity_type.EntityType()
)
await client.get_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_entity_type_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = entity_type.EntityType()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_entity_type(
name="name_value", language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
assert args[0].language_code == "language_code_value"
def test_get_entity_type_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_entity_type(
entity_type.GetEntityTypeRequest(),
name="name_value",
language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_get_entity_type_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entity_type), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            entity_type.EntityType()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_entity_type(
name="name_value", language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
assert args[0].language_code == "language_code_value"
@pytest.mark.asyncio
async def test_get_entity_type_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_entity_type(
entity_type.GetEntityTypeRequest(),
name="name_value",
language_code="language_code_value",
)
def test_create_entity_type(
transport: str = "grpc", request_type=gcd_entity_type.CreateEntityTypeRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_entity_type.EntityType(
name="name_value",
display_name="display_name_value",
kind=gcd_entity_type.EntityType.Kind.KIND_MAP,
auto_expansion_mode=gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT,
enable_fuzzy_extraction=True,
)
response = client.create_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_entity_type.CreateEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_entity_type.EntityType)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.kind == gcd_entity_type.EntityType.Kind.KIND_MAP
assert (
response.auto_expansion_mode
== gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT
)
assert response.enable_fuzzy_extraction is True
def test_create_entity_type_from_dict():
test_create_entity_type(request_type=dict)
def test_create_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
client.create_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_entity_type.CreateEntityTypeRequest()
@pytest.mark.asyncio
async def test_create_entity_type_async(
transport: str = "grpc_asyncio",
request_type=gcd_entity_type.CreateEntityTypeRequest,
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_entity_type.EntityType(
name="name_value",
display_name="display_name_value",
kind=gcd_entity_type.EntityType.Kind.KIND_MAP,
auto_expansion_mode=gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT,
enable_fuzzy_extraction=True,
)
)
response = await client.create_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_entity_type.CreateEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_entity_type.EntityType)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.kind == gcd_entity_type.EntityType.Kind.KIND_MAP
assert (
response.auto_expansion_mode
== gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT
)
assert response.enable_fuzzy_extraction is True
@pytest.mark.asyncio
async def test_create_entity_type_async_from_dict():
await test_create_entity_type_async(request_type=dict)
def test_create_entity_type_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_entity_type.CreateEntityTypeRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
call.return_value = gcd_entity_type.EntityType()
client.create_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_entity_type_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_entity_type.CreateEntityTypeRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_entity_type.EntityType()
)
await client.create_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_entity_type_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_entity_type.EntityType()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_entity_type(
parent="parent_value",
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entity_type == gcd_entity_type.EntityType(name="name_value")
assert args[0].language_code == "language_code_value"
def test_create_entity_type_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_entity_type(
gcd_entity_type.CreateEntityTypeRequest(),
parent="parent_value",
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_create_entity_type_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_entity_type.EntityType()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_entity_type(
parent="parent_value",
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entity_type == gcd_entity_type.EntityType(name="name_value")
assert args[0].language_code == "language_code_value"
@pytest.mark.asyncio
async def test_create_entity_type_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_entity_type(
gcd_entity_type.CreateEntityTypeRequest(),
parent="parent_value",
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
)
def test_update_entity_type(
transport: str = "grpc", request_type=gcd_entity_type.UpdateEntityTypeRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_entity_type.EntityType(
name="name_value",
display_name="display_name_value",
kind=gcd_entity_type.EntityType.Kind.KIND_MAP,
auto_expansion_mode=gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT,
enable_fuzzy_extraction=True,
)
response = client.update_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_entity_type.UpdateEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_entity_type.EntityType)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.kind == gcd_entity_type.EntityType.Kind.KIND_MAP
assert (
response.auto_expansion_mode
== gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT
)
assert response.enable_fuzzy_extraction is True
def test_update_entity_type_from_dict():
test_update_entity_type(request_type=dict)
def test_update_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
client.update_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_entity_type.UpdateEntityTypeRequest()
@pytest.mark.asyncio
async def test_update_entity_type_async(
transport: str = "grpc_asyncio",
request_type=gcd_entity_type.UpdateEntityTypeRequest,
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_entity_type.EntityType(
name="name_value",
display_name="display_name_value",
kind=gcd_entity_type.EntityType.Kind.KIND_MAP,
auto_expansion_mode=gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT,
enable_fuzzy_extraction=True,
)
)
response = await client.update_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_entity_type.UpdateEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_entity_type.EntityType)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.kind == gcd_entity_type.EntityType.Kind.KIND_MAP
assert (
response.auto_expansion_mode
== gcd_entity_type.EntityType.AutoExpansionMode.AUTO_EXPANSION_MODE_DEFAULT
)
assert response.enable_fuzzy_extraction is True
@pytest.mark.asyncio
async def test_update_entity_type_async_from_dict():
await test_update_entity_type_async(request_type=dict)
def test_update_entity_type_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_entity_type.UpdateEntityTypeRequest()
request.entity_type.name = "entity_type.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
call.return_value = gcd_entity_type.EntityType()
client.update_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type.name=entity_type.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_entity_type_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_entity_type.UpdateEntityTypeRequest()
request.entity_type.name = "entity_type.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_entity_type.EntityType()
)
await client.update_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type.name=entity_type.name/value",) in kw[
"metadata"
]
def test_update_entity_type_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_entity_type.EntityType()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_entity_type(
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].entity_type == gcd_entity_type.EntityType(name="name_value")
assert args[0].language_code == "language_code_value"
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_entity_type_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_entity_type(
gcd_entity_type.UpdateEntityTypeRequest(),
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_entity_type_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_entity_type.EntityType()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_entity_type(
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].entity_type == gcd_entity_type.EntityType(name="name_value")
assert args[0].language_code == "language_code_value"
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_entity_type_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_entity_type(
gcd_entity_type.UpdateEntityTypeRequest(),
entity_type=gcd_entity_type.EntityType(name="name_value"),
language_code="language_code_value",
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_delete_entity_type(
transport: str = "grpc", request_type=entity_type.DeleteEntityTypeRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.DeleteEntityTypeRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_entity_type_from_dict():
test_delete_entity_type(request_type=dict)
def test_delete_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
client.delete_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.DeleteEntityTypeRequest()
@pytest.mark.asyncio
async def test_delete_entity_type_async(
transport: str = "grpc_asyncio", request_type=entity_type.DeleteEntityTypeRequest
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.DeleteEntityTypeRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_entity_type_async_from_dict():
await test_delete_entity_type_async(request_type=dict)
def test_delete_entity_type_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.DeleteEntityTypeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
call.return_value = None
client.delete_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_entity_type_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.DeleteEntityTypeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_entity_type_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_entity_type(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_entity_type_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_entity_type(
entity_type.DeleteEntityTypeRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_entity_type_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_entity_type), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_entity_type(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_entity_type_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_entity_type(
entity_type.DeleteEntityTypeRequest(), name="name_value",
)
def test_batch_update_entity_types(
transport: str = "grpc", request_type=entity_type.BatchUpdateEntityTypesRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_update_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchUpdateEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_update_entity_types_from_dict():
test_batch_update_entity_types(request_type=dict)
def test_batch_update_entity_types_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entity_types), "__call__"
) as call:
client.batch_update_entity_types()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchUpdateEntityTypesRequest()
@pytest.mark.asyncio
async def test_batch_update_entity_types_async(
transport: str = "grpc_asyncio",
request_type=entity_type.BatchUpdateEntityTypesRequest,
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_update_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchUpdateEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_update_entity_types_async_from_dict():
await test_batch_update_entity_types_async(request_type=dict)
def test_batch_update_entity_types_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchUpdateEntityTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entity_types), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_update_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_update_entity_types_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchUpdateEntityTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entity_types), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_update_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_delete_entity_types(
transport: str = "grpc", request_type=entity_type.BatchDeleteEntityTypesRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_delete_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchDeleteEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_delete_entity_types_from_dict():
test_batch_delete_entity_types(request_type=dict)
def test_batch_delete_entity_types_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
client.batch_delete_entity_types()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchDeleteEntityTypesRequest()
@pytest.mark.asyncio
async def test_batch_delete_entity_types_async(
transport: str = "grpc_asyncio",
request_type=entity_type.BatchDeleteEntityTypesRequest,
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_delete_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchDeleteEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_delete_entity_types_async_from_dict():
await test_batch_delete_entity_types_async(request_type=dict)
def test_batch_delete_entity_types_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchDeleteEntityTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_delete_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_delete_entity_types_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchDeleteEntityTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_delete_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_delete_entity_types_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_delete_entity_types(
parent="parent_value", entity_type_names=["entity_type_names_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entity_type_names == ["entity_type_names_value"]
def test_batch_delete_entity_types_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_delete_entity_types(
entity_type.BatchDeleteEntityTypesRequest(),
parent="parent_value",
entity_type_names=["entity_type_names_value"],
)
@pytest.mark.asyncio
async def test_batch_delete_entity_types_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entity_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_delete_entity_types(
parent="parent_value", entity_type_names=["entity_type_names_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entity_type_names == ["entity_type_names_value"]
@pytest.mark.asyncio
async def test_batch_delete_entity_types_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_delete_entity_types(
entity_type.BatchDeleteEntityTypesRequest(),
parent="parent_value",
entity_type_names=["entity_type_names_value"],
)
def test_batch_create_entities(
transport: str = "grpc", request_type=entity_type.BatchCreateEntitiesRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_create_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchCreateEntitiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_create_entities_from_dict():
test_batch_create_entities(request_type=dict)
def test_batch_create_entities_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
client.batch_create_entities()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchCreateEntitiesRequest()
@pytest.mark.asyncio
async def test_batch_create_entities_async(
transport: str = "grpc_asyncio", request_type=entity_type.BatchCreateEntitiesRequest
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_create_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchCreateEntitiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_create_entities_async_from_dict():
await test_batch_create_entities_async(request_type=dict)
def test_batch_create_entities_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchCreateEntitiesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_create_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_create_entities_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchCreateEntitiesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_create_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_create_entities_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_create_entities(
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entities == [entity_type.EntityType.Entity(value="value_value")]
assert args[0].language_code == "language_code_value"
def test_batch_create_entities_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_create_entities(
entity_type.BatchCreateEntitiesRequest(),
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_batch_create_entities_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_create_entities(
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entities == [entity_type.EntityType.Entity(value="value_value")]
assert args[0].language_code == "language_code_value"
@pytest.mark.asyncio
async def test_batch_create_entities_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_create_entities(
entity_type.BatchCreateEntitiesRequest(),
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
def test_batch_update_entities(
transport: str = "grpc", request_type=entity_type.BatchUpdateEntitiesRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_update_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchUpdateEntitiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_update_entities_from_dict():
test_batch_update_entities(request_type=dict)
def test_batch_update_entities_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
client.batch_update_entities()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchUpdateEntitiesRequest()
@pytest.mark.asyncio
async def test_batch_update_entities_async(
transport: str = "grpc_asyncio", request_type=entity_type.BatchUpdateEntitiesRequest
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_update_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchUpdateEntitiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_update_entities_async_from_dict():
await test_batch_update_entities_async(request_type=dict)
def test_batch_update_entities_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchUpdateEntitiesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_update_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_update_entities_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchUpdateEntitiesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_update_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_update_entities_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_update_entities(
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entities == [entity_type.EntityType.Entity(value="value_value")]
assert args[0].language_code == "language_code_value"
def test_batch_update_entities_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_update_entities(
entity_type.BatchUpdateEntitiesRequest(),
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_batch_update_entities_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_update_entities(
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entities == [entity_type.EntityType.Entity(value="value_value")]
assert args[0].language_code == "language_code_value"
@pytest.mark.asyncio
async def test_batch_update_entities_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_update_entities(
entity_type.BatchUpdateEntitiesRequest(),
parent="parent_value",
entities=[entity_type.EntityType.Entity(value="value_value")],
language_code="language_code_value",
)
def test_batch_delete_entities(
transport: str = "grpc", request_type=entity_type.BatchDeleteEntitiesRequest
):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_delete_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchDeleteEntitiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_delete_entities_from_dict():
test_batch_delete_entities(request_type=dict)
def test_batch_delete_entities_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
client.batch_delete_entities()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchDeleteEntitiesRequest()
@pytest.mark.asyncio
async def test_batch_delete_entities_async(
transport: str = "grpc_asyncio", request_type=entity_type.BatchDeleteEntitiesRequest
):
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_delete_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == entity_type.BatchDeleteEntitiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_delete_entities_async_from_dict():
await test_batch_delete_entities_async(request_type=dict)
def test_batch_delete_entities_field_headers():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchDeleteEntitiesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_delete_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_delete_entities_field_headers_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = entity_type.BatchDeleteEntitiesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_delete_entities(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_delete_entities_flattened():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_delete_entities(
parent="parent_value",
entity_values=["entity_values_value"],
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entity_values == ["entity_values_value"]
assert args[0].language_code == "language_code_value"
def test_batch_delete_entities_flattened_error():
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_delete_entities(
entity_type.BatchDeleteEntitiesRequest(),
parent="parent_value",
entity_values=["entity_values_value"],
language_code="language_code_value",
)
@pytest.mark.asyncio
async def test_batch_delete_entities_flattened_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_entities), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_delete_entities(
parent="parent_value",
entity_values=["entity_values_value"],
language_code="language_code_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].entity_values == ["entity_values_value"]
assert args[0].language_code == "language_code_value"
@pytest.mark.asyncio
async def test_batch_delete_entities_flattened_error_async():
client = EntityTypesAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_delete_entities(
entity_type.BatchDeleteEntitiesRequest(),
parent="parent_value",
entity_values=["entity_values_value"],
language_code="language_code_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.EntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.EntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EntityTypesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.EntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EntityTypesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.EntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = EntityTypesClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.EntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.EntityTypesGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.EntityTypesGrpcTransport, transports.EntityTypesGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = EntityTypesClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.EntityTypesGrpcTransport,)
def test_entity_types_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.EntityTypesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_entity_types_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.entity_types.transports.EntityTypesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.EntityTypesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_entity_types",
"get_entity_type",
"create_entity_type",
"update_entity_type",
"delete_entity_type",
"batch_update_entity_types",
"batch_delete_entity_types",
"batch_create_entities",
"batch_update_entities",
"batch_delete_entities",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_entity_types_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.entity_types.transports.EntityTypesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EntityTypesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_entity_types_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.entity_types.transports.EntityTypesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EntityTypesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_entity_types_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2beta1.services.entity_types.transports.EntityTypesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EntityTypesTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_entity_types_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EntityTypesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_entity_types_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EntityTypesClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.EntityTypesGrpcTransport, transports.EntityTypesGrpcAsyncIOTransport,],
)
@requires_google_auth_gte_1_25_0
def test_entity_types_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[transports.EntityTypesGrpcTransport, transports.EntityTypesGrpcAsyncIOTransport,],
)
@requires_google_auth_lt_1_25_0
def test_entity_types_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EntityTypesGrpcTransport, grpc_helpers),
(transports.EntityTypesGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_gte_1_26_0
def test_entity_types_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EntityTypesGrpcTransport, grpc_helpers),
(transports.EntityTypesGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_entity_types_transport_create_channel_old_api_core(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EntityTypesGrpcTransport, grpc_helpers),
(transports.EntityTypesGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_entity_types_transport_create_channel_user_scopes(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.EntityTypesGrpcTransport, transports.EntityTypesGrpcAsyncIOTransport],
)
def test_entity_types_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_entity_types_host_no_port():
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_entity_types_host_with_port():
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_entity_types_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EntityTypesGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_entity_types_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EntityTypesGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EntityTypesGrpcTransport, transports.EntityTypesGrpcAsyncIOTransport],
)
def test_entity_types_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EntityTypesGrpcTransport, transports.EntityTypesGrpcAsyncIOTransport],
)
def test_entity_types_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_entity_types_grpc_lro_client():
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_entity_types_grpc_lro_async_client():
client = EntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_entity_type_path():
project = "squid"
entity_type = "clam"
expected = "projects/{project}/agent/entityTypes/{entity_type}".format(
project=project, entity_type=entity_type,
)
actual = EntityTypesClient.entity_type_path(project, entity_type)
assert expected == actual
def test_parse_entity_type_path():
expected = {
"project": "whelk",
"entity_type": "octopus",
}
path = EntityTypesClient.entity_type_path(**expected)
# Check that the path construction is reversible.
actual = EntityTypesClient.parse_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = EntityTypesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = EntityTypesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = EntityTypesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = EntityTypesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = EntityTypesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = EntityTypesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = EntityTypesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = EntityTypesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = EntityTypesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = EntityTypesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = EntityTypesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = EntityTypesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = EntityTypesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = EntityTypesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = EntityTypesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_DEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.EntityTypesTransport, "_prep_wrapped_messages"
) as prep:
client = EntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.EntityTypesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = EntityTypesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | 1,280,790,098,019,071,000 | 37.659136 | 118 | 0.662362 | false |
blockstack/blockstack-server | integration_tests/blockstack_integration_tests/scenarios/name_pre_reg_stacks_sendtokens_interleaved.py | 1 | 5075 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
import testlib
import virtualchain
import blockstack
import json
STACKS = testlib.TOKEN_TYPE_STACKS
# activate tokens
"""
TEST ENV BLOCKSTACK_EPOCH_1_END_BLOCK 682
TEST ENV BLOCKSTACK_EPOCH_2_END_BLOCK 683
TEST ENV BLOCKSTACK_EPOCH_3_END_BLOCK 684
TEST ENV BLOCKSTACK_EPOCH_2_NAMESPACE_LIFETIME_MULTIPLIER 1
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_LIFETIME_MULTIPLIER 1
"""
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 0 ), # no tokens yet
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 0 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 0 ),
testlib.Wallet("9864768ccf5137392de5b5d6551a0f9f17279df2f82b4de7b905290f95fde66201", 0),
testlib.Wallet("2e55007fec0f1d9a81700e56aa8ce24d7e6e245ede48c00663f930f99fae133601", 0),
testlib.Wallet("9d6836cdaf81245d646988effe398338073892143b1185f4553e6118f231d1bf01", 0),
testlib.Wallet("f9c9371b7a8cc6b5ef544457cdd565e5791d743f5d60c924265732147429414801", 0),
testlib.Wallet("cd8d6bdf3dfd7b3d498539bb42cf218b77b0fda4f3bc119c7226d803e8425da901", 0),
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
def scenario( wallets, **kw ):
testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey, version_bits=blockstack.NAMESPACE_VERSION_PAY_WITH_STACKS )
testlib.next_block( **kw )
testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
testlib.next_block( **kw )
balances = testlib.get_wallet_balances(wallets[2])
assert balances[wallets[2].addr][STACKS] == 0
# should fail--not enough stacks
testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr, safety_checks=False, expect_fail=True )
testlib.next_block( **kw )
name_cost = testlib.blockstack_get_name_token_cost('foo.test')
assert name_cost['units'] == STACKS
assert name_cost['amount'] > 0
# send tokens and preorder multiple times in the block
# should all succeed, BUT: force them to go in order through UTXO chaining
for i in range(0, 5):
name_recipient_privkey = wallets[-(i+1)].privkey
name_recipient_addr = virtualchain.address_reencode(virtualchain.get_privkey_address(name_recipient_privkey))
testlib.blockstack_send_tokens(name_recipient_addr, "STACKS", name_cost['amount'], wallets[0].privkey)
testlib.send_funds(wallets[0].privkey, 1000000, name_recipient_addr)
testlib.blockstack_name_preorder( "foo_{}.test".format(i), name_recipient_privkey, wallets[3].addr, safety_checks=False )
testlib.blockstack_name_register( "foo_{}.test".format(i), name_recipient_privkey, wallets[3].addr, safety_checks=False )
testlib.next_block(**kw)
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
print "namespace reveal exists"
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
print "no namespace"
return False
if ns['namespace_id'] != 'test':
print "wrong namespace"
return False
# not preordered
for i in range(0, 5):
name = 'foo_{}.test'.format(i)
preorder = state_engine.get_name_preorder( name, virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr )
if preorder is not None:
print "preorder exists"
return False
# registered
name_rec = state_engine.get_name( name )
if name_rec is None:
print "name does not exist"
return False
# owned by
if name_rec['address'] != wallets[3].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[3].addr):
print "sender is wrong"
return False
return True
| gpl-3.0 | -52,505,558,049,126,810 | 38.96063 | 203 | 0.703645 | false |
ge0rgi/cinder | cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_snapshot.py | 1 | 3977 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.fake_snapshot import fake_snapshot_obj
from cinder.tests.unit.volume.drivers.dell_emc import scaleio
from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks
class TestDeleteSnapShot(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.delete_snapshot()``"""
def setUp(self):
"""Setup a test case environment.
Creates fake volume and snapshot objects and sets up the required
API responses.
"""
super(TestDeleteSnapShot, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.snapshot = fake_snapshot_obj(
ctx, **{'provider_id': fake.SNAPSHOT_ID})
self.snapshot_name_2x_enc = urllib.parse.quote(
urllib.parse.quote(
self.driver._id_to_base64(self.snapshot.id)
)
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: self.snapshot.id,
'instances/Volume::{}/action/removeMappedSdc'.format(
self.snapshot.provider_id
): self.snapshot.id,
'instances/Volume::{}/action/removeVolume'.format(
self.snapshot.provider_id
): self.snapshot.id,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
'instances/Volume::{}/action/removeVolume'.format(
self.snapshot.provider_id
): self.BAD_STATUS_RESPONSE,
},
self.RESPONSE_MODE.Invalid: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: mocks.MockHTTPSResponse(
{
'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
'message': 'Test Delete Invalid Snapshot',
}, 400
),
'instances/Volume::{}/action/removeVolume'.format(
self.snapshot.provider_id): mocks.MockHTTPSResponse(
{
'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
'message': 'Test Delete Invalid Snapshot',
}, 400,
)
},
}
def test_bad_login(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot, self.snapshot)
def test_delete_invalid_snapshot(self):
        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.driver.delete_snapshot(self.snapshot)
def test_delete_snapshot(self):
"""Setting the unmap volume before delete flag for tests """
self.driver.configuration.set_override(
'sio_unmap_volume_before_deletion',
override=True)
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.delete_snapshot(self.snapshot)
| apache-2.0 | 8,501,585,848,030,189,000 | 40.863158 | 78 | 0.602967 | false |
rafafigueroa/compass-gait | hasimpy.py | 1 | 9216 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Rafael Figueroa
"""
dp = True
import numpy as np
DEBUG = False
class H:
"""Hybrid Automata Model"""
def __init__(self, Q, Init_X, Init_qID, state_names = None):
self.q = Q #list of q
self.Init_X = Init_X
self.Init_qID = Init_qID
self.states = state_names
self.Ts = None
def mode_tracker_guard_check(self, qID, X):
# Called by mode_tracker to set the mode
q = self.q[qID]
g=q.E.G #guard list
oe=q.E.OE #out edges list
[g_activated, oID_activated_g] = guard_check(g, X)
# return new qID when a guard is activated
if g_activated:
qID_activated_g = oe[oID_activated_g]
else:
qID_activated_g = qID
return qID_activated_g
def sim(self, qID, X, u, t0, tlim,
haws_flag=False,
debug_flag=False, Ts=1e-4):
self.Ts = Ts
#t0 refers to the initial time of
#each continuous dynamic time interval
sr = SimResult(self.states) #Initialize class
q = self.q[qID] #get a ref to current mode
global DEBUG
DEBUG = debug_flag #change global DEBUG variable
while t0<tlim:
#get values from current q object
f=q.f #continuous dynamics func
# when simulating is requested by haws
# with a forced input
if not haws_flag:
u=q.u
g=q.E.G #guard list
r=q.E.R #reset map list
oe=q.E.OE #out edges list
dom=q.Dom #discrete mode domain
avoid=q.Avoid #discrete mode avoid
if DEBUG:
print '\n*** New Discrete State *** \n'
print 'f=',f,'\ng=',g,'\nr=',r,'\noe=',oe,'\ndom=',dom
print 'Avoid=',avoid
print 'qID=',q.qID,'\nX=',X,'\nu=',u
print '\n*** domain check *** \n'
if not dom(X):
errorString = 'Outside domain!'
print errorString
#raise NameError(errorString)
if DEBUG:
print '\n*** continuous dynamics *** \n'
#simulate continuous dynamics
T, Y, oID_activated_g, \
avoid_activated, tlim_activated = \
odeeul(f, u, g, avoid, X, t0, tlim, Ts)
# store this time interval
# in the simulation results
sr.newTimeInterval(T, Y, q)
# when inside the avoid set, simulation stops
# and the information is stored in the simulation results
if avoid_activated:
sr.avoid_activated = True
sr.timeToAvoid = T[-1]
break #while loop
if tlim_activated:
break #while loop
# *** after guard is activated ***
# prepare data for the next loop
t0=T[-1] #reset initial time to the end of
#last time interval
last_state = np.array(Y[-1])
if DEBUG:
print '\n *** reset map *** \n'
print 'last state =',last_state
X=r[oID_activated_g](last_state) #reset map
qID_activated_g = oe[oID_activated_g]
#guard activated print out
if DEBUG:
print 'sim -- guard activated'
print 'sim -- from q =', q.qID, 'to q =', qID_activated_g
print 'sim -- State =', X
#get new q
q = self.q[qID_activated_g]
return sr
class Q:
def __init__(self,qID,f,u,E,
Dom = lambda X:True,
Avoid = lambda X:False ,
TC=True):
self.qID = qID
self.f = f
self.u = u
self.E = E
self.Dom = Dom
self.Avoid = Avoid
self.TC = TC
class E:
def __init__(self,OE,G,R):
self.OE = OE
self.G = G
self.R = R
def guard_check(g,X):
guard_list = []
#evaluate every guard in g
#g is the list of guards for this q
#store the results in guard_list
for guard in g:
guard_list.append(guard(X))
oID_activated_g = None
g_activated = False
#check if any result in guard_list is True
#if it is, store the index
for oID,guard in enumerate(guard_list):
if guard:
oID_activated_g = oID #outside q which tripped the guard
g_activated = True
break
return [g_activated, oID_activated_g]
def avoid_check(avoid,X):
'avoid returns True when inside the avoid set'
return avoid(X)
def odeeul(f, u, g, avoid, X0, t0, tlim, Ts):
X=np.array(X0)
Y=np.array(X0)
T=np.array([t0])
if DEBUG:
print 'State=',X
g_activated, oID_activated_g = guard_check(g,X)
avoid_activated = avoid_check(avoid,X)
tlim_activated = (t0>=tlim)
if g_activated:
print 'instant jump'
if DEBUG:
print 'First checks:'
print '\tg_activated:', g_activated
print '\tavoid_activated', avoid_activated
print '\ttlim_activated', tlim_activated
while not (g_activated or avoid_activated or tlim_activated):
#Evolve continuously until a
#termination condition is activated
X=Ts*f(X,u)+X
Y=np.vstack((Y,X))
tnew = np.array([T[-1]+Ts])
T=np.concatenate([T,tnew])
#termination checks
g_activated, oID_activated_g = guard_check(g,X)
avoid_activated = avoid_check(avoid,X)
tlim_activated = (tnew>=tlim)
if DEBUG:
print 'Running checks:'
print '\tg_activated:',g_activated
print '\tavoid_activated',avoid_activated
print '\ttlim_activated',tlim_activated
return [T, Y, oID_activated_g, avoid_activated,
tlim_activated]
class SimResult:
"""Output from one simulation run"""
def __init__(self, states = None):
self.I = []
self.j = 0
self.timesteps = 0
self.timeToAvoid = None
self.avoid_activated = False
self.path = None
self.time = None
self.mode = None
self.states = states
        if states is not None:
            for yi in range(0, len(states)):
                self.states[yi] = "$" + self.states[yi] + "$"
                self.states[yi] = self.states[yi].encode('string-escape')
                self.states[yi] = self.states[yi].replace("\\\\", "\\")
def newTimeInterval(self, T, Y, qID):
"""Simulation is broken into continuous chunks
Here the chunks are put together"""
if self.j == 0:
# First interval
self.path = Y
self.time = T
self.mode = np.array([qID])
else:
self.path = np.vstack((self.path, Y))
self.time = np.concatenate((self.time, T))
self.mode = np.concatenate((self.mode, np.array([qID])))
self.j = self.j + 1
self.timesteps = self.timesteps + np.size(T)
self.I.append(TimeInterval(T, Y, self.j))
def simPlot(self):
Y_plot = self.path
T_plot = self.time
import matplotlib.pyplot as plt
# TODO: Configurate at install?
# user might not want latex
from matplotlib import rc
rc('text', usetex=True)
nstates = np.size(Y_plot,1)
f, axarr = plt.subplots(nstates, sharex=True)
if nstates>1:
for yi in range(nstates):
axarr[yi].plot(T_plot, Y_plot[:,yi])
if self.states is not None:
axarr[nstates-1].set_xlabel(r'time(s)')
axarr[yi].set_ylabel(self.states[yi], fontsize = 20)
axarr[yi].yaxis.set_label_coords(-0.08, 0.5)
else:
axarr.plot(T_plot,Y_plot)
if self.states is not None:
axarr.set_xlabel('time(s)')
axarr.set_ylabel(self.states[0])
plt.ion()
plt.show()
def phasePlot(self, plotStates):
#TODO:check size of Y,plotStates
X1_plot = self.path[:,plotStates[0]]
X2_plot = self.path[:,plotStates[1]]
import matplotlib.pyplot as plt
# figx = plt.figure()
f, axarr = plt.subplots(1, sharex=True)
axarr.plot(X1_plot,X2_plot)
if self.states is not None:
axarr.set_xlabel(self.states[plotStates[0]], fontsize = 20)
axarr.set_ylabel(self.states[plotStates[1]], fontsize = 20)
axarr.yaxis.set_label_coords(-0.08, 0.5)
plt.ion()
plt.show()
class TimeInterval:
def __init__(self,T,Y,j):
self.T=T
self.Y=Y
self.j=j
def idem(X):
return X
def tolEqual(a,b,tol=1e-2):
return abs(a-b)<tol
def last_row(Y):
print 'shape', np.shape(Y)
rows = np.shape(Y)[0]
print 'rows',rows
if rows>1:
return Y[-1]
else:
return Y
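# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): shows one way the H/Q/E classes above can be wired together for a
# single-mode hybrid system with a guarded self-transition.  The decay
# dynamics, guard threshold and reset value are made-up examples chosen only
# to exercise the API, not taken from any particular model.
def _example_hybrid_run():
    f0 = lambda X, u: np.array([-X[0] + u])   # continuous dynamics: x' = -x + u
    guards = [lambda X: X[0] < 0.1]           # jump once the state decays below 0.1
    resets = [lambda X: np.array([1.0])]      # reset map: restart the state at 1.0
    edges = E(OE=[0], G=guards, R=resets)     # the single out-edge loops back to mode 0
    q0 = Q(qID=0, f=f0, u=0.0, E=edges)
    hybrid = H([q0], Init_X=[1.0], Init_qID=0, state_names=['x'])
    return hybrid.sim(0, np.array([1.0]), 0.0, t0=0.0, tlim=5.0, Ts=1e-3)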
| gpl-2.0 | 5,182,060,363,343,078,000 | 26.927273 | 73 | 0.513129 | false |
scribusproject/scribus-tools | md_import.py | 1 | 1176 | run_script = True
import scribus
from tempfile import NamedTemporaryFile
try:
import markdown
except:
scribus.messageBox('python-markdown not installed',
'You need to install python-markdown for this script to work', scribus.ICON_WARNING)
run_script = False
run_script &= bool(scribus.getSelectedObject(0)) # We must have at least one selected object
if run_script and scribus.getSelectedObject(1):
result = scribus.messageBox('', 'More than one item selected, load all?',
button1=scribus.BUTTON_CANCEL, button2=scribus.BUTTON_YES)
if result == scribus.BUTTON_CANCEL:
run_script = False
def main():
md_name = scribus.fileDialog("Select a file", 'Markdown (*.md)')
if not md_name:
return
f = NamedTemporaryFile(suffix='.html')
markdown.markdownFromFile(md_name, f)
f.flush()
html_name = f.name
i = 0
while True:
ob_name = scribus.getSelectedObject(i)
if not ob_name:
break
if scribus.getObjectType(ob_name) == 'TextFrame':
scribus.insertHtmlText(html_name, ob_name)
i += 1
if run_script:
main()
| gpl-3.0 | -3,829,810,779,273,234,400 | 24.565217 | 93 | 0.643707 | false |
bruderstein/PythonScript | PythonLib/full/distutils/tests/__init__.py | 3 | 1344 | """Test suite for distutils.
This test suite consists of a collection of test modules in the
distutils.tests package. Each test module has a name starting with
'test' and contains a function test_suite(). The function is expected
to return an initialized unittest.TestSuite instance.
Tests for the command classes in the distutils.command package are
included in distutils.tests as well, instead of using a separate
distutils.command.tests package, since command identification is done
by import rather than matching pre-defined names.
"""
import os
import sys
import unittest
from test.support import run_unittest, save_restore_warnings_filters
here = os.path.dirname(__file__) or os.curdir
def test_suite():
suite = unittest.TestSuite()
for fn in os.listdir(here):
if fn.startswith("test") and fn.endswith(".py"):
modname = "distutils.tests." + fn[:-3]
# bpo-40055: Save/restore warnings filters to leave them unchanged.
# Importing tests imports docutils which imports pkg_resources
# which adds a warnings filter.
with save_restore_warnings_filters():
__import__(modname)
module = sys.modules[modname]
suite.addTest(module.test_suite())
return suite
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-2.0 | 7,258,331,073,946,869,000 | 32.6 | 79 | 0.69494 | false |
tuxerman/cdn-old | cdn/openstack/common/log.py | 1 | 19790 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
from six import moves
from cdn.openstack.common.gettextutils import _ # noqa
from cdn.openstack.common import importutils
from cdn.openstack.common import jsonutils
from cdn.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except moves.configparser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"cdn.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| apache-2.0 | 4,749,787,018,201,120,000 | 34.402504 | 78 | 0.595856 | false |
Code4SA/pmgbilltracker | pmg_scrapers/pmg/scrape_pdf.py | 1 | 2621 | import requests
import json
import os
import tempfile
import re
url = "http://db3sqepoi5n3s.cloudfront.net/files/pmb5_2013.pdf"
url = "http://db3sqepoi5n3s.cloudfront.net/files/130416pmb3-2013.pdf"
url = "http://db3sqepoi5n3s.cloudfront.net/files/131031b18b-2013.pdf"
url = "http://db3sqepoi5n3s.cloudfront.net/files/130621b15-2013.pdf"
url = "http://db3sqepoi5n3s.cloudfront.net/files/131118b55-2013public_administration_management.pdf"
reg_section1 = re.compile(r"section\s+(?:74|75|76|77)\s+bill", re.IGNORECASE)
reg_section2 = re.compile(r"section\s+(?:74|75|76|77)\b", re.IGNORECASE)
reg_introduced_by1 = re.compile(r"""
# Search for something that looks like (Minister of Finance)
\(
(
Minister
[^)]+
)
\)
""", re.VERBOSE | re.IGNORECASE)
reg_introduced_by2 = re.compile(r"""
# Search for something that looks like (Ms J Jacobson MP)
\(
(
[^)]+
MP)
\)
""", re.VERBOSE | re.IGNORECASE)
reg_introduced_by3 = re.compile(r"""
# Search for something that looks like (Select committee on Cooperative ....)
\(
([^)]*Committee[^)]*)
\)
""", re.VERBOSE | re.IGNORECASE)
def get_pdf(url, chunk_size=1000):
fp = tempfile.NamedTemporaryFile("rw", prefix="pmg_", suffix=".pdf", delete=False)
with open(fp.name, "wb") as fp:
resp = requests.get(url, stream=True)
for chunk in resp.iter_content(chunk_size):
fp.write(chunk)
return fp.name
def convert_to_text(path):
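    # Relies on the external "pdftotext" CLI (poppler-utils / xpdf) being on
    # the PATH; it writes a .txt file alongside the downloaded PDF.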
cmd = "pdftotext %s" % path
os.system(cmd)
return path.replace(".pdf", ".txt")
def extract_section(text):
match = reg_section1.search(text)
if not match:
match = reg_section2.search(text)
if not match:
return None
section = match.group()
if "74" in section: return 74
if "75" in section: return 75
if "76" in section: return 76
if "77" in section: return 77
def extract_introduced_by(text):
match = reg_introduced_by1.search(text)
if not match:
match = reg_introduced_by2.search(text)
if not match:
match = reg_introduced_by3.search(text)
if not match:
return "Boom!!"
return match.groups()[0]
def extract_introduction_location(text):
return "NA"
def scrape_pdf(url):
pdf_path = get_pdf(url)
text_path = convert_to_text(pdf_path)
text = open(text_path).read()[0:2000]
js = {
"section" : extract_section(text),
"introduced_by" : extract_introduced_by(text),
"introduced_at" : extract_introduction_location(text)
}
print json.dumps(js, indent=4)
scrape_pdf(url)
| apache-2.0 | 1,909,753,005,026,915,800 | 25.474747 | 100 | 0.638688 | false |
xmdy/ibstats | src/stats/models.py | 1 | 1877 | from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
import random
def get_random_value(start=100, end=100000):
def get_random():
return random.randint(start, end) * 0.01
return get_random
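# Note: the field defaults below receive the closure returned here, so Django
# treats each default as a callable and generates a fresh pseudo-random value
# for every new row instead of fixing one value at import time.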
class Trader(models.Model):
name = models.CharField(verbose_name=_('name'), max_length=128)
balance = models.FloatField(verbose_name=_('balance'), default=get_random_value())
class Meta:
verbose_name = _('trader')
verbose_name_plural = _('traders')
ordering = ['-id']
def __unicode__(self):
return '<Trader: %s, %s>' % (self.id, self.name)
class Transaction(models.Model):
trader = models.ForeignKey(Trader, verbose_name=_('trader'))
time = models.DateTimeField(verbose_name=_('time'), auto_created=True)
amount = models.FloatField(verbose_name=_('amount'), default=get_random_value())
type = models.IntegerField(verbose_name=_('type'), default=1, db_index=True)
class Meta:
verbose_name = _('transaction')
verbose_name_plural = _('transactions')
ordering = ['-id']
def __unicode__(self):
return '<Transaction: %s, %s, %s, %s>' % (self.id, self.time, self.amount, self.trader_id)
class Deal(models.Model):
trader = models.ForeignKey(Trader, verbose_name=_('trader'))
time = models.DateTimeField(verbose_name=_('time'), db_index=True, auto_created=True)
amount = models.FloatField(verbose_name=_('amount'), default=get_random_value())
result_amount = models.FloatField(verbose_name=_('result amount'), default=get_random_value(-100000))
class Meta:
verbose_name = _('deal')
verbose_name_plural = _('deals')
ordering = ['-id']
def __unicode__(self):
        return '<Deal: %s, %s, %s, %s>' % (self.id, self.time, self.amount, self.trader_id)
| gpl-3.0 | -8,023,529,222,036,301,000 | 34.433962 | 105 | 0.639851 | false |