| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5–92 | stringlengths 4–232 | stringclasses 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51–99.9 | int64 15–997 | float64 0.25–0.97 | bool (1 class) |
adobe-apiplatform/umapi-documentation | samples/AddAdobeIDUser.py | 1 | 2288 |
#!/usr/bin/python
#*************************************************************************
#
# ADOBE CONFIDENTIAL
# ___________________
#
# Copyright 2017 Adobe Systems Incorporated
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying it.
# If you have received this file from a source other than Adobe, then your
# use, modification, or distribution of it requires the prior written
# permission of Adobe.
#**************************************************************************
import sys
if sys.version_info[0] == 2:
from ConfigParser import RawConfigParser
if sys.version_info[0] >= 3:
from configparser import RawConfigParser
import json
import requests
import random
import string
# read configuration file
config_file_name = "usermanagement.config"
config = RawConfigParser()
config.read(config_file_name)
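# For reference, the sample reads an INI file with the layout sketched below.
# Section and key names match the config.get() calls that follow; the values
# here are placeholders only, not real endpoints or credentials.
#
#   [server]
#   host = usermanagement.example.io
#   endpoint = /v2/usermanagement
#
#   [enterprise]
#   domain = example.com
#   org_id = YOUR_ORG_ID@AdobeOrg
#   api_key = YOUR_API_KEY
#   access_token = YOUR_ACCESS_TOKEN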
# server parameters
host = config.get("server", "host")
endpoint = config.get("server", "endpoint")
# enterprise parameters
domain = config.get("enterprise", "domain")
org_id = config.get("enterprise", "org_id")
api_key = config.get("enterprise", "api_key")
access_token = config.get("enterprise", "access_token")
user_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12))
# method parameters
url = "https://" + host + endpoint + "/action/" + org_id
headers = {
"Content-type" : "application/json",
"Accept" : "application/json",
"x-api-key" : api_key,
"Authorization" : "Bearer " + access_token
}
json_data = \
[
{
"user" : user_string + "@" + domain,
"do" : [
{
"addAdobeID" : {
"email" : user_string + "@" + domain,
"country" : "US"
}
}
]
}
]
# prepare body
body = json.dumps(json_data)
print("Sending following request body to User Management Action API: " + body)
# send http request
res = requests.post(url, headers=headers, data=body)
# print response
print(res.status_code)
print(res.headers)
print(res.text)
# parse response body
if res.status_code == 200:
res_json_data = json.loads(res.text)
result = res_json_data["result"]
if result == "success":
print("Success");
exit(res.status_code) | mit | 4,264,379,447,331,527,000 | 25.310345 | 95 | 0.628934 | false |
letsencrypt/letsencrypt | certbot/account.py | 1 | 14319 |
"""Creates ACME accounts for server."""
import datetime
import functools
import hashlib
import logging
import os
import shutil
import socket
import josepy as jose
import pyrfc3339
import pytz
import six
import zope.component
from cryptography.hazmat.primitives import serialization
from acme import fields as acme_fields
from acme import messages
from certbot import constants
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.compat import misc
logger = logging.getLogger(__name__)
class Account(object): # pylint: disable=too-few-public-methods
"""ACME protocol registration.
:ivar .RegistrationResource regr: Registration Resource
:ivar .JWK key: Authorized Account Key
:ivar .Meta: Account metadata
:ivar str id: Globally unique account identifier.
"""
class Meta(jose.JSONObjectWithFields):
"""Account metadata
:ivar datetime.datetime creation_dt: Creation date and time (UTC).
:ivar str creation_host: FQDN of host, where account has been created.
.. note:: ``creation_dt`` and ``creation_host`` are useful in
cross-machine migration scenarios.
"""
creation_dt = acme_fields.RFC3339Field("creation_dt")
creation_host = jose.Field("creation_host")
def __init__(self, regr, key, meta=None):
self.key = key
self.regr = regr
self.meta = self.Meta(
# pyrfc3339 drops microseconds, make sure __eq__ is sane
creation_dt=datetime.datetime.now(
tz=pytz.UTC).replace(microsecond=0),
creation_host=socket.getfqdn()) if meta is None else meta
self.id = hashlib.md5(
self.key.key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
).hexdigest()
# Implementation note: Email? Multiple accounts can have the
# same email address. Registration URI? Assigned by the
# server, not guaranteed to be stable over time, nor
# canonical URI can be generated. ACME protocol doesn't allow
# account key (and thus its fingerprint) to be updated...
@property
def slug(self):
"""Short account identification string, useful for UI."""
return "{1}@{0} ({2})".format(pyrfc3339.generate(
self.meta.creation_dt), self.meta.creation_host, self.id[:4])
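# Illustrative output (hypothetical host and date): "web01.example.com@2019-05-01T12:00:00Z (1a2b)"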
def __repr__(self):
return "<{0}({1}, {2}, {3})>".format(
self.__class__.__name__, self.regr, self.id, self.meta)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.key == other.key and self.regr == other.regr and
self.meta == other.meta)
def report_new_account(config):
"""Informs the user about their new ACME account."""
reporter = zope.component.queryUtility(interfaces.IReporter)
if reporter is None:
return
reporter.add_message(
"Your account credentials have been saved in your Certbot "
"configuration directory at {0}. You should make a secure backup "
"of this folder now. This configuration directory will also "
"contain certificates and private keys obtained by Certbot "
"so making regular backups of this folder is ideal.".format(
config.config_dir),
reporter.MEDIUM_PRIORITY)
class AccountMemoryStorage(interfaces.AccountStorage):
"""In-memory account storage."""
def __init__(self, initial_accounts=None):
self.accounts = initial_accounts if initial_accounts is not None else {}
def find_all(self):
return list(six.itervalues(self.accounts))
def save(self, account, acme):
# pylint: disable=unused-argument
if account.id in self.accounts:
logger.debug("Overwriting account: %s", account.id)
self.accounts[account.id] = account
def load(self, account_id):
try:
return self.accounts[account_id]
except KeyError:
raise errors.AccountNotFound(account_id)
class RegistrationResourceWithNewAuthzrURI(messages.RegistrationResource):
"""A backwards-compatible RegistrationResource with a new-authz URI.
Hack: Certbot versions pre-0.11.1 expect to load
new_authzr_uri as part of the account. Because people
sometimes switch between old and new versions, we will
continue to write out this field for some time so older
clients don't crash in that scenario.
"""
new_authzr_uri = jose.Field('new_authzr_uri')
class AccountFileStorage(interfaces.AccountStorage):
"""Accounts file storage.
:ivar .IConfig config: Client configuration
"""
def __init__(self, config):
self.config = config
util.make_or_verify_dir(config.accounts_dir, 0o700, misc.os_geteuid(),
self.config.strict_permissions)
def _account_dir_path(self, account_id):
return self._account_dir_path_for_server_path(account_id, self.config.server_path)
def _account_dir_path_for_server_path(self, account_id, server_path):
accounts_dir = self.config.accounts_dir_for_server_path(server_path)
return os.path.join(accounts_dir, account_id)
@classmethod
def _regr_path(cls, account_dir_path):
return os.path.join(account_dir_path, "regr.json")
@classmethod
def _key_path(cls, account_dir_path):
return os.path.join(account_dir_path, "private_key.json")
@classmethod
def _metadata_path(cls, account_dir_path):
return os.path.join(account_dir_path, "meta.json")
def _find_all_for_server_path(self, server_path):
accounts_dir = self.config.accounts_dir_for_server_path(server_path)
try:
candidates = os.listdir(accounts_dir)
except OSError:
return []
accounts = []
for account_id in candidates:
try:
accounts.append(self._load_for_server_path(account_id, server_path))
except errors.AccountStorageError:
logger.debug("Account loading problem", exc_info=True)
if not accounts and server_path in constants.LE_REUSE_SERVERS:
# find all for the next link down
prev_server_path = constants.LE_REUSE_SERVERS[server_path]
prev_accounts = self._find_all_for_server_path(prev_server_path)
# if we found something, link to that
if prev_accounts:
try:
self._symlink_to_accounts_dir(prev_server_path, server_path)
except OSError:
return []
accounts = prev_accounts
return accounts
def find_all(self):
return self._find_all_for_server_path(self.config.server_path)
def _symlink_to_account_dir(self, prev_server_path, server_path, account_id):
prev_account_dir = self._account_dir_path_for_server_path(account_id, prev_server_path)
new_account_dir = self._account_dir_path_for_server_path(account_id, server_path)
os.symlink(prev_account_dir, new_account_dir)
def _symlink_to_accounts_dir(self, prev_server_path, server_path):
accounts_dir = self.config.accounts_dir_for_server_path(server_path)
if os.path.islink(accounts_dir):
os.unlink(accounts_dir)
else:
os.rmdir(accounts_dir)
prev_account_dir = self.config.accounts_dir_for_server_path(prev_server_path)
os.symlink(prev_account_dir, accounts_dir)
def _load_for_server_path(self, account_id, server_path):
account_dir_path = self._account_dir_path_for_server_path(account_id, server_path)
if not os.path.isdir(account_dir_path): # isdir is also true for symlinks
if server_path in constants.LE_REUSE_SERVERS:
prev_server_path = constants.LE_REUSE_SERVERS[server_path]
prev_loaded_account = self._load_for_server_path(account_id, prev_server_path)
# we didn't error so we found something, so create a symlink to that
accounts_dir = self.config.accounts_dir_for_server_path(server_path)
# If accounts_dir isn't empty, make an account specific symlink
if os.listdir(accounts_dir):
self._symlink_to_account_dir(prev_server_path, server_path, account_id)
else:
self._symlink_to_accounts_dir(prev_server_path, server_path)
return prev_loaded_account
else:
raise errors.AccountNotFound(
"Account at %s does not exist" % account_dir_path)
try:
with open(self._regr_path(account_dir_path)) as regr_file:
regr = messages.RegistrationResource.json_loads(regr_file.read())
with open(self._key_path(account_dir_path)) as key_file:
key = jose.JWK.json_loads(key_file.read())
with open(self._metadata_path(account_dir_path)) as metadata_file:
meta = Account.Meta.json_loads(metadata_file.read())
except IOError as error:
raise errors.AccountStorageError(error)
acc = Account(regr, key, meta)
if acc.id != account_id:
raise errors.AccountStorageError(
"Account ids mismatch (expected: {0}, found: {1}".format(
account_id, acc.id))
return acc
def load(self, account_id):
return self._load_for_server_path(account_id, self.config.server_path)
def save(self, account, acme):
self._save(account, acme, regr_only=False)
def save_regr(self, account, acme):
"""Save the registration resource.
:param Account account: account whose regr should be saved
"""
self._save(account, acme, regr_only=True)
def delete(self, account_id):
"""Delete registration info from disk
:param account_id: id of account which should be deleted
"""
account_dir_path = self._account_dir_path(account_id)
if not os.path.isdir(account_dir_path):
raise errors.AccountNotFound(
"Account at %s does not exist" % account_dir_path)
# Step 1: Delete account specific links and the directory
self._delete_account_dir_for_server_path(account_id, self.config.server_path)
# Step 2: Remove any accounts links and directories that are now empty
if not os.listdir(self.config.accounts_dir):
self._delete_accounts_dir_for_server_path(self.config.server_path)
def _delete_account_dir_for_server_path(self, account_id, server_path):
link_func = functools.partial(self._account_dir_path_for_server_path, account_id)
nonsymlinked_dir = self._delete_links_and_find_target_dir(server_path, link_func)
shutil.rmtree(nonsymlinked_dir)
def _delete_accounts_dir_for_server_path(self, server_path):
link_func = self.config.accounts_dir_for_server_path
nonsymlinked_dir = self._delete_links_and_find_target_dir(server_path, link_func)
os.rmdir(nonsymlinked_dir)
def _delete_links_and_find_target_dir(self, server_path, link_func):
"""Delete symlinks and return the nonsymlinked directory path.
:param str server_path: file path based on server
:param callable link_func: callable that returns possible links
given a server_path
:returns: the final, non-symlinked target
:rtype: str
"""
dir_path = link_func(server_path)
# does an appropriate directory link to me? if so, make sure that's gone
reused_servers = {}
for k in constants.LE_REUSE_SERVERS:
reused_servers[constants.LE_REUSE_SERVERS[k]] = k
# is there a next one up?
possible_next_link = True
while possible_next_link:
possible_next_link = False
if server_path in reused_servers:
next_server_path = reused_servers[server_path]
next_dir_path = link_func(next_server_path)
if os.path.islink(next_dir_path) and os.readlink(next_dir_path) == dir_path:
possible_next_link = True
server_path = next_server_path
dir_path = next_dir_path
# if there's not a next one up to delete, then delete me
# and whatever I link to
while os.path.islink(dir_path):
target = os.readlink(dir_path)
os.unlink(dir_path)
dir_path = target
return dir_path
def _save(self, account, acme, regr_only):
account_dir_path = self._account_dir_path(account.id)
util.make_or_verify_dir(account_dir_path, 0o700, misc.os_geteuid(),
self.config.strict_permissions)
try:
with open(self._regr_path(account_dir_path), "w") as regr_file:
regr = account.regr
# If we have a value for new-authz, save it for forwards
# compatibility with older versions of Certbot. If we don't
# have a value for new-authz, this is an ACMEv2 directory where
# an older version of Certbot won't work anyway.
if hasattr(acme.directory, "new-authz"):
regr = RegistrationResourceWithNewAuthzrURI(
new_authzr_uri=acme.directory.new_authz,
body={},
uri=regr.uri)
else:
regr = messages.RegistrationResource(
body={},
uri=regr.uri)
regr_file.write(regr.json_dumps())
if not regr_only:
with util.safe_open(self._key_path(account_dir_path),
"w", chmod=0o400) as key_file:
key_file.write(account.key.json_dumps())
with open(self._metadata_path(
account_dir_path), "w") as metadata_file:
metadata_file.write(account.meta.json_dumps())
except IOError as error:
raise errors.AccountStorageError(error)
| apache-2.0 | -8,943,619,492,770,338,000 | 39.563739 | 95 | 0.621342 | false |
NMTHydro/SWACodingMeeting | src/meeting3/be.py | 1 | 1317 |
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
def get_file_contents(p, root=None):
if root is None:
root = os.path.expanduser('~')
pp = os.path.join(root, p)
with open(pp, 'r') as rfile:
return [line.strip() for line in rfile]
def get_file_contents2(name):
root = os.path.expanduser('~')
pp = os.path.join(root, name)
with open(pp, 'r') as rfile:
return rfile.readlines()
# ============= EOF =============================================
| apache-2.0 | -8,939,215,115,441,067,000 | 34.594595 | 81 | 0.538345 | false |
postlund/home-assistant | homeassistant/components/konnected/binary_sensor.py | 1 | 2668 |
"""Support for wired binary sensors attached to a Konnected device."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_NAME,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DOMAIN as KONNECTED_DOMAIN, SIGNAL_SENSOR_UPDATE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up binary sensors attached to a Konnected device from a config entry."""
data = hass.data[KONNECTED_DOMAIN]
device_id = config_entry.data["id"]
sensors = [
KonnectedBinarySensor(device_id, pin_num, pin_data)
for pin_num, pin_data in data[CONF_DEVICES][device_id][
CONF_BINARY_SENSORS
].items()
]
async_add_entities(sensors)
class KonnectedBinarySensor(BinarySensorDevice):
"""Representation of a Konnected binary sensor."""
def __init__(self, device_id, zone_num, data):
"""Initialize the Konnected binary sensor."""
self._data = data
self._device_id = device_id
self._zone_num = zone_num
self._state = self._data.get(ATTR_STATE)
self._device_class = self._data.get(CONF_TYPE)
self._unique_id = f"{device_id}-{zone_num}"
self._name = self._data.get(CONF_NAME)
@property
def unique_id(self) -> str:
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(KONNECTED_DOMAIN, self._device_id)},
}
async def async_added_to_hass(self):
"""Store entity_id and register state change callback."""
self._data[ATTR_ENTITY_ID] = self.entity_id
async_dispatcher_connect(
self.hass, SIGNAL_SENSOR_UPDATE.format(self.entity_id), self.async_set_state
)
@callback
def async_set_state(self, state):
"""Update the sensor's state."""
self._state = state
self.async_schedule_update_ha_state()
| apache-2.0 | 1,640,747,530,150,869,800 | 28.644444 | 88 | 0.631184 | false |
pgmillon/ansible | lib/ansible/module_utils/basic.py | 1 | 107336 |
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import __main__
import atexit
import errno
import datetime
import grp
import fcntl
import locale
import os
import pwd
import platform
import re
import select
import shlex
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import time
import traceback
import types
from collections import deque
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.common.text.converters import (
jsonify,
container_to_bytes as json_dict_unicode_to_bytes,
container_to_text as json_dict_bytes_to_unicode,
)
from ansible.module_utils.common.text.formatters import (
lenient_lowercase,
bytes_to_human,
human_to_bytes,
SIZE_RANGES,
)
try:
from ansible.module_utils.common._json_compat import json
except ImportError as e:
print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e)))
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
# we may have been able to import md5 but it could still not be available
try:
hashlib.md5()
except ValueError:
AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
except Exception:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except Exception:
pass
from ansible.module_utils.common._collections_compat import (
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import (
_PERM_BITS as PERM_BITS,
_EXEC_PERM_BITS as EXEC_PERM_BITS,
_DEFAULT_PERM as DEFAULT_PERM,
is_executable,
format_attributes,
get_flags_from_attributes,
)
from ansible.module_utils.common.sys_info import (
get_distribution,
get_distribution_version,
get_platform_subclass,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.common.parameters import (
handle_aliases,
list_deprecations,
list_no_log_values,
PASS_VARS,
PASS_BOOLS,
)
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils.common.validation import (
check_missing_parameters,
check_mutually_exclusive,
check_required_arguments,
check_required_by,
check_required_if,
check_required_one_of,
check_required_together,
count_terms,
check_type_bool,
check_type_bits,
check_type_bytes,
check_type_float,
check_type_int,
check_type_jsonarg,
check_type_list,
check_type_dict,
check_type_path,
check_type_raw,
check_type_str,
safe_eval,
)
from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
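# Heuristic match for secret-bearing parameter names, e.g. "password", "db_pass",
# "passphrase" or "admin-passwd"; it is intentionally broad rather than exhaustive.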
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
# These are things we want. About setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
#
# Deprecated functions
#
def get_platform():
'''
**Deprecated** Use :py:func:`platform.system` directly.
:returns: Name of the platform the module is running on in a native string
Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
the result of calling :py:func:`platform.system`.
'''
return platform.system()
# End deprecated functions
#
# Compat shims
#
def load_platform_subclass(cls, *args, **kwargs):
"""**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
platform_cls = get_platform_subclass(cls)
return super(cls, platform_cls).__new__(platform_cls)
def get_all_subclasses(cls):
"""**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
return list(_get_all_subclasses(cls))
# End compat shims
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': 'level3': [True]} }`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
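# Illustrative behaviour: remove_values({'token': 'hunter2', 'msg': 'ok'}, {'hunter2'})
# returns {'token': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', 'msg': 'ok'}; nested
# containers are walked iteratively via deferred_removals instead of recursing.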
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
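# Illustrative result: "https://user:[email protected]/x" becomes
# "https://user:********@host.example/x" -- only the text between ':' and '@' is masked.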
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
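# Typical argument_spec usage (illustrative names): url=dict(fallback=(env_fallback, ['EXAMPLE_URL']))
# pulls the value from the EXAMPLE_URL environment variable when the parameter is omitted.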
def missing_required_lib(library, reason=None, url=None):
hostname = platform.node()
msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
if reason:
msg += " This is required %s." % reason
if url:
msg += " See %s for more info." % url
return msg + " Please read module documentation and install in the appropriate location"
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None, required_by=None):
'''
Common code for quickly building an ansible module in Python
(although you can write modules with anything that can return JSON).
See :ref:`developing_modules_general` for a general introduction
and :ref:`developing_program_flow_modules` for more detailed explanation.
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.required_by = required_by
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self._string_conversion_action = ''
self.aliases = {}
self._legal_inputs = []
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except (ValueError, TypeError) as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._check_required_by(required_by)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
# This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
@property
def tmpdir(self):
# if _ansible_tmpdir was not set and we have a remote_tmp,
# the module needs to create it and clean it up once finished.
# otherwise we create our own module tmp dir from the system defaults
if self._tmpdir is None:
basedir = None
if self._remote_tmp is not None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if basedir is not None and not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
except (OSError, IOError) as e:
self.warn("Unable to use %s as temporary directory, "
"failing back to system: %s" % (basedir, to_native(e)))
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
"created with a mode of 0700, this may cause"
" issues when running as another user. To "
"avoid this, create the remote_tmp dir with "
"the correct permissions manually" % basedir)
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
except (OSError, IOError) as e:
self.fail_json(
msg="Failed to create remote module tmp path at dir %s "
"with prefix %s: %s" % (basedir, basefile, to_native(e))
)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except Exception:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
# prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno in (errno.EPERM, errno.EROFS): # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
# The user(s) where it's all about is the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two list of equal length, one contains the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
return kwargs
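    # Illustrative sketch (added, not in the original source): add_path_info is
    # normally the last step before returning results from a file-handling module,
    # so the caller's dict gains ownership and mode details. Assuming `module` is an
    # AnsibleModule instance:
    #
    #   result = {'path': '/etc/motd', 'changed': True}
    #   result = module.add_path_info(result)
    #   # result now also has 'owner', 'group', 'mode', 'state', 'size' (and
    #   # 'secontext' when SELinux is enabled), provided the path exists.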
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# this uses exceptions as it happens before we can safely call fail_json
alias_results, self._legal_inputs = handle_aliases(spec, param)
return alias_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
self.no_log_values.update(list_no_log_values(spec, param))
self._deprecations.extend(list_deprecations(spec, param))
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for k in list(param.keys()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
for k in PASS_VARS:
# handle setting internal properties from internal ansible vars
param_key = '_ansible_%s' % k
if param_key in param:
if k in PASS_BOOLS:
setattr(self, PASS_VARS[k][0], self.boolean(param[param_key]))
else:
setattr(self, PASS_VARS[k][0], param[param_key])
# clean up internal top level params:
if param_key in self.params:
del self.params[param_key]
else:
# use defaults if not already set
if not hasattr(self, PASS_VARS[k][0]):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
if param is None:
param = self.params
return count_terms(check, param)
def _check_mutually_exclusive(self, spec, param=None):
if param is None:
param = self.params
try:
check_mutually_exclusive(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_one_of(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_together(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_by(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_by(spec, param)
except TypeError as e:
self.fail_json(msg=to_native(e))
def _check_required_arguments(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
try:
check_required_arguments(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
try:
check_required_if(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
return safe_eval(value, locals, include_exceptions)
def _check_type_str(self, value):
opts = {
'error': False,
'warn': False,
'ignore': True
}
# Ignore, warn, or error when converting to a string.
allow_conversion = opts.get(self._string_conversion_action, True)
try:
return check_type_str(value, allow_conversion)
except TypeError:
common_msg = 'quote the entire value to ensure it does not change.'
if self._string_conversion_action == 'error':
msg = common_msg.capitalize()
raise TypeError(to_native(msg))
elif self._string_conversion_action == 'warn':
msg = ('The value {0!r} (type {0.__class__.__name__}) in a string field was converted to {1!r} (type string). '
'If this does not look like what you expect, {2}').format(value, to_text(value), common_msg)
self.warn(to_native(msg))
return to_native(value, errors='surrogate_or_strict')
def _check_type_list(self, value):
return check_type_list(value)
def _check_type_dict(self, value):
return check_type_dict(value)
def _check_type_bool(self, value):
return check_type_bool(value)
def _check_type_int(self, value):
return check_type_int(value)
def _check_type_float(self, value):
return check_type_float(value)
def _check_type_path(self, value):
return check_type_path(value)
def _check_type_jsonarg(self, value):
return check_type_jsonarg(value)
def _check_type_raw(self, value):
return check_type_raw(value)
def _check_type_bytes(self, value):
return check_type_bytes(value)
def _check_type_bits(self, value):
return check_type_bits(value)
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._check_required_by(v.get('required_by', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _get_wanted_type(self, wanted, k):
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
return type_checker, wanted
def _handle_elements(self, wanted, param, values):
type_checker, wanted_name = self._get_wanted_type(wanted, param)
validated_params = []
for value in values:
try:
validated_params.append(type_checker(value))
except (TypeError, ValueError) as e:
msg = "Elements value for option %s" % param
if self._options_context:
msg += " found in '%s'" % " -> ".join(self._options_context)
msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_name, to_native(e))
self.fail_json(msg=msg)
return validated_params
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
type_checker, wanted_name = self._get_wanted_type(wanted, k)
try:
param[k] = type_checker(value)
wanted_elements = v.get('elements', None)
if wanted_elements:
if wanted != 'list' or not isinstance(param[k], list):
                        msg = "Invalid type %s for option '%s'" % (wanted_name, k)
if self._options_context:
msg += " found in '%s'." % " -> ".join(self._options_context)
msg += ", elements value check is supported only with 'list' type"
self.fail_json(msg=msg)
param[k] = self._handle_elements(wanted_elements, k, param[k])
except (TypeError, ValueError) as e:
msg = "argument %s is of type %s" % (k, type(value))
if self._options_context:
msg += " found in '%s'." % " -> ".join(self._options_context)
msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
self.fail_json(msg=msg)
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except Exception:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except Exception:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
Find system executable in PATH.
:param arg: The executable to find.
:param required: if executable is not found and required is ``True``, fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
:returns: if found return full path; otherwise return None
'''
bin_path = None
try:
bin_path = get_bin_path(arg, required, opt_dirs)
except ValueError as e:
self.fail_json(msg=to_text(e))
return bin_path
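    # Illustrative usage sketch (added here, not part of the original file). Assuming
    # `module` is an AnsibleModule instance, a module shelling out to git might do:
    #
    #   git = module.get_bin_path('git', required=True, opt_dirs=['/usr/local/bin'])
    #   rc, out, err = module.run_command([git, 'rev-parse', 'HEAD'])
    #
    # With required=True a missing executable ends the run via fail_json, so the
    # caller does not need its own existence check.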
def boolean(self, arg):
'''Convert the argument to a boolean'''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
elif isinstance(d, Mapping):
self.deprecate(d['msg'], version=d.get('version', None))
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# Add traceback if debug or high verbosity and it is missing
# NOTE: Badly named as exception, it really always has been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
if PY2:
# On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
''.join(traceback.format_tb(sys.exc_info()[2]))
else:
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
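    # Illustrative sketch (added, not in the original source): exit_json and fail_json
    # are the two normal ways a module finishes. Assuming `module` is an AnsibleModule
    # instance and update_motd() is a hypothetical helper:
    #
    #   if module.check_mode:
    #       module.exit_json(changed=False, msg='would have updated /etc/motd')
    #   try:
    #       update_motd()
    #   except OSError as e:
    #       module.fail_json(msg='could not update motd: %s' % to_native(e))
    #   module.exit_json(changed=True)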
def fail_on_missing_params(self, required_params=None):
if not required_params:
return
try:
check_missing_parameters(self.params, required_params)
except TypeError as e:
self.fail_json(msg=to_native(e))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
b_filename = to_bytes(filename, errors='surrogate_or_strict')
if not os.path.exists(b_filename):
return None
if os.path.isdir(b_filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(b_filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
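    # Illustrative sketch (added, not original): digest_from_file accepts either an
    # algorithm name or a hash object, so these two calls are equivalent assuming
    # `module` is an AnsibleModule instance:
    #
    #   checksum = module.digest_from_file('/etc/hosts', 'sha256')
    #   checksum = module.digest_from_file('/etc/hosts', hashlib.sha256())
    #
    # A return value of None means the path does not exist, which lets callers tell
    # "missing file" apart from "empty file" without an extra os.path.exists() call.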
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
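    # Illustrative sketch (added, not part of the original file): modules that expose
    # the common `backup` option usually call backup_local just before overwriting a
    # file and report the resulting path. Assuming `module` is an AnsibleModule
    # instance and `dest` is the file about to be replaced:
    #
    #   backup_file = None
    #   if module.params.get('backup') and os.path.exists(dest):
    #       backup_file = module.backup_local(dest)
    #       # e.g. '/etc/motd.1234.2021-01-01@12:00:00~'
    #   module.exit_json(changed=True, backup_file=backup_file)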
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest, returns true on success
it uses os.rename to ensure this as it is an atomic operation, rest of the function is
to work around limitations, corner cases and ensure selinux context is saved if possible'''
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
                                    self.fail_json(msg='Unable to make %s into %s, failed final rename from %s: %s' %
(src, dest, b_tmp_dest_name, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
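    # Illustrative sketch (added, not original): the usual pattern is to write new
    # content to a temporary file and let atomic_move swap it into place, preserving
    # the mode/ownership/SELinux context of an existing destination. Assuming `module`
    # is an AnsibleModule instance and new_content is a hypothetical bytes payload:
    #
    #   fd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
    #   with os.fdopen(fd, 'wb') as f:
    #       f.write(new_content)
    #   module.atomic_move(tmpfile, '/etc/motd',
    #                      unsafe_writes=module.params.get('unsafe_writes', False))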
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), self.get_buffer_size(file_descriptor))
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def _restore_signal_handlers(self):
# Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
if PY2 and sys.platform != 'win32':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
dictates whether ``~`` is expanded in paths and environment variables
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
:kw pass_fds: When running on python3 this argument
dictates which file descriptors should be passed
to an underlying ``Popen`` constructor.
:kw before_communicate_callback: This function will be called
after ``Popen`` object will be created
but before communicating to the process.
(``Popen`` object will be passed to callback as a first argument)
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args])
else:
args = to_bytes(args, errors='surrogate_or_strict')
# not set explicitly, check if set by controller
if executable:
executable = to_bytes(executable, errors='surrogate_or_strict')
args = [executable, b'-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand ``~`` in paths, and all environment vars
if expand_user_and_vars:
args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None]
else:
args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module.py and explode, the remote lib path will resemble:
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system:
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=self._restore_signal_handlers,
)
if PY3 and pass_fds:
kwargs["pass_fds"] = pass_fds
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict')
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
if before_communicate_callback:
before_communicate_callback(cmd)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
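    # Illustrative sketches (added, not original) of the run_command contract described
    # in the docstring above; `module` is an AnsibleModule instance, `payload` and
    # `src_dir` are hypothetical values:
    #
    #   # list form -> shell=False, no quoting worries
    #   rc, out, err = module.run_command(['ls', '-l', '/tmp'], check_rc=True)
    #
    #   # feed stdin and keep raw bytes by disabling decoding
    #   rc, out, err = module.run_command(['gzip', '-c'], data=payload,
    #                                     binary_data=True, encoding=None)
    #
    #   # temporary working directory and environment for a single call
    #   rc, out, err = module.run_command(['make'], cwd=src_dir,
    #                                     environ_update={'LC_ALL': 'C'})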
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
@staticmethod
def get_buffer_size(fd):
try:
# 1032 == FZ_GETPIPE_SZ
buffer_size = fcntl.fcntl(fd, 1032)
except Exception:
try:
# not as exact as above, but should be good enough for most platforms that fail the previous call
buffer_size = select.PIPE_BUF
except Exception:
buffer_size = 9000 # use sane default JIC
return buffer_size
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
| gpl-3.0 | 8,027,137,637,342,028,000 | 39.065696 | 155 | 0.554055 | false |
tchellomello/home-assistant | homeassistant/components/arcam_fmj/device_trigger.py | 1 | 2316 | """Provides device automations for Arcam FMJ Receiver control."""
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, Event, HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, EVENT_TURN_ON
TRIGGER_TYPES = {"turn_on"}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Arcam FMJ Receiver control devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain == "media_player":
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_on",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "turn_on":
entity_id = config[CONF_ENTITY_ID]
@callback
def _handle_event(event: Event):
if event.data[ATTR_ENTITY_ID] == entity_id:
hass.async_run_job(
action,
{"trigger": {**config, "description": f"{DOMAIN} - {entity_id}"}},
event.context,
)
return hass.bus.async_listen(EVENT_TURN_ON, _handle_event)
return lambda: None
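# Illustrative sketch (added, not part of the original component): a trigger dict that
# satisfies TRIGGER_SCHEMA above and could be returned by async_get_triggers looks
# roughly like this (the IDs are made up):
#
#   {
#       "platform": "device",
#       "domain": DOMAIN,
#       "device_id": "abc123",
#       "entity_id": "media_player.arcam_fmj",
#       "type": "turn_on",
#   }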
| apache-2.0 | -999,182,314,678,034,800 | 29.88 | 86 | 0.629534 | false |
kcompher/abstract_rendering | examples/numpyDemo.py | 1 | 3768 | #!/usr/bin/env python
"""
Draws a colormapped image plot
- Left-drag pans the plot.
- Mousewheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Abstract rendering imports
from __future__ import print_function, division, absolute_import
import abstract_rendering.util as util
import abstract_rendering.core as core
import abstract_rendering.numeric as numeric
import abstract_rendering.categories as categories
import abstract_rendering.infos as infos
import abstract_rendering.numpyglyphs as npg
from timer import Timer
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
red = util.Color(255,0,0,255)
green = util.Color(0,255,0,255)
blue = util.Color(0,0,255,255)
purple = util.Color(125,0,255,255)
white = util.Color(255,255,255,255)
black = util.Color(0,0,0,255)
clear = util.Color(0,0,0,0)
    with Timer("Loading") as arTimer:
#glyphs = npg.load_csv("../data/circlepoints.csv", 1, 2, 3, 4)
glyphs = npg.load_hdf("../data/CensusTracts.hdf5", "__data__", "LAT", "LON")
#glyphs = npg.load_hdf("../data/tweets-subset.hdf", "test",
# "longitude", "latitude", vc="lang_primary")
screen = (800,600)
ivt = util.zoom_fit(screen,glyphs.bounds())
with Timer("Abstract-Render") as arTimer:
image = core.render(glyphs,
infos.encode(["Arabic","English","Turkish","Russian"]),
npg.PointCountCategories(),
npg.Spread(2) + categories.HDAlpha([red, blue, green, purple, black], alphamin=.3, log=True),
screen,
ivt)
# image = core.render(glyphs,
# infos.valAt(4,0),
# npg.PointCount(),
# npg.Spread(1) + numeric.BinarySegment(white, black, 1),
# screen,
# ivt)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("imagedata", image)
# Create the plot
plot = Plot(pd)
img_plot = plot.img_plot("imagedata")[0]
# Tweak some of the plot properties
plot.title = "Abstract Rendering"
plot.padding = 50
return plot
#===============================================================================
# Attributes to use for the plot view.
size=(800,600)
title="Basic Colormapped Image Plot"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
| bsd-3-clause | 3,856,248,107,765,127,700 | 35.230769 | 119 | 0.538482 | false |
jmgilman/Neolib | neolib/inventory/UserInventory.py | 1 | 2623 | """:mod:`UserInventory` -- Provides an interface for a user inventory
.. module:: UserInventory
:synopsis: Provides an interface for a user inventory
.. moduleauthor:: Joshua Gilman <[email protected]>
"""
from neolib.exceptions import parseException
from neolib.exceptions import invalidUser
from neolib.inventory.Inventory import Inventory
from neolib.item.Item import Item
import logging
class UserInventory(Inventory):
"""Represents a user's inventory
Sub-classes the Inventory class to provide an interface for a user's
inventory. Will automatically populate itself with all items
in a user's inventory upon initialization.
Example
>>> usr.loadInventory
>>> for item in usr.inventory:
... print item.name
Blue Kougra Plushie
Lu Codestone
...
"""
usr = None
def __init__(self, usr):
if not usr:
raise invalidUser
self.usr = usr
def load(self):
"""Loads a user's inventory
Queries the user's inventory, parses each item, and adds
each item to the inventory. Note this class should not be
used directly, but rather usr.inventory should be used to
access a user's inventory.
Parameters
usr (User) - The user to load the inventory for
Raises
invalidUser
parseException
"""
self.items = {}
pg = self.usr.getPage("http://www.neopets.com/objects.phtml?type=inventory")
# Indicates an empty inventory
if "You aren't carrying anything" in pg.content:
return
try:
for row in pg.find_all("td", "contentModuleContent")[1].table.find_all("tr"):
for item in row.find_all("td"):
name = item.text
                    # Some item names contain extra information encapsulated in parentheses
if "(" in name:
name = name.split("(")[0]
tmpItem = Item(name)
tmpItem.id = item.a['onclick'].split("(")[1].replace(");", "")
tmpItem.img = item.img['src']
tmpItem.desc = item.img['alt']
tmpItem.usr = self.usr
self.items[name] = tmpItem
except Exception:
logging.getLogger("neolib.inventory").exception("Unable to parse user inventory.", {'pg': pg})
raise parseException
| mit | 2,876,575,541,733,346,300 | 31.7875 | 106 | 0.556615 | false |
ddanier/django_price | django_price/models.py | 1 | 1728 | # coding: utf-8
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django_deferred_polymorph.models import SubDeferredPolymorphBaseModel
import decimal
import datetime
from .manager import TaxManager
# TODO: Versioned Tax (Tax should NEVER get changed, as this may
# create an invalid state if you store net + gross for invoices)
class Tax(SubDeferredPolymorphBaseModel):
name = models.CharField(max_length=25)
created = models.DateTimeField(editable=False, default=datetime.datetime.now)
modified = models.DateTimeField(editable=False, auto_now=True)
objects = TaxManager()
def __unicode__(self):
return self.name
@property
def unique_id(self):
return self.get_tax().unique_id
def amount(self, net):
return self.get_tax().amount(net)
def apply(self, net):
return self.get_tax().apply(net)
def reverse(self, gross):
return self.get_tax().reverse(gross)
def get_tax(self):
raise RuntimeError('subclass must implement this')
class LinearTax(Tax):
# TODO: PercentField?
percent = models.DecimalField(max_digits=6, decimal_places=3)
def get_tax(self):
from . import LinearTax
tax = LinearTax(self.name, self.percent)
tax._unique_id = 'linear-pk-%d' % self.pk
tax._model_instance = self
return tax
class MultiTax(Tax):
taxes = models.ManyToManyField(Tax, related_name='+')
def get_tax(self):
from . import MultiTax
tax = MultiTax(list(self.taxes.all()), self.name)
tax._unique_id = 'multi-pk-%d' % self.pk
tax._model_instance = self
return tax
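# Illustrative sketch (added; the exact django_price API is assumed from the imports
# and the delegating methods above): a linear VAT rate applied to a net price.
#
#   vat = LinearTax.objects.create(name='VAT 19%', percent=decimal.Decimal('19.000'))
#   tax = vat.get_tax()                          # plain django_price LinearTax object
#   gross = tax.apply(decimal.Decimal('10.00'))  # net plus tax.amount(net)
#   net = tax.reverse(gross)                     # back to the net amount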
| bsd-3-clause | -4,973,895,535,541,663,000 | 27.327869 | 81 | 0.656829 | false |
sdelements/django-multi-import | multi_import/helpers/fields.py | 1 | 1063 | from rest_framework import relations
from tablib.compat import unicode
from multi_import.helpers import strings
list_separator = ";"
def to_string_representation(field, value):
if hasattr(field, "to_string_representation"):
return field.to_string_representation(value)
if isinstance(field, relations.ManyRelatedField):
if value is None:
value = []
return unicode(list_separator).join(
[to_string_representation(field.child_relation, val) for val in value]
)
if value is None:
value = ""
return strings.normalize_string(unicode(value))
def from_string_representation(field, value):
if hasattr(field, "from_string_representation"):
return field.from_string_representation(value)
if not isinstance(field, relations.ManyRelatedField):
return value
if not value:
return []
return [
from_string_representation(field.child_relation, val)
for val in value.split(list_separator)
if val and not val.isspace()
]
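# Illustrative sketch (added, not part of the original module): for a ManyRelatedField
# the two helpers round-trip a semicolon-separated cell, assuming the child relation's
# own string hooks pass values through unchanged:
#
#   to_string_representation(field, ['red', 'green'])   # -> u'red;green'
#   from_string_representation(field, 'red;green')      # -> ['red', 'green']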
| mit | -3,567,508,909,733,419,000 | 24.309524 | 82 | 0.669802 | false |
develersrl/rooms | editor/structdata/area.py | 1 | 1839 | #!/usr/bin/env python
from origin import OriginData
from structdata.project import g_project
class Area(OriginData):
"""
    Area manages physical coordinates and sizes. In the XML the information is
    stored in logical coordinates; the conversion is performed when the project
    is loaded.
"""
tag_name = 'area'
def __init__(self, id, x, y, width, height, event):
super(Area, self).__init__()
self.id = id
self.x = str(float(x) * float(g_project.data['world'].width))
self.y = str(float(y) * float(g_project.data['world'].height))
self.height = str(float(height) * float(g_project.data['world'].height))
self.width = str(float(width) * float(g_project.data['world'].width))
self.event = event
def setName(self, name):
self.id = name
g_project.notify()
def valueForKey(self, key, value):
if key == "x" or key == "width":
return str(round(float(value) / float(g_project.data['world'].width), 2))
elif key == "y" or key == "height":
return str(round(float(value) / float(g_project.data['world'].height), 2))
else:
return value
@staticmethod
def create(room, x, y, width, height, event=""):
number_of_new_area = 0
for area in room.areas:
if area.id.startswith("new_area_"):
number_of_new_area += 1
area = Area("new_area_%d" % (number_of_new_area + 1),
str(x / float(g_project.data['world'].width)),
str(y / float(g_project.data['world'].height)),
str(width / float(g_project.data['world'].width)),
str(height / float(g_project.data['world'].height)),
event)
room.areas.append(area)
return area
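    # Illustrative sketch (added, not part of the original file), assuming a world of
    # 800x600 physical pixels:
    #
    #   area = Area.create(room, 80, 60, 160, 120)  # physical pixels in; __init__
    #                                                # scales the logical fractions back
    #   area.valueForKey("x", area.x)                # -> "0.1", the logical fraction
    #                                                #    written to the XML on save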
| mit | 4,306,555,536,022,000,600 | 37.3125 | 86 | 0.567156 | false |
abadger/Bento | doc/source/conf.py | 1 | 6743 | # -*- coding: utf-8 -*-
#
# Bento documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 3 12:53:13 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
import bento
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Bento'
copyright = u'2009-2011, David Cournapeau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = bento.__version__
# The full version, including alpha/beta/rc tags.
release = "%s-git%s" % (version, bento.__git_revision__[:10])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bentodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bento.tex', u'Bento Documentation',
u'David Cournapeau', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-3-clause | -7,131,424,483,700,285,000 | 32.381188 | 148 | 0.71437 | false |
stormi/tsunami | src/primaires/scripting/fonctions/fermee.py | 1 | 2507 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction fermee."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Test si une porte est fermée ou non."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.fermee, "Salle", "str")
@staticmethod
def fermee(salle, nom_sortie):
"""Retourne vrai si la sortie de la salle est fermée, faux sinon.
NOTE: si la sortie indiquée n'est pas une porte, une erreur est envoyée.
"""
sortie = salle.sorties.get_sortie_par_nom_ou_direction(nom_sortie)
if sortie is None:
raise ErreurExecution("la sortie {} n'existe pas dans la " \
"salle {}".format(repr(nom_sortie), repr(salle.ident)))
if not sortie.porte:
raise ErreurExecution("cette sortie n'a aucune porte")
return sortie.porte.fermee
| bsd-3-clause | 7,735,413,650,236,539,000 | 40.716667 | 80 | 0.73312 | false |
gurneyalex/odoo | addons/mrp/__manifest__.py | 4 | 1809 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Manufacturing',
'version': '2.0',
'website': 'https://www.odoo.com/page/manufacturing',
'category': 'Manufacturing/Manufacturing',
'sequence': 16,
'summary': 'Manufacturing Orders & BOMs',
'depends': ['product', 'stock', 'resource'],
'description': "",
'data': [
'security/mrp_security.xml',
'security/ir.model.access.csv',
'data/mrp_data.xml',
'wizard/mrp_product_produce_views.xml',
'wizard/change_production_qty_views.xml',
'wizard/mrp_workcenter_block_view.xml',
'wizard/stock_warn_insufficient_qty_views.xml',
'views/mrp_views_menus.xml',
'views/stock_move_views.xml',
'views/mrp_workorder_views.xml',
'views/mrp_workcenter_views.xml',
'views/mrp_production_views.xml',
'views/mrp_routing_views.xml',
'views/mrp_bom_views.xml',
'views/product_views.xml',
'views/stock_warehouse_views.xml',
'views/stock_picking_views.xml',
'views/mrp_unbuild_views.xml',
'views/ir_attachment_view.xml',
'views/res_config_settings_views.xml',
'views/mrp_templates.xml',
'views/stock_scrap_views.xml',
'report/mrp_report_views_main.xml',
'report/mrp_report_bom_structure.xml',
'report/mrp_production_templates.xml',
'report/report_stock_rule.xml',
'report/mrp_zebra_production_templates.xml',
],
'qweb': ['static/src/xml/mrp.xml'],
'demo': [
'data/mrp_demo.xml',
],
'test': [],
'application': True,
'pre_init_hook': '_pre_init_mrp',
'post_init_hook': '_create_warehouse_data',
'uninstall_hook': 'uninstall_hook',
}
| agpl-3.0 | -6,602,745,014,235,731,000 | 33.788462 | 74 | 0.598121 | false |
mehtadev17/mapusaurus | mapusaurus/hmda/tests/test_loader.py | 1 | 2619 | import os
from django.test import TestCase
from mock import Mock, patch
from hmda.management.commands.load_hmda import Command
from hmda.models import HMDARecord
class LoadHmdaTest(TestCase):
fixtures = ['dummy_tracts']
def test_handle(self):
command = Command()
command.stdout = Mock()
command.handle(os.path.join("hmda", "tests", "mock_2014.csv"))
# The mock data file contains 10 records, 8 for known states
self.assertEqual(8, HMDARecord.objects.count())
lenders = set(r.institution_id for r in HMDARecord.objects.all())
geos = set(r.geo_id for r in HMDARecord.objects.all())
self.assertEqual(3, len(lenders))
self.assertTrue(('5' + '0000000319') in lenders)
self.assertTrue(('5' + '0000000435') in lenders)
self.assertTrue(('3' + '0000001281') in lenders)
self.assertEqual(4, len(geos))
self.assertTrue('1122233300' in geos)
self.assertTrue('1122233400' in geos)
self.assertTrue('1122333300' in geos)
self.assertTrue('1222233300' in geos)
HMDARecord.objects.all().delete()
@patch('hmda.management.commands.load_hmda.errors')
def test_handle_errors_dict(self, errors):
errors.in_2010 = {'1122233300': '9988877766'}
command = Command()
command.stdout = Mock()
command.handle(os.path.join("hmda", "tests", "mock_2014.csv"))
geos = set(r.geo_id for r in HMDARecord.objects.all())
self.assertEqual(4, len(geos))
# 1122233300 got replaced
self.assertTrue('9988877766' in geos)
self.assertFalse('1122233300' in geos)
HMDARecord.objects.all().delete()
def test_multi_files(self):
command = Command()
command.stdout = Mock()
main_csv_directory = os.path.abspath( os.path.join("hmda", "tests") )
main_csv_directory = main_csv_directory + "/"
command.handle(main_csv_directory , "delete_file:false", "filterhmda" )
lenders = set(r.institution_id for r in HMDARecord.objects.all())
geos = set(r.geo_id for r in HMDARecord.objects.all())
self.assertEqual(3, len(lenders))
self.assertTrue(('5' + '0000000319') in lenders)
self.assertTrue(('5' + '0000000435') in lenders)
self.assertTrue(('3' + '0000001281') in lenders)
self.assertEqual(4, len(geos))
self.assertTrue('1122233300' in geos)
self.assertTrue('1122233400' in geos)
self.assertTrue('1122333300' in geos)
self.assertTrue('1222233300' in geos)
HMDARecord.objects.all().delete()
| cc0-1.0 | 962,711,516,755,231,500 | 34.876712 | 79 | 0.635357 | false |
jpinsonault/imdb_cast_matcher | match_cast.py | 1 | 1055 | #!/usr/bin/env python
import argparse
import sys
from imdb import IMDb
args = None
def parse_args():
global args
parser = argparse.ArgumentParser()
parser.add_argument('first_movie')
parser.add_argument('second_movie')
args = parser.parse_args()
def main():
imdb = IMDb()
# Get 2 movies
first_movie = confirm_movie(imdb, args.first_movie)
second_movie = confirm_movie(imdb, args.second_movie)
imdb.update(first_movie)
imdb.update(second_movie)
print("Comparing '{}' and '{}'".format(first_movie["title"], second_movie["title"]))
# Compare cast
in_both = []
for first_movie_person in first_movie["cast"]:
for second_movie_person in second_movie["cast"]:
if first_movie_person["name"] == second_movie_person["name"]:
in_both.append(first_movie_person)
for person in in_both:
print(person["name"])
def confirm_movie(imdb, movie_name):
return imdb.search_movie(movie_name)[0]
if __name__ == '__main__':
parse_args()
main() | mit | -4,238,202,857,582,957,000 | 20.55102 | 88 | 0.629384 | false |
wbphelps/ISSTracker | showGPS.py | 1 | 4984 | # show GPS page
from datetime import datetime, timedelta
import pygame
from pygame.locals import *
import math
from plotSky import plotSky
R90 = math.radians(90) # 90 degrees in radians
class showGPS():
def getxy(self, alt, azi): # alt, az in radians
# thanks to John at Wobbleworks for the algorithm
r = (R90 - alt)/R90
x = r * math.sin(azi)
y = r * math.cos(azi)
if self.flip:
x = int(self.centerX - x * self.D) # flip E/W, scale to radius, center on plot
else:
x = int(self.centerX + x * self.D) # flip E/W, scale to radius, center on plot
y = int(self.centerY - y * self.D) # scale to radius, center on plot
return (x,y)
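# Worked example of the mapping above (added comment, not in the original
# code): a body at the zenith (alt = 90 deg) gives r = 0 and lands on the
# plot centre, while a body on the horizon due north (alt = 0, azi = 0)
# gives r = 1, x = 0, y = 1, i.e. the top edge of the circle of radius D.
# The "flip" flag only mirrors east/west for screen-versus-sky orientation.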
def __init__(self, Screen, Colors, gps, obs, sun, x=0, y=0, flip=False):
self.Screen = Screen
self.Colors = Colors
self.pos = (x,y)
self.flip = flip
self.window = Screen.copy()
rect = self.window.get_rect()
self.height = rect.height
self.width = rect.width
self.D = self.height/2 - 2
self.centerX = self.width/2 + 2
self.centerY = self.height/2 + 2
self.BG = Screen.copy() # make another copy for the background
self.BGupdate = datetime.now() - timedelta(seconds=61) # force BG update
self.drawBG(obs, sun) # fill in the background & draw it
def drawBG(self, obs, sun):
self.BGupdate = datetime.now()
self.Sky = plotSky(self.BG, self.Colors, obs, self.centerX, self.centerY, self.D, flip=False) # draw the sky background & compass points
self.Sky.plotStars(obs) # add stars
self.Sky.plotPlanets(obs) # add planets
def plot(self, gps, obs, sun):
# fName = 'Monospac821 BT'
# test = pygame.font.match_font(fName, bold=True) # check to see if it's installed
# if test == None:
fName = 'DejaVuSansMono' # use this one instead
if (datetime.now() - self.BGupdate).total_seconds() > 60:
self.drawBG(obs, sun) # update background image once a minute
self.window.blit(self.BG,(0,0)) # paint background image
line = 0
txtColor = self.Colors.Yellow
txtFont = pygame.font.SysFont(fName, 15, bold=True)
t1 = txtFont.render(gps.datetime.strftime('%H:%M:%S'), 1, txtColor) # time
t1r = t1.get_rect()
self.window.blit(t1, (0,0)) # time
line += t1r.height
t2 = txtFont.render(gps.datetime.strftime('%Y/%m/%d'), 1, txtColor) # date
t2r = t2.get_rect()
self.window.blit(t2, (self.width - t2r.width, 0))
e1 = txtFont.render('({})'.format(gps.error_count), 1, self.Colors.Red)
e1r = e1.get_rect()
self.window.blit(e1, (self.width - e1r.width, t2r.height))
# draw a circle for each satellite
satFont = pygame.font.SysFont(fName, 9, bold=True)
# TODO: detect collision and move label ?
ns = 0
nsa = 0
for sat in gps.satellites: # plot all GPS satellites on sky chart
if (sat.alt, sat.azi) == (0, 0): continue # skip satellites with no reported position
xy = self.getxy(sat.alt,sat.azi)
ns += 1
sz = sat.snr
if sz>0: nsa += 1
if sz<5: color = self.Colors.Red # no signal
elif sz<20: color = self.Colors.Yellow
else: color = self.Colors.Green
if sz<9: sz = 9 # minimum circle size
pygame.draw.circle(self.window, color, xy, sz, 1)
# tsat = satFont.render(format(sat.svn), 1, self.Colors.White)
tsat = satFont.render(format(sat.svn), 1, self.Colors.White, self.Sky.bgColor)
tpos = tsat.get_rect()
tpos.centerx = xy[0]
tpos.centery = xy[1]
self.window.blit(tsat,tpos)
# txtFont = pygame.font.SysFont(fName, 15, bold=True)
s1 = txtFont.render('{}/{}'.format(gps.status,gps.quality), 1, txtColor)
s1r = s1.get_rect()
self.window.blit(s1,(1,line))
line += s1r.height
s2 = txtFont.render('{:0>2}/{:0>2}'.format(nsa, ns), 1, txtColor)
s2r = s2.get_rect()
self.window.blit(s2,(1,line))
line += s2r.height
tdil = txtFont.render('{:0.1f}m'.format(gps.hDilution), 1, txtColor)
tdilr = tdil.get_rect()
self.window.blit(tdil, (1, line))
# line += tdilr.height
line = self.height
if gps.quality == 2 or gps.hDilution < 2:
fmt = '{:7.5f}' # differential GPS - 1 meter accuracy!!!
else:
fmt = '{:6.4f}' # normal signal
tlon = txtFont.render(fmt.format(math.degrees(gps.avg_longitude)), 1, txtColor)
tlonr = tlon.get_rect()
line -= tlonr.height
self.window.blit(tlon, (self.width - tlonr.width, line))
tlat = txtFont.render(fmt.format(math.degrees(gps.avg_latitude)), 1, txtColor)
tlatr = tlat.get_rect()
line -= tlatr.height
self.window.blit(tlat, (self.width - tlatr.width, line))
alt = gps.altitude #+ gps.geodiff
if alt<100:
talt = '{:6.1f}m'.format(alt)
else:
talt = '{:6.0f}m'.format(alt)
talt = txtFont.render(talt, 1, txtColor)
taltr = talt.get_rect()
line -= taltr.height
self.window.blit(talt, (self.width - taltr.width, line))
self.Screen.blit(self.window,self.pos)
pygame.display.update() #flip()
| gpl-2.0 | 6,263,011,145,181,974,000 | 31.789474 | 140 | 0.62179 | false |
kylejusticemagnuson/pyti | tests/test_linear_weighted_moving_average.py | 1 | 9719 | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import linear_weighted_moving_average
class TestLinearWeightedMovingAverage(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.lwma_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
807.08190476190487, 809.26714285714286, 811.36904761904759,
812.11285714285714, 811.71904761904773, 812.9895238095238,
814.52619047619066, 814.94333333333338, 813.08523809523808,
812.01761904761918, 807.01523809523803, 801.65428571428561,
793.07619047619039, 787.06714285714281, 778.89428571428573,
772.75285714285724, 772.05999999999995, 776.16000000000008,
779.3138095238096, 780.41285714285721, 781.07476190476189,
782.75857142857149, 783.40095238095228, 781.09571428571439,
782.34333333333325, 781.80761904761903, 779.90523809523802,
771.91714285714284, 770.73666666666668, 772.61761904761897,
781.5214285714286, 791.3257142857143, 796.76333333333332,
794.94142857142856, 792.10333333333324, 790.54809523809536,
793.48000000000013, 797.67999999999995, 804.7390476190476,
810.33190476190487, 815.5557142857142, 820.72190476190463,
826.03142857142859, 826.56285714285718, 825.72571428571416,
826.34999999999991, 824.91047619047629, 819.3142857142858,
814.11190476190473, 810.24380952380966, 809.52428571428561,
808.64571428571435, 809.91571428571422, 807.68857142857144,
806.43047619047627, 804.71619047619049, 803.58333333333337,
802.18952380952373, 802.28476190476192, 802.45571428571429,
804.65476190476193, 806.71285714285705, 806.00999999999999,
808.83095238095245, 811.2404761904761, 809.96380952380946,
807.07333333333338, 803.41809523809513, 801.23047619047622,
800.19714285714281, 796.74047619047633, 794.00047619047609,
794.9228571428572, 792.93666666666672, 795.38238095238091,
799.06761904761913, 802.43952380952373, 801.78952380952387,
799.18857142857144, 796.49809523809517, 794.33380952380946,
793.8038095238096, 793.11047619047622, 792.57333333333338,
792.9585714285713, 794.09333333333325, 795.00999999999999,
796.4585714285713, 798.61047619047622, 801.07142857142856,
801.6514285714286, 803.23285714285714, 804.67952380952386,
805.97380952380956, 806.94380952380948, 807.35190476190473,
807.01904761904768, 806.97952380952381, 804.1280952380954,
802.22047619047612, 801.09761904761899, 800.6076190476191,
797.68619047619052, 788.14047619047619, 779.4457142857143,
771.19666666666672, 764.87571428571425, 761.0861904761905,
757.88904761904757, 757.21333333333337, 755.90619047619043,
754.9466666666666, 749.17904761904754, 744.50238095238092,
739.13809523809527, 735.8266666666666, 732.16809523809525,
727.0661904761904, 720.64523809523803, 716.06380952380948,
711.59428571428566, 709.8271428571428]
self.lwma_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, 809.88111111111118, 811.13916666666671,
811.24916666666672, 812.4569444444445, 813.85472222222222,
814.46472222222224, 813.21194444444461, 812.35305555555556,
808.39722222222235, 804.17555555555555, 797.14027777777778,
791.49333333333323, 783.82833333333338, 777.61861111111114,
775.39250000000004, 777.06916666666666, 778.49055555555549,
778.93027777777775, 779.41638888888895, 781.15750000000003,
782.66583333333335, 781.64694444444444, 782.56916666666666,
781.83249999999998, 780.28749999999991, 774.16222222222211,
772.9041666666667, 773.74527777777769, 780.33944444444444,
787.77611111111105, 792.00138888888898, 791.13472222222219,
790.47916666666663, 791.01277777777773, 794.3597222222221,
797.75194444444458, 802.46833333333336, 806.30555555555543,
811.04916666666668, 816.66861111111109, 822.56777777777768,
824.38388888888881, 824.67000000000007, 825.7208333333333,
825.04972222222227, 821.06944444444446, 816.88333333333344,
813.06777777777779, 811.64083333333338, 810.39083333333338,
810.56555555555553, 807.95638888888891, 806.60666666666668,
805.35722222222228, 804.61027777777781, 803.37805555555553,
803.06638888888892, 802.65333333333331, 804.08166666666659,
805.77305555555552, 805.39055555555558, 807.75555555555547,
809.95916666666665, 809.37055555555571, 807.48611111111109,
804.74499999999989, 802.7786111111111, 801.87249999999995,
798.8369444444445, 795.75333333333344, 795.64305555555575,
793.71083333333331, 795.42944444444447, 798.20749999999998,
800.64222222222224, 800.32000000000005, 798.90333333333331,
797.11611111111108, 795.75750000000005, 795.45722222222219,
794.3752777777778, 793.13388888888892, 792.86722222222215,
793.66555555555567, 794.55166666666673, 795.88361111111112,
797.63222222222214, 799.67333333333329, 800.50777777777773,
802.18083333333334, 803.68888888888898, 805.07888888888874,
806.21083333333343, 806.79777777777781, 806.66722222222222,
806.84555555555551, 804.83083333333332, 803.29666666666662,
802.22472222222223, 801.52749999999992, 798.89861111111111,
791.07944444444456, 783.55694444444453, 776.27611111111116,
770.33527777777772, 765.88916666666671, 761.46194444444438,
758.89777777777772, 756.73638888888888, 755.58194444444439,
750.87416666666672, 746.93583333333345, 742.19999999999993,
738.91638888888883, 735.19638888888892, 730.13888888888891,
723.83527777777772, 719.19361111111118, 714.87388888888881,
712.59916666666663]
self.lwma_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, 810.30218181818191, 811.7467272727273,
813.17672727272725, 813.87018181818178, 813.01509090909099,
812.47872727272727, 809.31600000000003, 805.74436363636369,
799.76963636363644, 794.87527272727277, 788.10236363636363,
782.13090909090909, 779.19254545454555, 779.48345454545461,
779.58163636363645, 779.06545454545449, 778.86290909090906,
780.04527272727273, 781.3089090909092, 780.81036363636372,
782.20545454545459, 782.08236363636377, 780.79836363636355,
775.53836363636367, 774.30600000000004, 774.88163636363629,
780.05563636363627, 785.90800000000002, 789.46072727272724,
788.89472727272721, 788.43363636363642, 789.17745454545445,
792.86963636363635, 796.81418181818185, 801.48454545454535,
804.88381818181824, 808.47290909090918, 812.9345454545454,
818.40218181818182, 821.13327272727258, 822.64145454545451,
824.42527272727273, 824.48781818181817, 821.59563636363634,
818.3518181818182, 815.28545454545463, 813.85854545454549,
812.21945454545471, 811.76181818181806, 809.24909090909102,
807.59181818181821, 805.9354545454546, 805.03290909090913,
804.03218181818193, 803.81799999999998, 803.35545454545456,
804.2681818181818, 805.33381818181817, 804.90927272727265,
806.93236363636356, 808.87636363636375, 808.60090909090911,
807.30181818181802, 805.25927272727279, 803.77654545454561,
802.99727272727273, 800.28545454545451, 797.61363636363637,
797.17836363636354, 794.93272727272733, 795.77945454545454,
797.80090909090916, 799.774, 799.56999999999994, 798.34454545454537,
796.95672727272733, 796.1521818181817, 796.03018181818186,
795.3063636363637, 794.33527272727281, 793.73109090909088,
793.83854545454551, 794.21872727272728, 795.29109090909094,
796.87654545454541, 798.74109090909076, 799.55345454545454,
801.0645454545454, 802.58945454545449, 804.072, 805.30000000000018,
806.07363636363641, 806.2401818181819, 806.56527272727271,
804.99799999999993, 803.83436363636361, 803.00709090909095,
802.34836363636373, 800.03545454545463, 793.37181818181818,
786.77472727272732, 780.23800000000006, 774.58363636363629,
770.11672727272719, 765.6307272727272, 762.40909090909099,
759.28090909090906, 756.96381818181817, 752.32018181818182,
748.72036363636357, 744.51763636363637, 741.45654545454545,
737.88709090909083, 733.15618181818172, 727.26709090909083,
722.54618181818182, 717.95818181818174, 715.2269090909092]
def test_lwma_period_6(self):
period = 6
lwma = linear_weighted_moving_average.linear_weighted_moving_average(self.data, period)
np.testing.assert_array_equal(lwma, self.lwma_period_6_expected)
def test_lwma_period_8(self):
period = 8
lwma = linear_weighted_moving_average.linear_weighted_moving_average(self.data, period)
np.testing.assert_array_equal(lwma, self.lwma_period_8_expected)
def test_lwma_period_10(self):
period = 10
lwma = linear_weighted_moving_average.linear_weighted_moving_average(self.data, period)
np.testing.assert_array_equal(lwma, self.lwma_period_10_expected)
def test_lwma_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
linear_weighted_moving_average.linear_weighted_moving_average(self.data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
| mit | 9,135,459,874,037,574,000 | 59.36646 | 95 | 0.73629 | false |
adist/drunken-sansa | openerp/addons/pln_dprr/hr_employee.py | 1 | 2048 | '''
Created on Feb 18, 2015
@author: adista@bizoft
'''
from openerp.osv import fields, osv
class hr_employee_category(osv.osv):
_inherit = 'hr.employee.category'
_columns = {
'code' : fields.char(string='Kode'),
'type' : fields.selection([('area', 'Area'), ('unitup', 'Unit Up')], string='Tipe', required=True),
}
def name_get(self, cr, uid, ids, context=None):
res = super(hr_employee_category, self).name_get(cr, uid, ids, context=context)
res = dict(res)
for this in self.browse(cr, uid, res.keys()):
if not this.parent_id:
continue
res[this.id] = this.code and ' - '.join([this.code, res[this.id]]) or res[this.id]
return res.items()
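# Worked example (added note, not in the original): a child category with
# code "AR01" and name "Area East" is displayed as "AR01 - Area East", while
# top-level categories (no parent_id) and categories without a code keep
# their plain name. The code/name values here are made up for illustration.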
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = self.search(cr, user, [('code', operator, name)] + args, limit=limit, context=context)
ids += self.search(cr, user, [('name', operator, name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
hr_employee_category()
class hr_employee(osv.osv):
_inherit = 'hr.employee'
def create(self, cr, uid, vals, context={}):
res = super(hr_employee, self).create(cr, uid, vals, context={})
if not res:
return res
category_ids = vals['category_ids'][0][-1]
if len(category_ids) != 1:
raise ValueError('expected exactly one employee category, got %d' % len(category_ids))
o_hr_categ = self.pool.get('hr.employee.category').browse(cr, uid, category_ids[-1])
user_vals = {
'name' : vals['name'],
'login' : '_'.join([str(o_hr_categ.code).lower(), vals['name'].lower()]),
'password' : ''.join([vals['name'].lower(),'123']),
'employee' : True,
}
o_user = self.pool.get('res.users').create(cr, uid, user_vals, context=context)
self.write(cr, uid, [res], {'user_id' : o_user}, context=context)
return res
hr_employee() | agpl-3.0 | 7,014,781,787,241,007,000 | 38.403846 | 109 | 0.561523 | false |
claudep/translate | translate/convert/xliff2odf.py | 1 | 6253 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2014 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert XLIFF translation files to OpenDocument (ODF) files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/odf2xliff.html
for examples and usage instructions.
"""
import six
import zipfile
from io import BytesIO
import lxml.etree as etree
from translate.convert import convert
from translate.storage import factory
from translate.storage.odf_io import copy_odf, open_odf
from translate.storage.odf_shared import (inline_elements,
no_translate_content_elements)
from translate.storage.xml_extract.extract import ParseState
from translate.storage.xml_extract.generate import (apply_translations,
replace_dom_text)
from translate.storage.xml_extract.unit_tree import XPathTree, build_unit_tree
def translate_odf(template, input_file):
def load_dom_trees(template):
"""Return a dict with translatable files in the template ODF package.
The keys are the filenames inside the ODF package, and the values are
the etrees for each of those translatable files.
"""
odf_data = open_odf(template)
return dict((filename, etree.parse(BytesIO(data)))
for filename, data in six.iteritems(odf_data))
def load_unit_tree(input_file):
"""Return a dict with the translations grouped by files ODF package.
The keys are the filenames inside the template ODF package, and the
values are XPathTree instances for each of those files.
"""
store = factory.getobject(input_file)
tree = build_unit_tree(store)
def extract_unit_tree(filename, root_dom_element_name):
"""Find the subtree in 'tree' which corresponds to the data in XML
file 'filename'.
"""
try:
file_tree = tree.children[root_dom_element_name, 0]
except KeyError:
file_tree = XPathTree()
return (filename, file_tree)
return dict([extract_unit_tree('content.xml', 'office:document-content'),
extract_unit_tree('meta.xml', 'office:document-meta'),
extract_unit_tree('styles.xml', 'office:document-styles')])
def translate_dom_trees(unit_trees, dom_trees):
"""Return a dict with the translated files for the ODF package.
The keys are the filenames for the translatable files inside the
template ODF package, and the values are etree ElementTree instances
for each of those files.
"""
make_parse_state = lambda: ParseState(no_translate_content_elements,
inline_elements)
for filename, dom_tree in six.iteritems(dom_trees):
file_unit_tree = unit_trees[filename]
apply_translations(dom_tree.getroot(), file_unit_tree,
replace_dom_text(make_parse_state))
return dom_trees
dom_trees = load_dom_trees(template)
unit_trees = load_unit_tree(input_file)
return translate_dom_trees(unit_trees, dom_trees)
def write_odf(template, output_file, dom_trees):
"""Write the translated ODF package.
The resulting ODF package is a copy of the template ODF package, with the
translatable files replaced by their translated versions.
"""
template_zip = zipfile.ZipFile(template, 'r')
output_zip = zipfile.ZipFile(output_file, 'w',
compression=zipfile.ZIP_DEFLATED)
# Copy the ODF package.
output_zip = copy_odf(template_zip, output_zip, dom_trees.keys())
# Overwrite the translated files to the ODF package.
for filename, dom_tree in six.iteritems(dom_trees):
output_zip.writestr(filename, etree.tostring(dom_tree,
encoding='UTF-8',
xml_declaration=True))
def convertxliff(input_file, output_file, template):
"""Create a translated ODF using an ODF template and a XLIFF file."""
xlf_data = input_file.read()
dom_trees = translate_odf(template, BytesIO(xlf_data))
write_odf(template, output_file, dom_trees)
output_file.close()
return True
def main(argv=None):
formats = {
('xlf', 'odt'): ("odt", convertxliff), # Text
('xlf', 'ods'): ("ods", convertxliff), # Spreadsheet
('xlf', 'odp'): ("odp", convertxliff), # Presentation
('xlf', 'odg'): ("odg", convertxliff), # Drawing
('xlf', 'odc'): ("odc", convertxliff), # Chart
('xlf', 'odf'): ("odf", convertxliff), # Formula
('xlf', 'odi'): ("odi", convertxliff), # Image
('xlf', 'odm'): ("odm", convertxliff), # Master Document
('xlf', 'ott'): ("ott", convertxliff), # Text template
('xlf', 'ots'): ("ots", convertxliff), # Spreadsheet template
('xlf', 'otp'): ("otp", convertxliff), # Presentation template
('xlf', 'otg'): ("otg", convertxliff), # Drawing template
('xlf', 'otc'): ("otc", convertxliff), # Chart template
('xlf', 'otf'): ("otf", convertxliff), # Formula template
('xlf', 'oti'): ("oti", convertxliff), # Image template
('xlf', 'oth'): ("oth", convertxliff), # Web page template
}
parser = convert.ConvertOptionParser(formats, usetemplates=True, description=__doc__)
parser.run(argv)
if __name__ == '__main__':
main()
| gpl-2.0 | 5,758,427,300,691,318,000 | 39.869281 | 96 | 0.629778 | false |
yarikoptic/Fail2Ban-Old-SVNGIT | server/filter.py | 1 | 15235 | # This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision$
__author__ = "Cyril Jaquier"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
from failmanager import FailManager
from ticket import FailTicket
from jailthread import JailThread
from datedetector import DateDetector
from mytime import MyTime
from failregex import FailRegex, Regex, RegexException
import logging, re, os, fcntl, time
# Gets the instance of the logger.
logSys = logging.getLogger("fail2ban.filter")
##
# Log reader class.
#
# This class reads a log file and detects login failures or anything else
# that matches a given regular expression. This class is instantiated by
# a Jail object.
class Filter(JailThread):
##
# Constructor.
#
# Initialize the filter object with default values.
# @param jail the jail object
def __init__(self, jail):
JailThread.__init__(self)
## The jail which contains this filter.
self.jail = jail
## The failures manager.
self.failManager = FailManager()
## The regular expression list matching the failures.
self.__failRegex = list()
## The regular expression list with expressions to ignore.
self.__ignoreRegex = list()
## The amount of time to look back.
self.__findTime = 6000
## The ignore IP list.
self.__ignoreIpList = []
self.dateDetector = DateDetector()
self.dateDetector.addDefaultTemplate()
logSys.debug("Created Filter")
##
# Add a regular expression which matches the failure.
#
# The regular expression can also match any other pattern than failures
# and thus can be used for many purposes.
# @param value the regular expression
def addFailRegex(self, value):
try:
regex = FailRegex(value)
self.__failRegex.append(regex)
except RegexException, e:
logSys.error(e)
def delFailRegex(self, index):
try:
del self.__failRegex[index]
except IndexError:
logSys.error("Cannot remove regular expression. Index %d is not "
"valid" % index)
##
# Get the regular expression which matches the failure.
#
# @return the regular expression
def getFailRegex(self):
failRegex = list()
for regex in self.__failRegex:
failRegex.append(regex.getRegex())
return failRegex
##
# Add a regular expression for lines that should be ignored.
#
# Any line matching one of these expressions is skipped and is never
# counted as a failure.
# @param value the regular expression
def addIgnoreRegex(self, value):
try:
regex = Regex(value)
self.__ignoreRegex.append(regex)
except RegexException, e:
logSys.error(e)
def delIgnoreRegex(self, index):
try:
del self.__ignoreRegex[index]
except IndexError:
logSys.error("Cannot remove regular expression. Index %d is not "
"valid" % index)
##
# Get the regular expressions for lines to ignore.
#
# @return the regular expression
def getIgnoreRegex(self):
ignoreRegex = list()
for regex in self.__ignoreRegex:
ignoreRegex.append(regex.getRegex())
return ignoreRegex
##
# Set the time needed to find a failure.
#
# This value tells the filter how long it has to take failures into
# account.
# @param value the time
def setFindTime(self, value):
self.__findTime = value
self.failManager.setMaxTime(value)
logSys.info("Set findtime = %s" % value)
##
# Get the time needed to find a failure.
#
# @return the time
def getFindTime(self):
return self.__findTime
##
# Set the maximum retry value.
#
# @param value the retry value
def setMaxRetry(self, value):
self.failManager.setMaxRetry(value)
logSys.info("Set maxRetry = %s" % value)
##
# Get the maximum retry value.
#
# @return the retry value
def getMaxRetry(self):
return self.failManager.getMaxRetry()
##
# Main loop.
#
# This function is the main loop of the thread. It checks if the
# file has been modified and looks for failures.
# @return True when the thread exits nicely
def run(self):
raise Exception("run() is abstract")
##
# Ban an IP - http://blogs.buanzo.com.ar/2009/04/fail2ban-patch-ban-ip-address-manually.html
# Arturo 'Buanzo' Busleiman <[email protected]>
#
# to enable banip fail2ban-client BAN command
def addBannedIP(self, ip):
unixTime = time.time()
self.failManager.addFailure(FailTicket(ip, unixTime))
return ip
##
# Add an IP/DNS to the ignore list.
#
# IP addresses in the ignore list are not taken into account
# when finding failures. CIDR mask and DNS are also accepted.
# @param ip IP address to ignore
def addIgnoreIP(self, ip):
logSys.debug("Add " + ip + " to ignore list")
self.__ignoreIpList.append(ip)
def delIgnoreIP(self, ip):
logSys.debug("Remove " + ip + " from ignore list")
self.__ignoreIpList.remove(ip)
def getIgnoreIP(self):
return self.__ignoreIpList
##
# Check if IP address/DNS is in the ignore list.
#
# Check if the given IP address matches an IP address/DNS or a CIDR
# mask in the ignore list.
# @param ip IP address
# @return True if IP address is in ignore list
def inIgnoreIPList(self, ip):
for i in self.__ignoreIpList:
# An empty string is always false
if i == "":
continue
s = i.split('/', 1)
# IP address without CIDR mask
if len(s) == 1:
s.insert(1, '32')
s[1] = long(s[1])
try:
a = DNSUtils.cidr(s[0], s[1])
b = DNSUtils.cidr(ip, s[1])
except Exception:
# Check if IP in DNS
ips = DNSUtils.dnsToIp(i)
if ip in ips:
return True
else:
continue
if a == b:
return True
return False
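# Illustrative note (added, not in the original source): entries in the
# ignore list may be a plain address ("192.0.2.1"), a CIDR block
# ("192.0.2.0/24") or a hostname ("example.com"); a plain address is
# treated as a /32 before the CIDR comparison above. The sample values
# are placeholders.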
def processLine(self, line):
try:
# Decode line to UTF-8
l = line.decode('utf-8')
except UnicodeDecodeError:
l = line
timeMatch = self.dateDetector.matchTime(l)
if timeMatch:
# Lets split into time part and log part of the line
timeLine = timeMatch.group()
# Let's leave the beginning in as well, so that if there is no
# anchor at the beginning of the time regexp we at least don't
# allow injection. Should be harmless otherwise.
logLine = l[:timeMatch.start()] + l[timeMatch.end():]
else:
timeLine = l
logLine = l
return self.findFailure(timeLine, logLine)
def processLineAndAdd(self, line):
for element in self.processLine(line):
ip = element[0]
unixTime = element[1]
logSys.debug("Processing line with time:%s and ip:%s"
% (unixTime, ip))
if unixTime < MyTime.time() - self.getFindTime():
logSys.debug("Ignore line since time %s < %s - %s"
% (unixTime, MyTime.time(), self.getFindTime()))
break
if self.inIgnoreIPList(ip):
logSys.debug("Ignore %s" % ip)
continue
logSys.debug("Found %s" % ip)
self.failManager.addFailure(FailTicket(ip, unixTime))
##
# Returns true if the line should be ignored.
#
# Uses ignoreregex.
# @param line: the line
# @return: a boolean
def ignoreLine(self, line):
for ignoreRegex in self.__ignoreRegex:
ignoreRegex.search(line)
if ignoreRegex.hasMatched():
return True
return False
##
# Finds the failure in a line given split into time and log parts.
#
# Uses the failregex pattern to find it and timeregex in order
# to find the logging time.
# @return a list of [ip, timestamp] pairs.
def findFailure(self, timeLine, logLine):
failList = list()
# Checks if we must ignore this line.
if self.ignoreLine(logLine):
# The ignoreregex matched. Return.
return failList
# Iterates over all the regular expressions.
for failRegex in self.__failRegex:
failRegex.search(logLine)
if failRegex.hasMatched():
# The failregex matched.
date = self.dateDetector.getUnixTime(timeLine)
if date == None:
logSys.debug("Found a match for '" + logLine +"' but no "
+ "valid date/time found for '"
+ timeLine + "'. Please contact the "
+ "author in order to get support for this "
+ "format")
else:
try:
host = failRegex.getHost()
ipMatch = DNSUtils.textToIp(host)
if ipMatch:
for ip in ipMatch:
failList.append([ip, date])
# We matched a regex, it is enough to stop.
break
except RegexException, e:
logSys.error(e)
return failList
##
# Get the status of the filter.
#
# Get some information about the filter state such as the total
# number of failures.
# @return a list with tuple
def status(self):
ret = [("Currently failed", self.failManager.size()),
("Total failed", self.failManager.getFailTotal())]
return ret
class FileFilter(Filter):
def __init__(self, jail):
Filter.__init__(self, jail)
## The log file path.
self.__logPath = []
##
# Add a log file path
#
# @param path log file path
def addLogPath(self, path, tail = False):
container = FileContainer(path, tail)
self.__logPath.append(container)
##
# Delete a log path
#
# @param path the log file to delete
def delLogPath(self, path):
for log in self.__logPath:
if log.getFileName() == path:
self.__logPath.remove(log)
return
##
# Get the log file path
#
# @return log file path
def getLogPath(self):
return self.__logPath
##
# Check whether path is already monitored.
#
# @param path The path
# @return True if the path is already monitored else False
def containsLogPath(self, path):
for log in self.__logPath:
if log.getFileName() == path:
return True
return False
def getFileContainer(self, path):
for log in self.__logPath:
if log.getFileName() == path:
return log
return None
##
# Gets all the failure in the log file.
#
# Gets all the failure in the log file which are newer than
# MyTime.time()-self.findTime. When a failure is detected, a FailTicket
# is created and is added to the FailManager.
def getFailures(self, filename):
container = self.getFileContainer(filename)
if container == None:
logSys.error("Unable to get failures in " + filename)
return False
# Try to open log file.
try:
container.open()
except Exception, e:
logSys.error("Unable to open %s" % filename)
logSys.exception(e)
return False
line = container.readline()
while not line == "":
if not self._isActive():
# The jail has been stopped
break
self.processLineAndAdd(line)
# Read a new line.
line = container.readline()
container.close()
return True
def status(self):
ret = Filter.status(self)
path = [m.getFileName() for m in self.getLogPath()]
ret.append(("File list", path))
return ret
##
# FileContainer class.
#
# This class manages a file handler and takes care of log rotation detection.
# In order to detect log rotation, the hash (MD5) of the first line of the file
# is computed and compared to the previous hash of this line.
import md5
class FileContainer:
def __init__(self, filename, tail = False):
self.__filename = filename
self.__tail = tail
self.__handler = None
# Try to open the file. Raises an exception if an error occurred.
handler = open(filename)
stats = os.fstat(handler.fileno())
self.__ino = stats.st_ino
try:
firstLine = handler.readline()
# Computes the MD5 of the first line.
self.__hash = md5.new(firstLine).digest()
# Start at the beginning of file if tail mode is off.
if tail:
handler.seek(0, 2)
self.__pos = handler.tell()
else:
self.__pos = 0
finally:
handler.close()
def getFileName(self):
return self.__filename
def open(self):
self.__handler = open(self.__filename)
# Set the file descriptor to be FD_CLOEXEC
fd = self.__handler.fileno()
fcntl.fcntl(fd, fcntl.F_SETFD, fd | fcntl.FD_CLOEXEC)
firstLine = self.__handler.readline()
# Computes the MD5 of the first line.
myHash = md5.new(firstLine).digest()
stats = os.fstat(self.__handler.fileno())
# Compare hash and inode
if self.__hash != myHash or self.__ino != stats.st_ino:
logSys.info("Log rotation detected for %s" % self.__filename)
self.__hash = myHash
self.__ino = stats.st_ino
self.__pos = 0
# Sets the file pointer to the last position.
self.__handler.seek(self.__pos)
def readline(self):
if self.__handler == None:
return ""
return self.__handler.readline()
def close(self):
if not self.__handler == None:
# Saves the last position.
self.__pos = self.__handler.tell()
# Closes the file.
self.__handler.close()
self.__handler = None
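# Minimal usage sketch (added for illustration, not part of the original
# module). A caller is expected to drive a container roughly like this,
# re-opening between polls so that rotation (changed first-line hash or
# inode) is detected and the read position reset:
#   container = FileContainer("/var/log/auth.log", tail=True)
#   container.open()
#   line = container.readline()
#   while line:
#       # ...process line...
#       line = container.readline()
#   container.close()
# The log path is a made-up example.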
##
# Utils class for DNS and IP handling.
#
# This class contains only static methods used to handle DNS and IP
# addresses.
import socket, struct
class DNSUtils:
IP_CRE = re.compile("^(?:\d{1,3}\.){3}\d{1,3}$")
#@staticmethod
def dnsToIp(dns):
""" Convert a DNS into an IP address using the Python socket module.
Thanks to Kevin Drapel.
"""
try:
return socket.gethostbyname_ex(dns)[2]
except socket.gaierror:
logSys.warn("Unable to find a corresponding IP address for %s"
% dns)
return list()
dnsToIp = staticmethod(dnsToIp)
#@staticmethod
def searchIP(text):
""" Search if an IP address if directly available and return
it.
"""
match = DNSUtils.IP_CRE.match(text)
if match:
return match
else:
return None
searchIP = staticmethod(searchIP)
#@staticmethod
def isValidIP(string):
""" Return true if str is a valid IP
"""
s = string.split('/', 1)
try:
socket.inet_aton(s[0])
return True
except socket.error:
return False
isValidIP = staticmethod(isValidIP)
#@staticmethod
def textToIp(text):
""" Return the IP of DNS found in a given text.
"""
ipList = list()
# Search for plain IP
plainIP = DNSUtils.searchIP(text)
if not plainIP == None:
plainIPStr = plainIP.group(0)
if DNSUtils.isValidIP(plainIPStr):
ipList.append(plainIPStr)
if not ipList:
# Try to get IP from possible DNS
ip = DNSUtils.dnsToIp(text)
for e in ip:
ipList.append(e)
return ipList
textToIp = staticmethod(textToIp)
#@staticmethod
def cidr(i, n):
""" Convert an IP address string with a CIDR mask into a 32-bit
integer.
"""
# 32-bit IPv4 address mask
MASK = 0xFFFFFFFFL
return ~(MASK >> n) & MASK & DNSUtils.addr2bin(i)
cidr = staticmethod(cidr)
#@staticmethod
def addr2bin(string):
""" Convert a string IPv4 address into an unsigned integer.
"""
return struct.unpack("!L", socket.inet_aton(string))[0]
addr2bin = staticmethod(addr2bin)
#@staticmethod
def bin2addr(addr):
""" Convert a numeric IPv4 address into string n.n.n.n form.
"""
return socket.inet_ntoa(struct.pack("!L", addr))
bin2addr = staticmethod(bin2addr)
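# Worked example for the helpers above (added comment, not in the original
# file): addr2bin("192.0.2.10") == 0xC000020A, and cidr("192.0.2.10", 24)
# masks off the low 8 bits, giving the same value as cidr("192.0.2.99", 24);
# this is how inIgnoreIPList() decides that both addresses fall inside
# "192.0.2.0/24".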
| gpl-2.0 | 3,093,289,645,956,499,000 | 24.519263 | 93 | 0.679685 | false |
sumedh123/debatify | app/models.py | 1 | 6706 | import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from flask import request
#from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask_login import UserMixin,AnonymousUserMixin
from app import login_manager
from app import db
from datetime import datetime
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
timestamp = db.Column(db.DateTime, default = datetime.utcnow)
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
firstname = db.Column(db.String(50),nullable = True)
lastname = db.Column(db.String(50),nullable = True)
email = db.Column(db.String(50),nullable = True)
username = db.Column(db.String(64),nullable = True)
password = db.Column(db.String(100),nullable = True)
password_hash = db.Column(db.String(128), nullable = True)
confirmed = db.Column(db.Boolean, default = False)
question = db.relationship("Question", backref = "owner", lazy = 'dynamic')
location = db.Column(db.String(64),nullable = True)
about_me = db.Column(db.Text(),nullable = True)
member_since = db.Column(db.DateTime(), default=datetime.utcnow,nullable = True)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow,nullable = True)
posts = db.relationship('Posts', backref = 'author', lazy = 'dynamic')
fetchedChat = db.relationship('Chats', backref = 'messenger', lazy = 'dynamic')
followed = db.relationship('Follow', foreign_keys = [Follow.follower_id],backref=db.backref('follower', lazy='joined'),lazy='dynamic',cascade='all, delete-orphan')
followers = db.relationship('Follow', foreign_keys = [Follow.followed_id], backref = db.backref('followed', lazy = 'joined'),lazy='dynamic',cascade='all, delete-orphan')
#role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
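# Worked example (added, not in the original code): for a user whose e-mail
# MD5-hashes to "0bc83cb571cd1c50ba6f3e8a78ef1346", calling
# user.gravatar(size=40) over HTTPS returns
# "https://secure.gravatar.com/avatar/0bc83cb571cd1c50ba6f3e8a78ef1346?s=40&d=identicon&r=g".
# The hash shown is just an illustrative value.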
# def __init__(self, **kwargs):
# super(User, self).__init__(**kwargs)
# if self.role is None:
# if self.email == current_app.config['FLASKY_ADMIN']:
# self.role = Role.query.filter_by(permissions=0xff).first()
# if self.role is None:
# self.role = Role.query.filter_by(default=True).first()
# def __repr__(self):
# return "<User %s>" % self.firstname
#Related to werkzeug security
@property
def password(self):
raise AttributeError('password is not a readable attribute')
#Used for generating hashes of passwords
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
# Verification of password in database
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def follow(self, user):
if not self.is_following(user):
f = Follow(follower = self, followed = user)
db.session.add(f)
db.session.commit()
def unfollow(self, user):
f = self.followed.filter_by(followed_id = user.id).first()
if f:
db.session.delete(f)
db.session.commit()
def is_following(self, user):
return self.followed.filter_by(followed_id = user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(follower_id = user.id).first() is not None
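# Brief usage sketch for the self-referential follow relationship above
# (added for illustration; the user names are hypothetical):
#   alice.follow(bob)           # inserts a Follow(follower=alice, followed=bob) row
#   alice.is_following(bob)     # True
#   bob.is_followed_by(alice)   # True
#   alice.unfollow(bob)         # deletes the association row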
# Another table containing questions of users
class Question(db.Model):
__tablename__ = "questions"
id = db.Column(db.Integer, primary_key = True)
questions = db.Column(db.String(500))
topic = db.Column(db.String(500))
link = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
question = db.relationship("Chats", backref = "chat_no", lazy = 'dynamic')
upvotes = db.Column(db.Integer, nullable = True, default = 1)
class Chats(db.Model):
__tablename__ = "chats"
id = db.Column(db.Integer, primary_key = True)
messages = db.Column(db.String)
time = db.Column(db.String(100))
chat_id = db.Column(db.Integer, db.ForeignKey('questions.id'))
sender_name = db.Column(db.String, nullable = True)
messenger_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class Posts(db.Model):
__tablename__ = "posts"
id = db.Column(db.Integer, primary_key = True)
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index = True, default = datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
# class Role():
# __tablename__ = 'roles'
# id = db.Column(db.Integer,primary_key = True)
# name = db.Column(db.String(64), unique = True)
# default = db.Column(db.Boolean, default = False, index = True)
# permissions = db.Column(db.Integer)
# users = db.relationship('User', backref = 'role', lazy = 'dynamic')
# def can(self,permissions):
# return self.role is not None and (self.role.permissions & permissions) == permissions
# def is_administrator(self):
# return self.can(Permission.ADMINISTER)
# @staticmethod
# def insert_roles():
# roles = {
# 'User': (Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES, True),
# 'Moderator': (Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES | Permission.MODERATE_COMMENTS, False),
# 'Administrator': (0xff, False)
# }
# for r in roles:
# role = Role.query.filter_by(name = r).first()
# if role is None:
# role = Role(name = r)
# role.permissions = roles[r][0]
# role.default = roles[r][1]
# db.session.add(role)
# db.session.commit()
# class Permission:
# FOLLOW = 0x01
# COMMENT = 0x02
# WRITE_ARTICLES = 0x04
# MODERATE_COMMENTS = 0x08
# ADMINISTER = 0x80
# class AnonymousUser(AnonymousUserMixin):
# def can(self,permissions):
# return False
# def is_administrator(self):
# return False
# login_manager.anonymous_user = AnonymousUser
# def generate_confirmation_token(self, expiration = 120):
# s = Serializer(app.config['SERIAL_KEY'],expiration)
# return s.dumps({'confirm' : self.id})
# def confirm(self, token):
# s = Serializer(current_app.config['SECRET_KEY'])
# try:
# data = s.loads(token)
# except:
# return False
# if data.get('confirm') != self.id:
# return False
# self.confirmed = True
# db.session.add(self)
# return True
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id)) | mit | 5,551,581,122,385,706,000 | 33.932292 | 170 | 0.697435 | false |
frustreated/VDiscover | vdiscover/Backtrace.py | 1 | 2987 |
from ptrace.ctypes_tools import formatAddress, formatWordHex
from ptrace.cpu_info import CPU_WORD_SIZE, CPU_MAX_UINT
from ptrace import PtraceError
#from ptrace.six.moves import xrange
class BacktraceFrame(object):
"""
Backtrace frame.
Attributes:
- ip: instruction pointer
- name: name of the function
- arguments: value of the arguments
"""
def __init__(self, ip):
self.ip = ip
self.name = u"???"
self.arguments = []
def __str__(self):
arguments = (formatWordHex(arg) for arg in self.arguments)
return u"IP=%s: %s (%s)" % (formatAddress(self.ip), self.name, ", ".join(arguments))
class Backtrace(object):
"""
Backtrace: all process frames since the start function.
"""
def __init__(self):
self.frames = []
self.truncated = False
def append(self, frame):
self.frames.append(frame)
def __iter__(self):
return iter(self.frames)
def __len__(self):
return len(self.frames)
def getBacktrace(process, max_args=6, max_depth=20):
"""
Get the current backtrace of the specified process:
- max_args: maximum number of arguments in a frame
- max_depth: maximum number of frames
Return a Backtrace object.
"""
backtrace = Backtrace()
# Get current instruction and frame pointer
ip = process.getInstrPointer()
fp = process.getFramePointer()
depth = 0
while True:
# Hit maximum trace depth?
if max_depth <= depth:
backtrace.truncated = True
break
# Read next frame pointer
try:
nextfp = process.readWord(fp)
except PtraceError:
nextfp = None
# Guess the number of function arguments
if fp and nextfp:
nargs = ((nextfp - fp) // CPU_WORD_SIZE) - 2
nargs = min(nargs, max_args)
else:
nargs = 0
# Create frame
frame = getBacktraceFrame(process, ip, fp, nargs)
#print frame
#print hex(fp),hex(nextfp), hex(nargs)
backtrace.append(frame)
# End of the stack?
if not nextfp:
break
# Move to next instruction/frame pointer
ip = process.readWord(fp+CPU_WORD_SIZE)
if ip == CPU_MAX_UINT:
# Linux hack to detect end of the stack
break
fp = nextfp
depth += 1
return backtrace
def getBacktraceFrame(process, ip, fp, nargs):
"""
Get a backtrace frame:
- ip: instruction pointer
- fp: frame pointer
- nargs: number of arguments
Return a BacktraceFrame object.
"""
frame = BacktraceFrame(ip)
address = fp + CPU_WORD_SIZE
try:
for index in xrange(nargs):
address += CPU_WORD_SIZE
word = process.readWord(address)
frame.arguments.append(word)
except PtraceError:
# Ignore argument read error
pass
return frame
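# Minimal usage sketch (added comment, not part of the original module):
# given an already-attached python-ptrace process object, a caller would
# typically do something like
#   backtrace = getBacktrace(process, max_args=4, max_depth=10)
#   for frame in backtrace:
#       print frame
# to dump one frame per line; the parameter values are arbitrary examples.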
| gpl-3.0 | 5,460,198,075,991,017,000 | 24.75 | 92 | 0.585872 | false |
uwosh/UWOshSuccess | subscribers.py | 1 | 3977 | from Products.CMFCore.utils import getToolByName
from Products.UWOshSuccess.utils import isSuccessDebugModeEnabled, getSuccessDebugModeEmailAddress, getSuccessCCEmailAddress
import logging
logger = logging.getLogger('subscribers')
def _convertToId(string):
string = string.lower()
string = string.replace(' ', '-')
return string
def _lookupEmailBody(context, type, transition):
portal_catalog = getToolByName(context, 'portal_catalog')
documentId = '%s-%s-email-message' % (_convertToId(type), _convertToId(transition))
brains = portal_catalog.searchResults({'portal_type':'Document', 'id':documentId})
try:
brain = brains[0]
obj = brain.getObject()
return obj.getText()
except:
logger.error('Unable to locate email message template with ID: %s' % documentId)
return "The following item has been transitioned: "
blueSheetEmailRecipients = {
'submitToInstructor' : ['faculty', 'student', 'office'],
'submitToOffice' : ['faculty', 'student', 'office'],
'approveBlueSheet' : ['faculty', 'student', 'office'],
'beginWaitingForInstructorToDropOffTest' : ['faculty', 'office'],
'pickUpTest' : ['faculty', 'office'],
'testPickedUp' : ['faculty', 'office'],
'administerTest' : ['faculty', 'office'],
'beginWaitingForInstructorToPickupTest' : ['faculty', 'office'],
'prepareTestForReturn' : ['faculty', 'office'],
'returnTestToInstructor' : ['faculty', 'office'],
}
studentApplicationEmailRecipients = {
'submitToOffice' : ['student', 'office'],
'approveApplication' : ['student', 'office'],
}
blueSheetTransitions = blueSheetEmailRecipients.keys()
studentApplicationTransitions = studentApplicationEmailRecipients.keys()
def sendNotificationEmails(obj, event):
if not event.transition:
return
objType = obj.Type()
transitionId = event.transition.id
if objType == 'Blue Sheet' and (transitionId in blueSheetTransitions):
recipients = blueSheetEmailRecipients[transitionId]
elif objType == 'Student Application' and (transitionId in studentApplicationTransitions):
recipients = studentApplicationEmailRecipients[transitionId]
else:
return
newStateTitle = event.new_state.title
objTitle = obj.Title()
officeEmail = getSuccessCCEmailAddress(obj)
mTo = []
if 'student' in recipients:
mTo.append(obj.getEmail())
if 'faculty' in recipients:
mTo.append(obj.getFacultyEmail())
if 'office' in recipients:
mTo.append(officeEmail)
mFrom = officeEmail
mSubj = '%s is now %s' % (objTitle, newStateTitle)
mBody = _lookupEmailBody(obj, objType, transitionId)
mBody = '%s\n\n%s' % (mBody, obj.absolute_url())
if isSuccessDebugModeEnabled(obj):
mTo = []
emailAddress = getSuccessDebugModeEmailAddress(obj)
if emailAddress != '':
for recipient in recipients:
mTo.append(emailAddress.replace('@', '+%s@' % recipient))
obj.MailHost.send(mBody, mTo, mFrom, mSubj)
def addNewStudentToStudentsGroup(obj, event):
if not event.transition or not event.transition.id == 'approveApplication':
return
email = obj.getEmail()
(memberId, _) = email.split('@')
portal_groups = getToolByName(obj, 'portal_groups')
portal_groups.addPrincipalToGroup(memberId, 'UWOshSuccess.Students')
def copyRequestedAccommodationsToGrantedAccommodations(obj, event):
if not event.transition or not event.transition.id == 'submitToInstructor':
return
requestedAccommodations = obj.getTestAccommodationsRequested()
obj.setTestAccommodationsGranted(requestedAccommodations)
def addLocalFacultyRole(obj, event):
if not event.transition or not event.transition.id == 'submitToInstructor':
return
facultyId = obj.getFacultyName()
obj.manage_addLocalRoles(facultyId, ['UWOshSuccess.LocalFaculty',])
| gpl-2.0 | -9,204,435,009,006,009,000 | 34.508929 | 124 | 0.687956 | false |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py | 1 | 21656 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import datetime
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
        port: an object implementing port-specific functionality
options: a dictionary of command line options
printer: a Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self._http_server_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(self._options, args)
def _is_http_test(self, test):
return self.HTTP_SUBDIR in test or self._is_websocket_test(test)
def _is_websocket_test(self, test):
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
if not tests_to_run:
return tests_to_run, tests_to_skip
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
running multiple copies of NRWTs. Perf tests are locked
because heavy load caused by running other tests in parallel
might cause some of them to timeout."""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_expected_missing(self, test_file):
expectations = self._expectations.model().get_expectations(test_file)
return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
def _test_is_slow(self, test_file):
return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
def needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names)
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if self._options.build:
exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
if exit_code:
_log.error("Build check failed")
return exit_code
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
if exit_code:
self._port.stop_helper()
return exit_code
if self._options.clobber_old_results:
self._clobber_old_results()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return test_run_results.OK_EXIT_STATUS
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update("Collecting tests ...")
try:
paths, test_names = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
_log.critical('No tests to run.')
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
# Don't retry failures if an explicit list of tests was passed in.
if self._options.retry_failures is None:
should_retry_failures = len(paths) < len(test_names)
else:
should_retry_failures = self._options.retry_failures
enabled_pixel_tests_in_retry = False
try:
self._start_servers(tests_to_run)
initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
self._port.num_workers(int(self._options.child_processes)), retrying=False)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
if should_retry_failures and tests_to_retry:
enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
_log.info('')
_log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
_log.info('')
retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
num_workers=1, retrying=True)
if enabled_pixel_tests_in_retry:
self._options.pixel_tests = False
else:
retry_results = None
finally:
self._stop_servers()
self._clean_up_run()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update("looking for new crash logs")
self._look_for_new_crash_logs(initial_results, start_time)
if retry_results:
self._look_for_new_crash_logs(retry_results, start_time)
_log.debug("summarizing results")
summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)' %
(exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
self._upload_json_files()
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
if initial_results.keyboard_interrupted:
exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)
def _start_servers(self, tests_to_run):
if self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug("Flushing stdout")
sys.stdout.flush()
_log.debug("Flushing stderr")
sys.stderr.flush()
_log.debug("Stopping helper")
self._port.stop_helper()
_log.debug("Cleaning up port")
self._port.clean_up_test_run()
def _force_pixel_tests_if_needed(self):
if self._options.pixel_tests:
return False
_log.debug("Restarting helper")
self._port.stop_helper()
self._options.pixel_tests = True
self._port.start_helper()
return True
def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
under stress do a second pass at the end of the test run.
run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
def _clobber_old_results(self):
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
# tracking.
self._printer.write_update("Clobbering old results in %s" %
self._results_directory)
layout_tests_dir = self._port.layout_tests_dir()
possible_dirs = self._port.test_dirs()
for dirname in possible_dirs:
if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
# Port specific clean-up.
self._port.clobber_old_port_specific_results()
def _tests_to_retry(self, run_results):
return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
_log.debug("Writing JSON files in %s." % self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
# We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
_log.debug("Finished writing JSON files.")
def _upload_json_files(self):
if not self._options.test_results_server:
return
if not self._options.master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
attrs = [("builder", self._options.builder_name),
("testtype", "layout-tests"),
("master", self._options.master_name)]
files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
# 120 seconds are more than enough to upload test results.
uploader = FileUploader(url, 120)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
if response:
if response.code == 200:
_log.debug("JSON uploaded.")
else:
_log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
else:
_log.error("JSON upload failed; no response returned")
except Exception, err:
_log.error("Upload failed: %s" % err)
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
| gpl-3.0 | 2,364,930,973,769,126,400 | 47.556054 | 192 | 0.656723 | false |
belokop/indico_bare | indico/modules/events/contributions/util.py | 1 | 15345 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from datetime import timedelta
from io import BytesIO
from operator import attrgetter
from flask import flash, session
from pytz import timezone
from sqlalchemy.orm import load_only, contains_eager, noload, joinedload, subqueryload
from indico.core.db import db
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.contributions.models.persons import ContributionPersonLink, SubContributionPersonLink
from indico.modules.events.contributions.models.principals import ContributionPrincipal
from indico.modules.events.util import serialize_person_link, ReporterBase
from indico.modules.attachments.util import get_attached_items
from indico.util.caching import memoize_request
from indico.util.date_time import format_human_timedelta, format_datetime
from indico.util.i18n import _
from indico.util.string import to_unicode
from indico.util.user import iter_acl
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import url_for
from indico.web.http_api.metadata.serializer import Serializer
from indico.web.util import jsonify_data
from MaKaC.common.timezoneUtils import DisplayTZ
def get_events_with_linked_contributions(user, from_dt=None, to_dt=None):
"""Returns a dict with keys representing event_id and the values containing
data about the user rights for contributions within the event
:param user: A `User`
:param from_dt: The earliest event start time to look for
:param to_dt: The latest event start time to look for
"""
def add_acl_data():
query = (user.in_contribution_acls
.options(load_only('contribution_id', 'roles', 'full_access', 'read_access'))
.options(noload('*'))
.options(contains_eager(ContributionPrincipal.contribution).load_only('event_id'))
.join(Contribution)
.join(Event, Event.id == Contribution.event_id)
.filter(~Contribution.is_deleted, ~Event.is_deleted, Event.starts_between(from_dt, to_dt)))
for principal in query:
roles = data[principal.contribution.event_id]
if 'submit' in principal.roles:
roles.add('contribution_submission')
if principal.full_access:
roles.add('contribution_manager')
if principal.read_access:
roles.add('contribution_access')
def add_contrib_data():
has_contrib = (EventPerson.contribution_links.any(
ContributionPersonLink.contribution.has(~Contribution.is_deleted)))
has_subcontrib = EventPerson.subcontribution_links.any(
SubContributionPersonLink.subcontribution.has(db.and_(
~SubContribution.is_deleted,
SubContribution.contribution.has(~Contribution.is_deleted))))
query = (Event.query
.options(load_only('id'))
.options(noload('*'))
.filter(~Event.is_deleted,
Event.starts_between(from_dt, to_dt),
Event.persons.any((EventPerson.user_id == user.id) & (has_contrib | has_subcontrib))))
for event in query:
data[event.id].add('contributor')
data = defaultdict(set)
add_acl_data()
add_contrib_data()
return data
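# Illustrative note (added): the returned mapping is event_id -> set of role
# strings, e.g. {1234: {'contribution_manager', 'contributor'}}; the possible
# roles are 'contribution_submission', 'contribution_manager',
# 'contribution_access' and 'contributor' (see add_acl_data/add_contrib_data).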
def serialize_contribution_person_link(person_link, is_submitter=None):
"""Serialize ContributionPersonLink to JSON-like object"""
data = serialize_person_link(person_link)
data['isSpeaker'] = person_link.is_speaker
if not isinstance(person_link, SubContributionPersonLink):
data['authorType'] = person_link.author_type.value
data['isSubmitter'] = person_link.is_submitter if is_submitter is None else is_submitter
return data
class ContributionReporter(ReporterBase):
"""Reporting and filtering actions in the contribution report."""
endpoint = '.manage_contributions'
report_link_type = 'contribution'
def __init__(self, event):
super(ContributionReporter, self).__init__(event)
self.default_report_config = {'filters': {'items': {}}}
session_empty = {None: 'No session'}
track_empty = {None: 'No track'}
type_empty = {None: 'No type'}
session_choices = {unicode(s.id): s.title for s in self.report_event.sessions}
track_choices = {unicode(t.id): to_unicode(t.getTitle()) for t in self.report_event.as_legacy.getTrackList()}
type_choices = {unicode(t.id): t.name for t in self.report_event.contribution_types}
self.filterable_items = OrderedDict([
('session', {'title': _('Session'),
'filter_choices': OrderedDict(session_empty.items() + session_choices.items())}),
('track', {'title': _('Track'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('type', {'title': _('Type'),
'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
('status', {'title': _('Status'), 'filter_choices': {'scheduled': _('Scheduled'),
'unscheduled': _('Not scheduled')}})
])
self.report_config = self._get_config()
def build_query(self):
timetable_entry_strategy = joinedload('timetable_entry')
timetable_entry_strategy.lazyload('*')
return (Contribution.query.with_parent(self.report_event)
.order_by(Contribution.friendly_id)
.options(timetable_entry_strategy,
joinedload('session'),
subqueryload('person_links'),
db.undefer('subcontribution_count'),
db.undefer('attachment_count'),
db.undefer('is_scheduled')))
def filter_report_entries(self, query, filters):
if not filters.get('items'):
return query
criteria = []
if 'status' in filters['items']:
filtered_statuses = filters['items']['status']
status_criteria = []
if 'scheduled' in filtered_statuses:
status_criteria.append(Contribution.is_scheduled)
if 'unscheduled' in filtered_statuses:
status_criteria.append(~Contribution.is_scheduled)
if status_criteria:
criteria.append(db.or_(*status_criteria))
filter_cols = {'session': Contribution.session_id,
'track': Contribution.track_id,
'type': Contribution.type_id}
for key, column in filter_cols.iteritems():
ids = set(filters['items'].get(key, ()))
if not ids:
continue
column_criteria = []
if None in ids:
column_criteria.append(column.is_(None))
if ids - {None}:
column_criteria.append(column.in_(ids - {None}))
criteria.append(db.or_(*column_criteria))
return query.filter(*criteria)
def get_contrib_report_kwargs(self):
contributions_query = self.build_query()
total_entries = contributions_query.count()
contributions = self.filter_report_entries(contributions_query, self.report_config['filters']).all()
sessions = [{'id': s.id, 'title': s.title, 'colors': s.colors} for s in self.report_event.sessions]
tracks = [{'id': int(t.id), 'title': to_unicode(t.getTitle())}
for t in self.report_event.as_legacy.getTrackList()]
total_duration = (sum((c.duration for c in contributions), timedelta()),
sum((c.duration for c in contributions if c.timetable_entry), timedelta()))
return {'contribs': contributions, 'sessions': sessions, 'tracks': tracks, 'total_entries': total_entries,
'total_duration': total_duration}
def render_contrib_report(self, contrib=None):
"""Render the contribution report template components.
:param contrib: Used in RHs responsible for CRUD operations on a
contribution.
:return: dict containing the report's entries, the fragment of
displayed entries and whether the contrib passed is displayed
in the results.
"""
contrib_report_kwargs = self.get_contrib_report_kwargs()
total_entries = contrib_report_kwargs.pop('total_entries')
tpl_contrib = get_template_module('events/contributions/management/_contribution_report.html')
tpl_reports = get_template_module('events/management/_reports.html')
contribs = contrib_report_kwargs['contribs']
filter_statistics = tpl_reports.render_filter_statistics(len(contribs), total_entries,
contrib_report_kwargs.pop('total_duration'))
return {'html': tpl_contrib.render_contrib_report(self.report_event, total_entries, **contrib_report_kwargs),
'hide_contrib': contrib not in contribs if contrib else None,
'filter_statistics': filter_statistics}
def flash_info_message(self, contrib):
flash(_("The contribution '{}' is not displayed in the list due to the enabled filters")
.format(contrib.title), 'info')
def generate_spreadsheet_from_contributions(contributions):
"""Return a tuple consisting of spreadsheet columns and respective
contribution values"""
headers = ['Id', 'Title', 'Description', 'Date', 'Duration', 'Type', 'Session', 'Track', 'Presenters', 'Materials']
rows = []
for c in sorted(contributions, key=attrgetter('friendly_id')):
contrib_data = {'Id': c.friendly_id, 'Title': c.title, 'Description': c.description,
'Duration': format_human_timedelta(c.duration),
'Date': format_datetime(c.timetable_entry.start_dt) if c.timetable_entry else None,
'Type': c.type.name if c.type else None,
'Session': c.session.title if c.session else None,
'Track': c.track.title if c.track else None,
'Materials': None,
'Presenters': ', '.join(speaker.person.full_name for speaker in c.speakers)}
attachments = []
attached_items = get_attached_items(c)
for attachment in attached_items.get('files', []):
attachments.append(attachment.absolute_download_url)
for folder in attached_items.get('folders', []):
for attachment in folder.attachments:
attachments.append(attachment.absolute_download_url)
if attachments:
contrib_data['Materials'] = ', '.join(attachments)
rows.append(contrib_data)
return headers, rows
def make_contribution_form(event):
"""Extends the contribution WTForm to add the extra fields.
Each extra field will use a field named ``custom_ID``.
:param event: The `Event` for which to create the contribution form.
:return: A `ContributionForm` subclass.
"""
from indico.modules.events.contributions.forms import ContributionForm
form_class = type(b'_ContributionForm', (ContributionForm,), {})
for custom_field in event.contribution_fields:
field_impl = custom_field.mgmt_field
if field_impl is None:
# field definition is not available anymore
continue
name = 'custom_{}'.format(custom_field.id)
setattr(form_class, name, field_impl.create_wtf_field())
return form_class
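# Minimal usage sketch (added; purely illustrative):
# form_cls = make_contribution_form(event)
# form = form_cls() # the form carries one custom_<id> field per contribution
# # field of the event whose field definition is still available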
def contribution_type_row(contrib_type):
template = get_template_module('events/contributions/management/_types_table.html')
html = template.types_table_row(contrib_type=contrib_type)
return jsonify_data(html_row=html, flash=False)
@memoize_request
def get_contributions_with_user_as_submitter(event, user):
"""Get a list of contributions in which the `user` has submission rights"""
contribs = (Contribution.query.with_parent(event)
.options(joinedload('acl_entries'))
.filter(Contribution.acl_entries.any(ContributionPrincipal.has_management_role('submit')))
.all())
return {c for c in contribs if any(user in entry.principal for entry in iter_acl(c.acl_entries))}
def serialize_contribution_for_ical(contrib):
return {
'_fossil': 'contributionMetadata',
'id': contrib.id,
'startDate': contrib.timetable_entry.start_dt if contrib.timetable_entry else None,
'endDate': contrib.timetable_entry.end_dt if contrib.timetable_entry else None,
'url': url_for('contributions.display_contribution', contrib, _external=True),
'title': contrib.title,
'location': contrib.venue_name,
'roomFullname': contrib.room_name,
'speakers': [serialize_person_link(x) for x in contrib.speakers],
'description': contrib.description
}
def get_contribution_ical_file(contrib):
data = {'results': serialize_contribution_for_ical(contrib)}
serializer = Serializer.create('ics')
return BytesIO(serializer(data))
class ContributionDisplayReporter(ContributionReporter):
endpoint = '.contribution_list'
report_link_type = 'contribution_display'
def render_contribution_list(self):
"""Render the contribution report template components.
:return: dict containing the report's entries, the fragment of
displayed entries and whether the contrib passed is displayed
in the results.
"""
contrib_report_kwargs = self.get_contrib_report_kwargs()
total_entries = contrib_report_kwargs.pop('total_entries')
contribs = contrib_report_kwargs['contribs']
tpl = get_template_module('events/contributions/display/_contribution_list.html')
tpl_reports = get_template_module('events/management/_reports.html')
tz = timezone(DisplayTZ(session.user, self.report_event.as_legacy).getDisplayTZ())
return {'html': tpl.render_contribution_list(self.report_event, tz, contribs),
'counter': tpl_reports.render_displayed_entries_fragment(len(contribs), total_entries)}
| gpl-3.0 | 8,352,198,904,003,203,000 | 46.953125 | 119 | 0.647703 | false |
deathglitch/metarigging | python/rigging/rig_cog.py | 1 | 1030 | import pymel.core as pm
import grip
import metautil.miscutil as miscutil
import metautil.rigutil as rigutil
import metautil.shapeutil as shapeutil
def rig_cog_chain(start_joint, end_joint, scale):
    """Create a COG grip for each joint in the chain from start_joint to
    end_joint and parent-constrain the joint to its grip. Returns a dict with
    the joint chain, start/end joints, created grips and parent constraints."""
start_joint = pm.PyNode(start_joint)
end_joint = pm.PyNode(end_joint)
chain = miscutil.get_nodes_between(start_joint, end_joint, lambda x: isinstance(x, pm.nt.Joint))
grips = []
parent_consts = []
for x, joint in enumerate(chain):
cog_shape_node = shapeutil.create_poly_shape_cog(scale)
grip_node = grip.Grip.create(joint, shape=cog_shape_node, name_root = 'cog')
grip_node.setAttr('rotateOrder', 2)
parent_const = pm.parentConstraint(grip_node, joint, w=1, mo=1)
grips.append(grip_node)
parent_consts.append(parent_const)
result = {}
result['chain'] = chain
result['start_joint'] = start_joint
result['end_joint'] = end_joint
result['grips'] = grips
result['parent_consts'] = parent_consts
return result | mit | 4,577,479,774,416,454,000 | 33.366667 | 100 | 0.658252 | false |
DavidPurcell/murano_temp | murano_tempest_tests/tests/api/application_catalog/test_categories.py | 1 | 4319 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import testtools
from murano_tempest_tests.tests.api.application_catalog import base
from murano_tempest_tests import utils
class TestCategories(base.BaseApplicationCatalogIsolatedAdminTest):
@classmethod
def resource_setup(cls):
super(TestCategories, cls).resource_setup()
application_name = utils.generate_name(cls.__name__)
cls.abs_archive_path, dir_with_archive, archive_name = \
utils.prepare_package(application_name)
cls.package = cls.application_catalog_client.upload_package(
application_name, archive_name, dir_with_archive,
{"categories": [], "tags": [], 'is_public': False})
name = utils.generate_name(cls.__name__)
cls.category = cls.application_catalog_client.create_category(name)
@classmethod
def resource_cleanup(cls):
os.remove(cls.abs_archive_path)
cls.application_catalog_client.delete_package(cls.package['id'])
cls.application_catalog_client.delete_category(cls.category['id'])
super(TestCategories, cls).resource_cleanup()
@testtools.testcase.attr('smoke')
def test_get_list_categories(self):
categories_list = self.application_catalog_client.list_categories()
self.assertIsInstance(categories_list, list)
@testtools.testcase.attr('smoke')
def test_create_and_delete_category(self):
name = utils.generate_name('create_and_delete_category')
categories_list = self.application_catalog_client.list_categories()
self.assertNotIn(name, categories_list)
category = self.application_catalog_client.create_category(name)
self.assertEqual(name, category['name'])
categories_list = self.application_catalog_client.list_categories()
self.assertIn(name, categories_list)
self.application_catalog_client.delete_category(category['id'])
categories_list = self.application_catalog_client.list_categories()
self.assertNotIn(name, categories_list)
@testtools.testcase.attr('smoke')
def test_get_category(self):
category = self.application_catalog_client.get_category(
self.category['id'])
self.assertEqual(self.category['id'], category['id'])
self.assertEqual(self.category['name'], category['name'])
@testtools.testcase.attr('smoke')
def test_add_package_to_new_category_and_remove_it_from_category(self):
category = self.application_catalog_client.get_category(
self.category['id'])
self.assertEqual(0, category['package_count'])
post_body = [
{
"op": "add",
"path": "/categories",
"value": [category['name']]
}
]
package = self.application_catalog_client.update_package(
self.package['id'], post_body)
self.assertIn(self.category['name'], package['categories'])
category = self.application_catalog_client.get_category(
self.category['id'])
self.assertEqual(1, category['package_count'])
self.assertEqual(1, len(category['packages']))
post_body = [
{
"op": "remove",
"path": "/categories",
"value": [category['name']]
}
]
package = self.application_catalog_client.update_package(
self.package['id'], post_body)
self.assertNotIn(self.category['name'], package['categories'])
category = self.application_catalog_client.get_category(
self.category['id'])
self.assertEqual(0, category['package_count'])
self.assertEqual(0, len(category['packages']))
| apache-2.0 | 1,697,376,191,033,877,000 | 40.528846 | 78 | 0.65015 | false |
tgfjt/Sublime-clipboardRound | clipboardround.py | 1 | 3963 | import sublime, sublime_plugin
history = []
menuitems = []
history_index = 0
def getClipboardData():
try:# win32
import win32clipboard
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData()
win32clipboard.CloseClipboard()
except:
pass
try:# windows7
import ctypes
ctypes.windll.user32.OpenClipboard(None)
pc = ctypes.windll.user32.GetClipboardData(1)
data = ctypes.c_char_p(pc).value.decode()
ctypes.windll.user32.CloseClipboard()
except:
pass
try:# mac
import subprocess
p = subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
try:# xclip
import subprocess
p = subprocess.Popen(['xclip', '-o'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
try:# xsel
import subprocess
        p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
if not 'data' in locals():
return False
else:
return data
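# Illustrative usage sketch (added for clarity; not part of the original plugin).
# getClipboardData() returns the clipboard text, or False when none of the
# platform-specific backends above is available.
def _clipboard_example():
    data = getClipboardData()
    if data is False:
        print('clipboardRound: no clipboard backend available')
    else:
        print('clipboardRound: clipboard currently holds %r' % data)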
def setClipboardHistory():
global history_index, menuitems, history
data = getClipboardData()
if data == False:
return None
elif data in history:
return None
elif data == '':
return None
settings = sublime.load_settings('Sublime-clipboardRound.sublime-settings')
limit = settings.get('limit')
if not history or history[0] != data:
history.insert(0, data)
history_index = 0
menuitems = history
if limit < len(history):
for i in range(len(history) - limit):
history.pop()
menuitems.pop()
return None
def pasteClipboardHistory(self, text):
self.view.run_command('undo')
self.view.run_command('paste')
sublime.set_clipboard(text)
class Clip_round_showCommand(sublime_plugin.TextCommand):
def on_chosen(self, index):
global flag
if index == -1:
return
sublime.set_clipboard(menuitems[index])
self.view.run_command('paste')
flag = True
def run(self, edit):
global menuitems
if menuitems == []:
return None
self.view.window().show_quick_panel(menuitems, self.on_chosen, sublime.MONOSPACE_FONT)
class Clip_round_prevCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history_index
if history:
clip = sublime.get_clipboard()
history_index = min(history_index + 1, len(history) - 1)
sublime.set_clipboard(history[history_index])
sublime.set_timeout(lambda:
pasteClipboardHistory(self, clip), 0)
class Clip_round_nextCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history_index
if history:
clip = sublime.get_clipboard()
history_index = max(history_index - 1, 0)
sublime.set_clipboard(history[history_index])
sublime.set_timeout(lambda:
pasteClipboardHistory(self, clip), 0)
class Clip_round_clearCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history, history_index, menuitems, data
del menuitems[:]
del history[:]
history_index = 0
sublime.set_clipboard('')
print('clipboardRound: clear Clipboard History.')
class ClipboardRoundListener(sublime_plugin.EventListener):
def on_query_context(self, view, *args):
sublime.set_timeout(lambda:
setClipboardHistory(), 0)
return None
def on_text_command(self, view, command, *args):
sublime.set_timeout(lambda:
setClipboardHistory(), 0)
| mit | -1,768,667,037,384,106,800 | 26.143836 | 94 | 0.604088 | false |
wavelets/ThinkStats2 | code/density.py | 1 | 2742 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import random
import scipy.stats
import brfss
import first
import thinkstats2
import thinkplot
def Summarize(data):
mean = data.mean()
std = data.std()
median = thinkstats2.Median(data)
print('mean', mean)
print('std', std)
print('median', median)
print('skewness', thinkstats2.Skewness(data))
print('pearson skewness',
thinkstats2.PearsonMedianSkewness(data))
return mean, median
def ComputeSkewnesses():
def VertLine(x, y):
thinkplot.Plot([x, x], [0, y], color='0.6', linewidth=1)
live, firsts, others = first.MakeFrames()
data = live.totalwgt_lb.dropna()
print('Birth weight')
mean, median = Summarize(data)
y = 0.35
VertLine(mean, y)
thinkplot.Text(mean-0.15, 0.1*y, 'mean', horizontalalignment='right')
VertLine(median, y)
thinkplot.Text(median+0.1, 0.1*y, 'median', horizontalalignment='left')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='birth weight')
thinkplot.Save(root='density_totalwgt_kde',
xlabel='lbs',
ylabel='PDF')
df = brfss.ReadBrfss(nrows=None)
data = df.wtkg2.dropna()
print('Adult weight')
mean, median = Summarize(data)
y = 0.02499
VertLine(mean, y)
thinkplot.Text(mean+1, 0.1*y, 'mean', horizontalalignment='left')
VertLine(median, y)
thinkplot.Text(median-1.5, 0.1*y, 'median', horizontalalignment='right')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='adult weight')
thinkplot.Save(root='density_wtkg2_kde',
xlabel='kg',
ylabel='PDF',
xlim=[0, 200])
def MakePdfExample():
# mean and var of women's heights in cm, from the BRFSS
mean, var = 163, 52.8
std = math.sqrt(var)
# make a PDF and compute a density, FWIW
pdf = thinkstats2.GaussianPdf(mean, std)
print(pdf.Density(mean + std))
# make a PMF and plot it
thinkplot.PrePlot(2)
thinkplot.Pdf(pdf, label='Gaussian')
# make a sample, make an estimated PDF, and plot it
sample = [random.gauss(mean, std) for i in range(100)]
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf, label='sample KDE')
thinkplot.Save(root='pdf_example',
xlabel='Height (cm)',
ylabel='Density')
def main():
thinkstats2.RandomSeed(17)
MakePdfExample()
ComputeSkewnesses()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,302,440,263,507,806,000 | 25.114286 | 76 | 0.633844 | false |
ric2b/Vivaldi-browser | chromium/build/fuchsia/qemu_target.py | 1 | 7359 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements commands for running and interacting with Fuchsia on QEMU."""
import boot_data
import common
import emu_target
import logging
import md5
import os
import platform
import shutil
import subprocess
import sys
import tempfile
from common import GetEmuRootForPlatform, EnsurePathExists
# Virtual networking configuration data for QEMU.
GUEST_NET = '192.168.3.0/24'
GUEST_IP_ADDRESS = '192.168.3.9'
HOST_IP_ADDRESS = '192.168.3.2'
GUEST_MAC_ADDRESS = '52:54:00:63:5e:7b'
# Capacity of the system's blobstore volume.
EXTENDED_BLOBSTORE_SIZE = 1073741824 # 1GB
class QemuTarget(emu_target.EmuTarget):
def __init__(self, output_dir, target_cpu, system_log_file,
emu_type, cpu_cores, require_kvm, ram_size_mb):
super(QemuTarget, self).__init__(output_dir, target_cpu,
system_log_file)
self._emu_type=emu_type
self._cpu_cores=cpu_cores
self._require_kvm=require_kvm
self._ram_size_mb=ram_size_mb
def _GetEmulatorName(self):
return self._emu_type
def _IsKvmEnabled(self):
if self._require_kvm:
if (sys.platform.startswith('linux') and
os.access('/dev/kvm', os.R_OK | os.W_OK)):
if self._target_cpu == 'arm64' and platform.machine() == 'aarch64':
return True
if self._target_cpu == 'x64' and platform.machine() == 'x86_64':
return True
return False
def _BuildQemuConfig(self):
boot_data.AssertBootImagesExist(self._GetTargetSdkArch(), 'qemu')
emu_command = [
'-kernel', EnsurePathExists(
boot_data.GetTargetFile('qemu-kernel.kernel',
self._GetTargetSdkArch(),
boot_data.TARGET_TYPE_QEMU)),
'-initrd', EnsurePathExists(
boot_data.GetBootImage(self._output_dir, self._GetTargetSdkArch(),
boot_data.TARGET_TYPE_QEMU)),
'-m', str(self._ram_size_mb),
'-smp', str(self._cpu_cores),
# Attach the blobstore and data volumes. Use snapshot mode to discard
# any changes.
'-snapshot',
'-drive', 'file=%s,format=qcow2,if=none,id=blobstore,snapshot=on' %
_EnsureBlobstoreQcowAndReturnPath(self._output_dir,
self._GetTargetSdkArch()),
'-device', 'virtio-blk-pci,drive=blobstore',
# Use stdio for the guest OS only; don't attach the QEMU interactive
# monitor.
'-serial', 'stdio',
'-monitor', 'none',
]
# Configure the machine to emulate, based on the target architecture.
if self._target_cpu == 'arm64':
emu_command.extend([
'-machine','virt',
])
netdev_type = 'virtio-net-pci'
else:
emu_command.extend([
'-machine', 'q35',
])
netdev_type = 'e1000'
# Configure virtual network. It is used in the tests to connect to
# testserver running on the host.
netdev_config = 'user,id=net0,net=%s,dhcpstart=%s,host=%s' % \
(GUEST_NET, GUEST_IP_ADDRESS, HOST_IP_ADDRESS)
self._host_ssh_port = common.GetAvailableTcpPort()
netdev_config += ",hostfwd=tcp::%s-:22" % self._host_ssh_port
emu_command.extend([
'-netdev', netdev_config,
'-device', '%s,netdev=net0,mac=%s' % (netdev_type, GUEST_MAC_ADDRESS),
])
# Configure the CPU to emulate.
# On Linux, we can enable lightweight virtualization (KVM) if the host and
# guest architectures are the same.
if self._IsKvmEnabled():
kvm_command = ['-enable-kvm', '-cpu', 'host,migratable=no']
else:
logging.warning('Unable to launch %s with KVM acceleration.'
% (self._emu_type) +
'The guest VM will be slow.')
if self._target_cpu == 'arm64':
kvm_command = ['-cpu', 'cortex-a53']
else:
kvm_command = ['-cpu', 'Haswell,+smap,-check,-fsgsbase']
emu_command.extend(kvm_command)
kernel_args = boot_data.GetKernelArgs(self._output_dir)
# TERM=dumb tells the guest OS to not emit ANSI commands that trigger
# noisy ANSI spew from the user's terminal emulator.
kernel_args.append('TERM=dumb')
# Construct kernel cmd line
kernel_args.append('kernel.serial=legacy')
# Don't 'reboot' the emulator if the kernel crashes
kernel_args.append('kernel.halt-on-panic=true')
emu_command.extend(['-append', ' '.join(kernel_args)])
return emu_command
def _BuildCommand(self):
qemu_exec = 'qemu-system-'+self._GetTargetSdkLegacyArch()
qemu_command = [os.path.join(GetEmuRootForPlatform(self._emu_type), 'bin',
qemu_exec)]
qemu_command.extend(self._BuildQemuConfig())
qemu_command.append('-nographic')
return qemu_command
def _ComputeFileHash(filename):
hasher = md5.new()
with open(filename, 'rb') as f:
buf = f.read(4096)
while buf:
hasher.update(buf)
buf = f.read(4096)
return hasher.hexdigest()
def _EnsureBlobstoreQcowAndReturnPath(output_dir, target_arch):
"""Returns a file containing the Fuchsia blobstore in a QCOW format,
with extra buffer space added for growth."""
qimg_tool = os.path.join(common.GetEmuRootForPlatform('qemu'),
'bin', 'qemu-img')
fvm_tool = os.path.join(common.SDK_ROOT, 'tools', 'fvm')
blobstore_path = boot_data.GetTargetFile('storage-full.blk', target_arch,
'qemu')
qcow_path = os.path.join(output_dir, 'gen', 'blobstore.qcow')
# Check a hash of the blobstore to determine if we can re-use an existing
# extended version of it.
blobstore_hash_path = os.path.join(output_dir, 'gen', 'blobstore.hash')
current_blobstore_hash = _ComputeFileHash(blobstore_path)
if os.path.exists(blobstore_hash_path) and os.path.exists(qcow_path):
if current_blobstore_hash == open(blobstore_hash_path, 'r').read():
return qcow_path
# Add some extra room for growth to the Blobstore volume.
# Fuchsia is unable to automatically extend FVM volumes at runtime so the
# volume enlargement must be performed prior to QEMU startup.
# The 'fvm' tool only supports extending volumes in-place, so make a
# temporary copy of 'blobstore.bin' before it's mutated.
extended_blobstore = tempfile.NamedTemporaryFile()
shutil.copyfile(blobstore_path, extended_blobstore.name)
subprocess.check_call([fvm_tool, extended_blobstore.name, 'extend',
'--length', str(EXTENDED_BLOBSTORE_SIZE),
blobstore_path])
# Construct a QCOW image from the extended, temporary FVM volume.
# The result will be retained in the build output directory for re-use.
subprocess.check_call([qimg_tool, 'convert', '-f', 'raw', '-O', 'qcow2',
'-c', extended_blobstore.name, qcow_path])
# Write out a hash of the original blobstore file, so that subsequent runs
# can trivially check if a cached extended FVM volume is available for reuse.
with open(blobstore_hash_path, 'w') as blobstore_hash_file:
blobstore_hash_file.write(current_blobstore_hash)
return qcow_path
| bsd-3-clause | 5,498,573,082,064,078,000 | 35.430693 | 80 | 0.636364 | false |
SitiBanc/1061_NCTU_IOMDS | Final/autoencoder_keras.py | 1 | 4171 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 14:02:01 2018
@author: jie-yu
"""
import numpy as np
np.random.seed(1337) # for reproducibility
#from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input
import matplotlib.pyplot as plt
from PIL import Image
import os
labels = np.load("img_labels.npy")
y_train = np.zeros((len(labels), 1))
# convert the one-hot label vectors to numeric class indices
for i in range(len(labels)):
    y_train[i] = np.where(labels[i] == 1)[0][0]
#image = Image.open("hw3_img.jpg")
os.chdir('D:\\Jie-Yu\\碩一上\\智慧型\\期末project\\img\\img')
filelist = os.listdir()
x = np.zeros((len(filelist),150*150))
for i in range(len(filelist)):
IMG = Image.open(filelist[i])
x[i,:]=np.array(IMG.getdata())
x_train = x.copy()
x_test = x_train.copy()
y_test = y_train.copy()
# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called
# X shape (60,000 28x28), y shape (10,000, )
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
# data pre-processing
x_train = x_train.astype('float32') / 255. - 0.5 # minmax_normalized
x_test = x_test.astype('float32') / 255. - 0.5 # minmax_normalized
x_train = x_train.reshape((x_train.shape[0], -1))
x_test = x_test.reshape((x_test.shape[0], -1))
print(x_train.shape)
print(x_test.shape)
# in order to plot in a 2D figure
encoding_dim = 2
# this is our input placeholder
input_img = Input(shape=(150*150,))
# encoder layers
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(10, activation='relu')(encoded)
encoder_output = Dense(encoding_dim)(encoded)
# decoder layers
decoded = Dense(10, activation='relu')(encoder_output)
decoded = Dense(64, activation='relu')(decoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(150*150, activation='tanh')(decoded)
# construct the autoencoder model
autoencoder = Model(input=input_img, output=decoded)
# construct the encoder model for plotting
encoder = Model(input=input_img, output=encoder_output)
# compile autoencoder
autoencoder.compile(optimizer='adam', loss='mse')
# training
autoencoder.fit(x_train, x_train,
nb_epoch=20,
batch_size=256,
shuffle=True)
"""
Epoch 20/20
60000/60000 [==============================] - 7s - loss: 0.0398
"""
# plotting
encoded_imgs = encoder.predict(x_test)
plt.scatter(encoded_imgs[:, 0], encoded_imgs[:, 1], c=y_test)
plt.colorbar()
plt.show()
def lda(X,L):
Classes = np.unique(np.array(L))#0,1,2
k = len(Classes)#k = 3
n = np.zeros((k,1))#3*1 array
C = [" "]*k #3*1 list
    M = np.mean(X, axis=0)  # overall mean of X
S = [" "]*k #3*1 list
Sw = 0
Sb = 0
for j in range(k):#3
Xj = X[np.where(L==Classes[j])[0]]
n[j] = int(Xj.shape[0])
C[j] = np.mean(Xj,axis = 0)
S[j] = 0
for i in range(int(n[j])):
aaa = np.array([Xj[i,:]-C[j]])
S[j] = S[j]+np.dot(aaa.T,aaa)
Sw = Sw+S[j]
bbb = np.array([C[j]-M])
Sb = Sb+int(n[j])*np.dot(bbb.T,bbb)
tmp = np.dot(np.linalg.inv(Sw),Sb)
LAMBDA,W = np.linalg.eig(tmp)
SortOrder = np.argsort(-LAMBDA)
# print(W)
W = W[:,SortOrder[0:1]]
Y = np.dot(X,W)
Y = -Y
return Y,W
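# Note (added): Y is the 1-D Fisher discriminant projection of X and W the
# projection vector; new encoded samples could be projected onto the same axis
# with -np.dot(new_samples, W) (the minus sign matches the flip applied above).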
Y, W = lda(encoded_imgs, np.array(y_test))  # project the encoded features down to one dimension
Y_sort = np.squeeze(Y).argsort()
Y_list = []
for i in range(len(Y_sort)):
aaa = (x_test[Y_sort[i]]+0.5)*255
Y_list.append(aaa.reshape(150,150).T.astype('uint8'))
Y_list = np.array(Y_list)
def draw_func(a,b):
start = min(a,b)
end = max(a,b)
if end-start>10:
jump = (end-start)//10
draw = Y_list[range(start,end,jump)]
draw = draw.reshape((len(range(start,end,jump)))*150,150)
else:
draw = Y_list[start:end]
draw = draw.reshape((end-start)*150,150)
draw = draw.T
Image.fromarray(draw).show()
#draw = np.array(Y_list)
draw_func(500,510)
#draw_func(500,502)
#draw_func(502,503)
| apache-2.0 | 8,473,934,033,202,787,000 | 28.659259 | 90 | 0.595554 | false |
privacyidea/privacyidea | tests/test_lib_token.py | 1 | 92108 | # -*- coding: utf-8 -*-
"""
This test file tests the lib.token methods.
The lib.token depends on the DB model and lib.user and
all lib.tokenclasses
This tests the token functions on an interface level
We start with simple database functions:
getTokens4UserOrSerial
gettokensoftype
getToken....
"""
from .base import MyTestCase, FakeAudit, FakeFlaskG
from privacyidea.lib.user import (User)
from privacyidea.lib.tokenclass import TokenClass, TOKENKIND, FAILCOUNTER_EXCEEDED, FAILCOUNTER_CLEAR_TIMEOUT
from privacyidea.lib.token import weigh_token_type
from privacyidea.lib.tokens.totptoken import TotpTokenClass
from privacyidea.models import (Token, Challenge, TokenRealm)
from privacyidea.lib.config import (set_privacyidea_config, get_token_types, delete_privacyidea_config, SYSCONF)
from privacyidea.lib.policy import set_policy, SCOPE, ACTION, delete_policy, PolicyClass, delete_policy
from privacyidea.lib.utils import b32encode_and_unicode, hexlify_and_unicode
from privacyidea.lib.error import PolicyError
import datetime
from dateutil import parser
import hashlib
import binascii
import warnings
from privacyidea.lib.token import (create_tokenclass_object,
get_tokens,
get_token_type, check_serial,
get_num_tokens_in_realm,
get_realms_of_token,
token_exist, get_token_owner, is_token_owner,
get_tokenclass_info,
get_tokens_in_resolver, get_otp,
get_token_by_otp, get_serial_by_otp,
gen_serial, init_token, remove_token,
set_realms, set_defaults, assign_token,
unassign_token, resync_token,
reset_token, set_pin, set_pin_user,
set_pin_so, enable_token,
is_token_active, set_hashlib, set_otplen,
set_count_auth, add_tokeninfo,
set_sync_window, set_count_window,
set_description, get_multi_otp,
set_max_failcount, copy_token_pin,
copy_token_user, lost_token,
check_token_list, check_serial_pass,
check_realm_pass,
check_user_pass,
get_dynamic_policy_definitions,
get_tokens_paginate,
set_validity_period_end,
set_validity_period_start, delete_tokeninfo,
import_token, get_one_token, get_tokens_from_serial_or_user,
get_tokens_paginated_generator)
from privacyidea.lib.error import (TokenAdminError, ParameterError,
privacyIDEAError, ResourceNotFoundError)
from privacyidea.lib.tokenclass import DATE_FORMAT
from dateutil.tz import tzlocal
PWFILE = "tests/testdata/passwords"
OTPKEY = "3132333435363738393031323334353637383930"
OTPKE2 = "31323334353637383930313233343536373839AA"
class TokenTestCase(MyTestCase):
"""
Test the lib.token on an interface level
"""
def test_00_create_realms(self):
self.setUp_user_realms()
def test_01_create_token(self):
for serial in self.serials:
db_token = Token(serial, tokentype="totp")
db_token.update_otpkey(self.otpkey)
db_token.save()
token = TotpTokenClass(db_token)
self.assertTrue(token.token.serial == serial, token)
self.assertTrue(token.token.tokentype == "totp",
token.token.tokentype)
self.assertTrue(token.type == "totp", token)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "TOTP", class_prefix)
self.assertTrue(token.get_class_type() == "totp", token)
# Now we create a tokenclass, without knowing, that it is TOTP
token_object = create_tokenclass_object(db_token)
# Do some tests, that we have a TotpTokenClass
self.assertTrue(token_object.type == "totp", token_object.type)
self.assertTrue(token_object.mode[0] == "authenticate",
token_object.mode)
self.assertTrue(token_object.mode[1] == "challenge",
token_object.mode)
# Test wrong type or old entry in database
# a wrong token type will create None
db_token = Token("asdf", tokentype="remnant")
db_token.update_otpkey(self.otpkey)
db_token.save()
token_object = create_tokenclass_object(db_token)
self.assertTrue(token_object is None, token_object)
# delete the token, so that we do not get confused, later
db_token.delete()
def test_02_get_tokens(self):
# get All tokens
tokenobject_list = get_tokens()
# Check if these are valid tokentypes
self.assertTrue(len(tokenobject_list) > 0, tokenobject_list)
for token_object in tokenobject_list:
self.assertTrue(token_object.type in get_token_types(),
token_object.type)
# get assigned tokens
tokenobject_list = get_tokens(assigned=True)
self.assertTrue(len(tokenobject_list) == 0, tokenobject_list)
# get unassigned tokens
tokenobject_list = get_tokens(assigned=False)
self.assertTrue(len(tokenobject_list) > 0, tokenobject_list)
# pass the wrong parameter
# This will ignore the filter!
tokenobject_list = get_tokens(assigned="True")
self.assertTrue(len(tokenobject_list) > 0, tokenobject_list)
# get tokens of type HOTP
tokenobject_list = get_tokens(tokentype="hotp")
self.assertTrue(len(tokenobject_list) == 0, tokenobject_list)
# get tokens of type TOTP
tokenobject_list = get_tokens(tokentype="totp")
self.assertTrue(len(tokenobject_list) > 0, tokenobject_list)
# Search for tokens in realm
db_token = Token("hotptoken",
tokentype="hotp",
userid=1000,
resolver=self.resolvername1,
realm=self.realm1)
db_token.update_otpkey(self.otpkey)
db_token.save()
tokenobject_list = get_tokens(realm=self.realm1)
self.assertTrue(len(tokenobject_list) == 1, tokenobject_list)
self.assertTrue(tokenobject_list[0].type == "hotp",
tokenobject_list[0].type)
# get tokens for a given serial number
tokenobject_list = get_tokens(serial="hotptoken")
self.assertTrue(len(tokenobject_list) == 1, tokenobject_list)
# ...but not in an unassigned state!
tokenobject_list = get_tokens(serial="hotptoken", assigned=False)
self.assertTrue(len(tokenobject_list) == 0, tokenobject_list)
# get the tokens for the given user
tokenobject_list = get_tokens(user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(len(tokenobject_list) == 1, tokenobject_list)
# get tokens for a given tokeninfo of the token!!!
token = init_token({"type": "yubikey",
"serial": "yk1",
"yubikey.prefix": "vv123456",
"otpkey": self.otpkey})
self.assertEqual(token.token.serial, "yk1")
tokenobject_list = get_tokens(tokeninfo={"yubikey.prefix": "vv123456"})
self.assertEqual(len(tokenobject_list), 1)
self.assertEqual(tokenobject_list[0].get_tokeninfo("yubikey.prefix"),
"vv123456")
remove_token("yk1")
# Tokeninfo with more than one entry is not supported
self.assertRaises(privacyIDEAError, get_tokens,
tokeninfo={"key1": "value1",
"key2": "value2"})
# wildcard matches do not work for the ``serial`` parameter
tokenobject_list = get_tokens(serial="hotptoke*")
self.assertEqual(len(tokenobject_list), 0)
# get tokens with a wildcard serial
tokenobject_list = get_tokens(serial_wildcard="SE*")
self.assertEqual(len(tokenobject_list), 3)
# get all tokens
tokenobject_list = get_tokens(serial_wildcard="*")
self.assertEqual(len(tokenobject_list), 4)
def test_03_get_token_type(self):
ttype = get_token_type("hotptoken")
self.assertTrue(ttype == "hotp", ttype)
# test correct behavior with wildcards
self.assertEqual(get_token_type("SE1"), "totp")
self.assertEqual(get_token_type("SE*"), "")
self.assertEqual(get_token_type("*1"), "")
self.assertEqual(get_token_type("hotptoke*"), "")
def test_04_check_serial(self):
r, nserial = check_serial("hotptoken")
self.assertFalse(r, (r, nserial))
r, nserial = check_serial("Canbeusedfor a new token")
self.assertTrue(r, (r, nserial))
def test_05_get_num_tokens_in_realm(self):
# one active token
self.assertTrue(get_num_tokens_in_realm(self.realm1) == 1,
"{0!r}".format(get_num_tokens_in_realm(self.realm1)))
# No active tokens
self.assertTrue(get_num_tokens_in_realm(self.realm1, active=False) == 0)
def test_05_get_token_in_resolver(self):
tokenobject_list = get_tokens_in_resolver(self.resolvername1)
self.assertTrue(len(tokenobject_list) > 0)
def test_06_get_realms_of_token(self):
# Return a list of realmnames for a token
self.assertTrue(get_realms_of_token("hotptoken") == [self.realm1],
"{0!s}".format(get_realms_of_token("hotptoken")))
def test_07_token_exist(self):
self.assertTrue(token_exist("hotptoken"))
self.assertFalse(token_exist("does not exist"))
self.assertFalse(token_exist(""))
def test_08_token_owner(self):
# get_token_owner
user = get_token_owner("hotptoken")
self.assertTrue(user.login == "cornelius", user)
user = get_token_owner(self.serials[0])
self.assertFalse(user)
# for non existing token
with self.assertRaises(ResourceNotFoundError):
user = get_token_owner("does not exist")
# check if the token owner is cornelius
user = User("cornelius", realm=self.realm1, resolver=self.resolvername1)
self.assertTrue(is_token_owner("hotptoken", user),
get_token_owner("hotptoken"))
self.assertFalse(is_token_owner("hotptoken", User()),
get_token_owner("hotptoken"))
self.assertFalse(is_token_owner(self.serials[1], user),
get_token_owner(self.serials[1]))
def test_09_get_tokenclass_info(self):
info = get_tokenclass_info("hotp")
self.assertTrue("user" in info, info)
self.assertTrue(info.get("type") == "hotp", info)
def test_11_get_otp(self):
otp = get_otp("hotptoken")
self.assertTrue(otp[2] == "755224", otp)
otp = get_otp(self.serials[0],
current_time=datetime.datetime(2014, 12, 4, 12, 0))
self.assertTrue(otp[2] == "938938", otp)
# the serial does not exist
with self.assertRaises(ResourceNotFoundError):
get_otp("does not exist")
def test_12_get_token_by_otp(self):
tokenobject = get_token_by_otp(get_tokens(), otp="755224")
self.assertTrue(tokenobject.token.serial == "hotptoken", tokenobject)
serial = get_serial_by_otp(get_tokens(), otp="287082")
self.assertTrue(serial == "hotptoken", serial)
# create a second HOTP token, so that we have two tokens,
# that generate the same OTP value
db_token = Token("token2",
tokentype="hotp")
db_token.update_otpkey(self.otpkey)
db_token.save()
self.assertRaises(TokenAdminError, get_serial_by_otp,
get_tokens(), "287922")
db_token.delete()
def test_14_gen_serial(self):
serial = gen_serial(tokentype="hotp")
# check the beginning of the serial
self.assertTrue("OATH0001" in serial, serial)
serial = gen_serial(tokentype="hotp", prefix="blah")
# check the beginning of the serial
self.assertTrue("blah0001" in serial, serial)
serial = gen_serial()
# check the beginning of the serial
self.assertTrue("PIUN0000" in serial, serial)
set_privacyidea_config("SerialLength", 12)
serial = gen_serial(tokentype="hotp")
self.assertTrue("OATH0001" in serial, serial)
self.assertEqual(len(serial), len("OATH") + 12)
set_privacyidea_config("SerialLength", 8)
def test_15_init_token(self):
count = get_tokens(count=True)
self.assertTrue(count == 4, count)
tokenobject = init_token({"serial": "NEW001", "type": "hotp",
"otpkey": "1234567890123456"},
user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(tokenobject.token.tokentype == "hotp",
tokenobject.token)
# Now there is one more token in the database
count = get_tokens(count=True)
self.assertTrue(count == 5, count)
# try to create unknown tokentype
self.assertRaises(TokenAdminError, init_token, {"otpkey": "1234",
"type": "never_know"})
# try to create the same token with another type
self.assertRaises(TokenAdminError, init_token, {"otpkey": "1234",
"serial": "NEW001",
"type": "totp"})
# update the existing token
self.assertTrue(tokenobject.token.otplen == 6, tokenobject.token.otplen)
tokenobject = init_token({"serial": "NEW001", "type": "hotp",
"otpkey": "1234567890123456",
"otplen": 8},
user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(tokenobject.token.otplen == 8, tokenobject.token.otplen)
# add additional realms
tokenobject = init_token({"serial": "NEW002", "type": "hotp",
"otpkey": "1234567890123456",
"realm": self.realm1})
self.assertTrue(self.realm1 in tokenobject.token.get_realms(),
tokenobject.token.get_realms())
tokenobject = init_token({"serial": "NEW003", "type": "hotp",
"otpkey": "1234567890123456"},
tokenrealms=[self.realm1])
self.assertTrue(self.realm1 in tokenobject.token.get_realms(),
tokenobject.token.get_realms())
def test_16_remove_token(self):
self.assertRaises(ParameterError, remove_token)
count1 = get_tokens(count=True)
tokenobject = init_token({"type": "hotp",
"otpkey": "1234567890123456",
"realm": self.realm1})
count2 = get_tokens(count=True)
self.assertTrue(count2 == count1 + 1, count2)
# check for the token association
token_id = tokenobject.token.id
realm_assoc = TokenRealm.query.filter(TokenRealm.token_id == \
token_id).count()
self.assertTrue(realm_assoc == 1, realm_assoc)
# Add a challenge for this token
challenge = Challenge(tokenobject.get_serial(), transaction_id="918273")
challenge.save()
chall_count = Challenge.query.filter(Challenge.serial ==
tokenobject.get_serial()).count()
self.assertTrue(chall_count == 1, chall_count)
# remove the token
count_remove = remove_token(serial=tokenobject.get_serial())
self.assertTrue(count_remove == 1, count_remove)
self.assertTrue(get_tokens(count=True) == count1)
# check for the realm association
realm_assoc = TokenRealm.query.filter(TokenRealm.token_id == \
token_id).count()
self.assertTrue(realm_assoc == 0, realm_assoc)
# check if the challenge is removed
chall_count = Challenge.query.filter(Challenge.serial ==
tokenobject.get_serial()).count()
self.assertTrue(chall_count == 0, chall_count)
def test_16_set_realms(self):
serial = "NEWREALM01"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
realms = get_realms_of_token(serial)
self.assertTrue(realms == [], "{0!s}".format(realms))
set_realms(serial, [self.realm1])
realms = get_realms_of_token(serial)
self.assertTrue(realms == [self.realm1], "{0!s}".format(realms))
remove_token(serial=serial)
realms = get_realms_of_token(serial)
self.assertTrue(realms == [], "{0!s}".format(realms))
def test_17_set_defaults(self):
serial = "SETTOKEN"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456",
"otplen": 8})
self.assertTrue(tokenobject.token.otplen == 8)
set_defaults(serial)
self.assertTrue(tokenobject.token.otplen == 6)
remove_token(serial)
def test_18_assign_token(self):
serial = "ASSTOKEN"
user = User("cornelius", resolver=self.resolvername1,
realm=self.realm1)
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
r = assign_token(serial, user, pin="1234")
self.assertTrue(r)
self.assertEqual(tokenobject.token.first_owner.user_id, "1000")
# token already assigned...
self.assertRaises(TokenAdminError, assign_token, serial,
User("shadow", realm=self.realm1))
# unassign token
r = unassign_token(serial)
self.assertTrue(r)
self.assertEqual(tokenobject.token.first_owner, None)
remove_token(serial)
# assign or unassign a token, that does not exist
self.assertRaises(ResourceNotFoundError, assign_token, serial, user)
self.assertRaises(ResourceNotFoundError, unassign_token, serial)
def test_19_reset_resync(self):
serial = "reset"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
otps = tokenobject.get_multi_otp(count=100)
self.assertTrue(tokenobject.token.count == 0)
# 20: '122407', 21: '505117', 22: '870960', 23: '139843', 24: '631376'
self.assertTrue(otps[2].get("otp").get(20) == "122407", otps[2])
self.assertTrue(tokenobject.token.count == 0)
r = resync_token(serial, "122407", "505117")
self.assertTrue(r)
self.assertTrue(tokenobject.token.count == 22, tokenobject.token.count)
tokenobject.token.failcount = 20
r = reset_token(serial)
self.assertTrue(r)
self.assertTrue(tokenobject.token.failcount == 0)
remove_token(serial)
self.assertRaises(ParameterError, reset_token)
def test_20_pin_token_so_user(self):
serial = "pins"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
# user parameter is wrong
self.assertRaises(ParameterError, set_pin, serial, None, "1234")
# user and serial is missing
self.assertRaises(ParameterError, set_pin)
# now set the pin
self.assertTrue(set_pin(serial, "1234") == 1)
self.assertTrue(tokenobject.token.check_pin("1234"))
self.assertTrue(tokenobject.token.user_pin == "")
self.assertTrue(set_pin_user(serial, "1234") == 1)
self.assertTrue(tokenobject.token.user_pin != "")
self.assertTrue(tokenobject.token.so_pin == "")
self.assertTrue(set_pin_so(serial, "1234") == 1)
self.assertTrue(tokenobject.token.so_pin != "")
remove_token(serial)
def test_21_enable_disable(self):
serial = "enable"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
# an active token does not need to be enabled
r = enable_token(serial)
self.assertTrue(r == 0, r)
r = enable_token(serial, enable=False)
self.assertTrue(r == 1, r)
self.assertTrue(tokenobject.token.active == False,
tokenobject.token.active)
self.assertFalse(is_token_active(serial))
r = enable_token(serial)
self.assertTrue(r == 1, r)
self.assertTrue(is_token_active(serial))
remove_token(serial)
with self.assertRaises(ResourceNotFoundError):
is_token_active(serial)
self.assertRaises(ParameterError, enable_token)
def test_22_set_hashlib(self):
serial = "hashlib"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
r = set_hashlib(serial=serial, hashlib="sha256")
self.assertTrue(r == 1)
hashlib = tokenobject.token.get_info()
self.assertTrue(hashlib.get("hashlib") == "sha256", hashlib)
remove_token(serial)
def test_23_set_otplen(self):
serial = "otplen"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
r = set_otplen(serial=serial, otplen=8)
self.assertTrue(r == 1)
self.assertTrue(tokenobject.token.otplen == 8)
remove_token(serial)
def test_24_set_count_auth(self):
serial = "count_auth"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
r = set_count_auth(serial=serial, count=100)
self.assertTrue(r == 1)
r = set_count_auth(serial=serial, count=101, max=True)
self.assertTrue(r == 1)
r = set_count_auth(serial=serial, count=102, success=True)
self.assertTrue(r == 1)
r = set_count_auth(serial=serial, count=103, max=True, success=True)
self.assertTrue(r == 1)
tinfo = tokenobject.token.get_info()
self.assertTrue(tinfo.get("count_auth") == "100", tinfo)
self.assertTrue(tinfo.get("count_auth_max") == "101", tinfo)
self.assertTrue(tinfo.get("count_auth_success") == "102", tinfo)
self.assertTrue(tinfo.get("count_auth_success_max") == "103", tinfo)
def test_25_add_delete_tokeninfo(self):
serial = "t1"
tokenobject = init_token({"serial": serial, "genkey": 1})
r = add_tokeninfo(serial, "something", "new")
self.assertTrue(r == 1, r)
tinfo1 = tokenobject.token.get_info()
self.assertTrue(tinfo1.get("something") == "new", tinfo1)
# delete existing tokeninfo entry
r = delete_tokeninfo(serial, "something")
self.assertEqual(r, 1)
tinfo2 = tokenobject.token.get_info()
self.assertNotIn("something", tinfo2)
# delete non-existing tokeninfo entry
r = delete_tokeninfo(serial, "somethingelse")
self.assertEqual(r, 1) # this still returns 1, because 1 token was matched!
# tokeninfo has not changed
self.assertEqual(tokenobject.token.get_info(), tinfo2)
# try to delete non-existing tokeninfo
with self.assertRaises(ResourceNotFoundError):
delete_tokeninfo('UNKNOWN-SERIAL', 'something')
remove_token(serial)
def test_26_set_sync_window(self):
serial = "t1"
tokenobject = init_token({"serial": serial, "genkey": 1})
r = set_sync_window(serial, 23)
self.assertTrue(r == 1, r)
self.assertTrue(tokenobject.token.sync_window == 23,
tokenobject.token.sync_window)
remove_token(serial)
def test_27_set_count_window(self):
serial = "t1"
tokenobject = init_token({"serial": serial, "genkey": 1})
r = set_count_window(serial, 45)
self.assertTrue(r == 1, r)
self.assertTrue(tokenobject.token.count_window == 45,
tokenobject.token.count_window)
remove_token(serial)
def test_28_set_description(self):
serial = "t1"
tokenobject = init_token({"serial": serial, "genkey": 1})
r = set_description(serial, "new description")
self.assertTrue(r == 1, r)
self.assertTrue(tokenobject.token.description == "new description",
tokenobject.token.description)
remove_token(serial)
def test_29_get_multi_otp(self):
r = get_multi_otp("hotptoken")
self.assertTrue(r.get("error") == "No count specified", r)
r = get_multi_otp("hotptoken", count=12)
self.assertTrue(r.get("result") is True, r)
self.assertTrue(len(r.get("otp")) == 12, r.get("otp"))
# unknown serial number
with self.assertRaises(ResourceNotFoundError):
get_multi_otp("unknown", count=12)
def test_30_set_max_failcount(self):
serial = "t1"
tokenobject = init_token({"serial": serial, "genkey": 1})
r = set_max_failcount(serial, 112)
self.assertTrue(r == 1, r)
self.assertTrue(tokenobject.token.maxfail == 112,
"{0!s}".format(tokenobject.token.maxfail))
remove_token(serial)
def test_31_copy_token_pin(self):
serial1 = "tcopy1"
tobject1 = init_token({"serial": serial1, "genkey": 1})
r = set_pin(serial1, "secret")
self.assertTrue(r)
serial2 = "tcopy2"
tobject2 = init_token({"serial": serial2, "genkey": 1})
r = copy_token_pin(serial1, serial2)
self.assertTrue(r)
# Now compare the pinhash
self.assertTrue(tobject1.token.pin_hash == tobject2.token.pin_hash,
"{0!s} <> {1!s}".format(tobject1.token.pin_hash,
tobject2.token.pin_hash))
remove_token(serial1)
remove_token(serial2)
def test_32_copy_token_user(self):
serial1 = "tcopy1"
tobject1 = init_token({"serial": serial1, "genkey": 1})
r = assign_token(serial1, User(login="cornelius", realm=self.realm1))
self.assertTrue(r, r)
serial2 = "tcopy2"
tobject2 = init_token({"serial": serial2, "genkey": 1})
r = copy_token_user(serial1, serial2)
assert isinstance(tobject2, TokenClass)
self.assertEqual(tobject2.token.first_owner.user_id, "1000")
self.assertEqual(tobject2.token.first_owner.resolver, self.resolvername1)
# check if the realms where copied:
self.assertTrue(tobject2.get_realms() == [self.realm1])
# check exceptions
self.assertRaises(TokenAdminError, copy_token_user, serial1, "none")
self.assertRaises(TokenAdminError, copy_token_user, "none", serial2)
remove_token(serial1)
remove_token(serial2)
def test_33_lost_token(self):
# create a token with a user
serial1 = "losttoken"
tobject1 = init_token({"serial": serial1, "genkey": 1})
r = assign_token(serial1, User(login="cornelius", realm=self.realm1))
self.assertTrue(r, r)
# call the losttoken
with self.assertRaises(ResourceNotFoundError):
lost_token("doesnotexist")
validity = 10
r = lost_token(serial1)
end_date = datetime.datetime.now(tzlocal()) + datetime.timedelta(days=validity)
"""
r = {'end_date': '16/12/14 23:59',
'pin': True, 'valid_to': 'xxxx', 'init': True, 'disable': 1,
'user': True, 'serial': 'lostlosttoken', 'password':
'EC7YRgr)ss9LcE*('}
"""
self.assertTrue(r.get("pin"), r)
self.assertTrue(r.get("init"), r)
self.assertTrue(r.get("user"), r)
self.assertTrue(r.get("serial") == "lost{0!s}".format(serial1), r)
self.assertTrue(parser.parse(r.get("end_date")) <= end_date, r)
remove_token("losttoken")
remove_token("lostlosttoken")
def test_34_check_token_list(self):
# We cannot authenticate with an unknown token type
# Such a token will not be returned by get_tokens...
db_token = Token("serial72", tokentype="unknown")
db_token.save()
# set a matching OTP PIN for our hotp token
set_pin("hotptoken", "hotppin40")
tokenobject_list = get_tokens()
# the HOTP token has the correct PIN but wrong otp value
# The failcounter is increased
hotp_tokenobject = get_tokens(serial="hotptoken")[0]
hotp_tokenobject.set_pin("hotppin")
hotp_tokenobject.save()
old_failcount = hotp_tokenobject.token.failcount
res, reply = check_token_list(tokenobject_list, "hotppin40123456")
self.assertFalse(res)
failcount = hotp_tokenobject.token.failcount
self.assertTrue(failcount == old_failcount + 1, (old_failcount,
failcount))
# if there is no token with at least a correct pin, we increase all
# failcounters
hotp_tokenobject = get_tokens(serial="hotptoken")[0]
old_failcount = hotp_tokenobject.token.failcount
res, reply = check_token_list(tokenobject_list, "everythingiswrong")
self.assertFalse(res)
failcount = hotp_tokenobject.token.failcount
self.assertTrue(failcount == old_failcount + 1, (old_failcount,
failcount))
# Now we do some successful auth with the HOTP token
tokenobject_list = get_tokens(serial="hotptoken")
""" Truncated
Count Hexadecimal Decimal HOTP
0 4c93cf18 1284755224 755224
1 41397eea 1094287082 287082
2 82fef30 137359152 359152
3 66ef7655 1726969429 969429
4 61c5938a 1640338314 338314
5 33c083d4 868254676 254676
6 7256c032 1918287922 287922
7 4e5b397 82162583 162583
8 2823443f 673399871 399871
9 2679dc69 645520489 520489
10 403154
11 481090
12 868912
13 736127
"""
hotp_tokenobject = tokenobject_list[0]
old_counter = hotp_tokenobject.token.count
res, reply = check_token_list(tokenobject_list, "hotppin399871")
self.assertTrue(res)
# check if the counter increased
self.assertTrue(old_counter < hotp_tokenobject.token.count,
(old_counter, hotp_tokenobject.token.count))
# but was it also increased in the database?
tokenobject_list_new = get_tokens(serial="hotptoken")
hotp_tokenobject_new = tokenobject_list_new[0]
self.assertTrue(old_counter < hotp_tokenobject_new.token.count,
(old_counter, hotp_tokenobject.token.count))
# False authentication
old_failcount = hotp_tokenobject.token.failcount
res, reply = check_token_list(tokenobject_list, "hotppin000000")
self.assertFalse(res)
# check the failcounter increased
self.assertTrue(old_failcount + 1 == hotp_tokenobject.token.failcount)
# Successful auth. The failcount needs to be reset
res, reply = check_token_list(tokenobject_list, "hotppin520489")
self.assertTrue(res)
self.assertTrue(hotp_tokenobject.token.failcount == 0)
# Now we disable the hotp_tokenobject. If the token is disabled,
# we must not be able to authenticate anymore with this very token.
# But if the OTP value is valid, the counter is increased, anyway!
old_counter = hotp_tokenobject.token.count
hotp_tokenobject.enable(False)
res, reply = check_token_list(tokenobject_list, "hotppin403154")
self.assertFalse(res)
self.assertTrue("Token is disabled" in reply.get("message"))
self.assertEqual(old_counter + 1, hotp_tokenobject.token.count)
# enable the token again
hotp_tokenobject.enable(True)
# Set HOTP as challenge response
set_policy("check_token_list_CR", scope=SCOPE.AUTH, action="{0!s}=HOTP".format(
ACTION.CHALLENGERESPONSE))
hotp_tokenobject.add_tokeninfo("next_pin_change", u"{0!s}".format(datetime.datetime(2019, 1, 7, 0, 0)))
hotp_tokenobject.add_tokeninfo("next_password_change", u"{0!s}".format(datetime.datetime(2019, 1, 7, 0, 0)))
# Now the HOTP is a valid C/R token
res, reply = check_token_list(tokenobject_list, "hotppin")
self.assertFalse(res)
self.assertTrue("multi_challenge" in reply)
transaction_id = reply.get("transaction_id")
res, reply = check_token_list(tokenobject_list, "481090", options={"transaction_id": transaction_id})
self.assertTrue(res)
# Create a challenge, but deactivate the token in the meantime:
hotp_tokenobject.token.counter = 9
hotp_tokenobject.save()
res, reply = check_token_list(tokenobject_list, "hotppin")
self.assertFalse(res)
self.assertTrue("multi_challenge" in reply)
transaction_id = reply.get("transaction_id")
# deactivate token
hotp_tokenobject.enable(False)
hotp_tokenobject.save()
res, reply = check_token_list(tokenobject_list, "481090", options={"transaction_id": transaction_id})
self.assertFalse(res)
# Have a challenge response token, but it is disabled.
hotp_tokenobject.token.counter = 9
hotp_tokenobject.save()
res, reply = check_token_list(tokenobject_list, "hotppin")
self.assertFalse(res)
self.assertFalse("multi_challenge" in reply)
self.assertEqual(reply.get("message"), "No active challenge response token found")
hotp_tokenobject.enable()
hotp_tokenobject.save()
delete_policy("check_token_list_CR")
def test_35_check_serial_pass(self):
hotp_tokenobject = get_tokens(serial="hotptoken")[0]
hotp_tokenobject.set_pin("hotppin")
hotp_tokenobject.token.count = 10
hotp_tokenobject.save()
with self.assertRaises(ResourceNotFoundError):
check_serial_pass("XXXXXXXXX", "password")
#r = get_multi_otp("hotptoken", count=20)
#self.assertTrue(r == 0, r)
# 0: '520489', 1: '403154', 2: '481090', 3: '868912',
# 4: '736127', 5: '229903', 6: '436521', 7: '186581',
# 8: '447589', 9: '903435', 10: '578337', 11: '328281',
# 12: '191635', 13: '184416', 14: '574561', 15: '797908'
r, reply = check_serial_pass("hotptoken", "hotppin481090")
self.assertTrue(r)
# the same OTP value must not match!
# cko
r, reply = check_serial_pass("hotptoken", "hotppin481090")
self.assertFalse(r)
def test_36_check_user_pass(self):
hotp_tokenobject = get_tokens(serial="hotptoken")[0]
user = User("shadow", realm=self.realm1)
r, reply = check_user_pass(user, "passwordasdf")
self.assertFalse(r)
self.assertTrue(reply.get("message") == 'The user has no tokens '
'assigned', "{0!s}".format(reply))
user = User("cornelius", realm=self.realm1)
r, reply = check_user_pass(user, "hotppin868912")
self.assertTrue(r)
r, reply = check_user_pass(user, "hotppin736127")
#r = get_multi_otp("hotptoken", count=20)
#self.assertTrue(r == 0, r)
# 0: '520489', 1: '403154', 2: '481090', 3: '868912',
# 4: '736127', 5: '229903', 6: '436521', 7: '186581',
# 8: '447589', 9: '903435', 10: '578337', 11: '328281',
# 12: '191635', 13: '184416', 14: '574561', 15: '797908'
def test_36b_check_nonascii_pin(self):
user = User("cornelius", self.realm1)
serial = "nonasciipin"
token = init_token({"type": "hotp",
"otpkey": self.otpkey,
"pin": u"ünicøde",
"serial": serial}, user)
r = check_user_pass(user, u"µröng287082")
self.assertEqual(r[0], False)
self.assertEqual(r[1]['message'], 'wrong otp pin')
r = check_user_pass(user, u"ünicøde287082")
self.assertEqual(r[0], True)
r = check_user_pass(user, u"ünicøde666666")
self.assertEqual(r[0], False)
self.assertEqual(r[1]['message'], 'wrong otp value')
remove_token(serial)
def test_37_challenge(self):
# We create a challenge by first sending the PIN of the HOTP token
# then we answer the challenge by sending the OTP.
num1 = Challenge.query.filter(Challenge.serial == "hotptoken").count()
# The correct PIN will create a challenge
r, reply = check_serial_pass("hotptoken", "hotppin")
self.assertTrue(r is False, r)
num2 = Challenge.query.filter(Challenge.serial == "hotptoken").count()
# check that the challenge is created
self.assertTrue(num1 + 1 == num2, (num1, num2))
self.assertTrue(type(reply) == dict, reply)
transaction_id = reply.get("transaction_id","")
self.assertTrue(len(transaction_id) > 10, reply)
# Challenge Response, with the transaction id
r, reply = check_serial_pass("hotptoken", "436521",
{"transaction_id": transaction_id})
self.assertTrue(r)
self.assertTrue(reply.get("message") == "Found matching challenge",
reply)
def test_40_dynamic_policies(self):
p = get_dynamic_policy_definitions()
self.assertTrue("user" in p, p)
self.assertTrue("admin" in p, p)
p = get_dynamic_policy_definitions(scope="admin")
self.assertTrue("enrollTOTP" in p, p)
self.assertTrue("enrollHOTP" in p, p)
self.assertTrue("enrollPW" in p, p)
# The SPASS token can have his own PIN policy
self.assertTrue("spass_otp_pin_contents" in p, p)
self.assertTrue("spass_otp_pin_maxlength" in p, p)
self.assertTrue("spass_otp_pin_minlength" in p, p)
def test_41_get_tokens_paginate(self):
# create some tokens
for serial in ["S1", "S2", "S3", "A8", "B", "X"]:
init_token({"serial": serial, "type": "hotp",
"otpkey": self.otpkey,
"realm": self.realm1})
token_count = 15
# return pagination
tokens = get_tokens_paginate(sortby=Token.serial, page=1, psize=5)
self.assertTrue(len(tokens.get("tokens")) == 5,
len(tokens.get("tokens")))
self.assertEqual(tokens.get("count"), token_count)
self.assertTrue(tokens.get("next") == 2, tokens.get("next"))
self.assertTrue(tokens.get("prev") is None, tokens.get("prev"))
tokens = get_tokens_paginate(sortby=Token.serial, page=2, psize=5)
self.assertEqual(len(tokens.get("tokens")), 5)
self.assertEqual(tokens.get("count"), token_count)
self.assertEqual(tokens.get("next"), 3)
self.assertEqual(tokens.get("prev"), 1)
tokens = get_tokens_paginate(sortby=Token.serial, page=3, psize=5)
self.assertEqual(len(tokens.get("tokens")), 4)
self.assertEqual(tokens.get("count"), token_count)
self.assertEqual(tokens.get("next"), None)
self.assertEqual(tokens.get("prev"), 2)
# Test filtering and sorting
tokens = get_tokens_paginate(assigned=True, page=1)
self.assertTrue(len(tokens.get("tokens")) == 2,
len(tokens.get("tokens")))
self.assertTrue(tokens.get("count") == 2, tokens.get("count"))
self.assertTrue(tokens.get("next") is None, tokens.get("next"))
self.assertTrue(tokens.get("prev") is None, tokens.get("prev"))
tokens = get_tokens_paginate(sortby=Token.serial, page=1,
sortdir="desc")
self.assertTrue(len(tokens.get("tokens")), token_count-1)
self.assertEqual(tokens.get("count"), token_count)
self.assertTrue(tokens.get("next") is None, tokens.get("next"))
self.assertTrue(tokens.get("prev") is None, tokens.get("prev"))
# Test to retrieve tokens of user cornelius
tokens = get_tokens_paginate(user=User("cornelius", "realm1"))
self.assertTrue(len(tokens.get("tokens")) == 2,
len(tokens.get("tokens")))
# test to retrieve tokens with not strict serial matching
tokens = get_tokens_paginate(serial="hotp*")
self.assertTrue(len(tokens.get("tokens")) == 1,
len(tokens.get("tokens")))
def test_42_sort_tokens(self):
# return pagination
tokendata = get_tokens_paginate(sortby=Token.serial, page=1, psize=5)
self.assertTrue(len(tokendata.get("tokens")) == 5,
len(tokendata.get("tokens")))
# sort ascending
tokendata = get_tokens_paginate(sortby=Token.serial, page=1, psize=100,
sortdir="asc")
self.assertTrue(len(tokendata.get("tokens")) >= 9,
len(tokendata.get("tokens")))
tokens = tokendata.get("tokens")
self.assertTrue(tokens[0].get("serial") == "A8",
tokens[0])
self.assertTrue(tokens[-1].get("serial") == "hotptoken",
tokens[-1])
# Reverse sorting
tokendata = get_tokens_paginate(sortby=Token.serial, page=1, psize=100,
sortdir="desc")
tokens = tokendata.get("tokens")
self.assertTrue(tokens[0].get("serial") == "hotptoken")
self.assertTrue(tokens[-1].get("serial") == "A8")
# sort with string column
tokendata = get_tokens_paginate(sortby="serial", page=1, psize=100,
sortdir="asc")
tokens = tokendata.get("tokens")
self.assertTrue(tokens[-1].get("serial") == "hotptoken")
self.assertTrue(tokens[0].get("serial") == "A8")
tokendata = get_tokens_paginate(sortby="serial", page=1, psize=100,
sortdir="desc")
tokens = tokendata.get("tokens")
self.assertTrue(tokens[0].get("serial") == "hotptoken")
self.assertTrue(tokens[-1].get("serial") == "A8")
def test_43_encryptpin(self):
serial = "ENC01"
# encrypt pin on init
init_token({"serial": serial,
"genkey": 1,
"pin": "Hallo",
"encryptpin": True})
tokenobj = get_tokens(serial=serial)[0]
self.assertEqual(tokenobj.token.pin_hash[0:2], "@@")
# set a hashed pin
set_pin(serial, "test", encrypt_pin=False)
tokenobj = get_tokens(serial=serial)[0]
self.assertTrue(tokenobj.token.pin_hash[0:2] != "@@")
# set an encrypted PIN
set_pin(serial, "test", encrypt_pin=True)
tokenobj = get_tokens(serial=serial)[0]
self.assertEqual(tokenobj.token.pin_hash[0:2], "@@")
# assign the token with a PIN
assign_token(serial, User(login="cornelius", realm=self.realm1),
pin="WellWell", encrypt_pin=True)
# check if pinhash starts with "@@" to indicate the encryption
tokenobj = get_tokens(serial=serial)[0]
self.assertEqual(tokenobj.token.pin_hash[0:2], "@@")
def test_44_validity_period(self):
serial = "VAL01"
init_token({"serial": serial,
"genkey": 1,
"pin": "Hallo"})
tokenobj = get_tokens(serial=serial)[0]
r = set_validity_period_start(serial, None, "2015-05-22T20:21+0200")
self.assertEqual(r, 1)
r = set_validity_period_end(serial, None, "2015-05-28T20:22+0200")
self.assertEqual(r, 1)
vp = tokenobj.get_validity_period_start()
self.assertEqual(vp, "2015-05-22T20:21+0200")
vp = tokenobj.get_validity_period_end()
self.assertEqual(vp, "2015-05-28T20:22+0200")
def test_45_check_realm_pass(self):
self.setUp_user_realms()
# create a bunch of tokens in the realm
# disabled token
serial = "inactive"
init_token({"serial": serial,
"otpkey": self.otpkey,
"pin": serial}, User("cornelius", self.realm1))
enable_token(serial, False)
# not assigned token
serial = "not_assigned"
init_token({"serial": serial,
"otpkey": self.otpkey,
"pin": serial}, tokenrealms=[self.realm1])
# a normal token
serial = "assigned"
init_token({"serial": serial,
"otpkey": self.otpkey,
"pin": serial}, User("cornelius", self.realm1))
# check if the tokens were created accordingly
tokens = get_tokens(realm=self.realm1, tokentype="hotp",
assigned=False, serial="not_assigned")
self.assertEqual(len(tokens), 1)
tokens = get_tokens(realm=self.realm1, tokentype="hotp",
active=False, serial="inactive")
self.assertEqual(len(tokens), 1)
tokens = get_tokens(realm=self.realm1, tokentype="hotp",
active=True, assigned=True, serial="assigned")
self.assertEqual(len(tokens), 1)
# an inactive token does not match
r = check_realm_pass(self.realm1, "inactive" + "287082")
self.assertEqual(r[0], False)
# The remaining tokens are checked, but the pin does not match,
# so we get "wrong otp pin"
self.assertEqual(r[1].get("message"), "wrong otp pin")
# an unassigned token does not match
r = check_realm_pass(self.realm1, "unassigned" + "287082")
self.assertEqual(r[0], False)
# The remaining tokens are checked, but the pin does not match,
# so we get "wrong otp pin"
self.assertEqual(r[1].get("message"), "wrong otp pin")
# a token assigned to a user does match
r = check_realm_pass(self.realm1, "assigned" + "287082")
# One token in the realm matches the pin and the OTP value
self.assertEqual(r[0], True)
# The reply message reports the single matching token
self.assertEqual(r[1].get("message"), "matching 1 tokens")
# try an unknown realm
r = check_realm_pass(self.realm2, "assigned" + self.valid_otp_values[2])
self.assertFalse(r[0])
self.assertEqual(r[1].get("message"), "There is no active and assigned "
"token in this realm")
# check for optional parameter exclude/include type
r = check_realm_pass(self.realm1, 'assigned' + self.valid_otp_values[2],
exclude_types='hotp')
self.assertFalse(r[0])
self.assertEqual(r[1].get("message"), "There is no active and assigned "
"token in this realm, included types: None, "
"excluded types: hotp")
r = check_realm_pass(self.realm1, 'assigned' + self.valid_otp_values[2],
include_types='totp')
self.assertFalse(r[0])
self.assertEqual(r[1].get("message"), "There is no active and assigned "
"token in this realm, included types: totp, "
"excluded types: None")
r = check_realm_pass(self.realm1, 'assigned' + self.valid_otp_values[2],
exclude_types='totp')
self.assertTrue(r[0])
self.assertEqual(r[1].get("message"), "matching 1 tokens")
# check that include_types precedes exclude_types
r = check_realm_pass(self.realm1, 'assigned' + self.valid_otp_values[3],
include_types='hotp', exclude_types='hotp')
self.assertTrue(r[0])
self.assertEqual(r[1].get("message"), "matching 1 tokens")
remove_token(serial='not_assigned')
remove_token(serial='inactive')
remove_token(serial='assigned')
def test_46_init_with_validity_period(self):
token = init_token({"type": "hotp",
"genkey": 1,
"validity_period_start": "2014-05-22T22:00+0200",
"validity_period_end": "2014-10-23T23:00+0200"})
self.assertEqual(token.type, "hotp")
start = token.get_tokeninfo("validity_period_start")
end = token.get_tokeninfo("validity_period_end")
self.assertEqual(start, "2014-05-22T22:00+0200")
self.assertEqual(end, "2014-10-23T23:00+0200")
def test_47_use_yubikey_and_hotp(self):
# fix problem https://github.com/privacyidea/privacyidea/issues/279
user = User("cornelius", self.realm1)
token = init_token({"type": "hotp",
"otpkey": self.otpkey,
"pin": "pin47"}, user)
token = init_token({"type": "yubikey",
"otpkey": self.otpkey,
"pin": "pin47"}, user)
r = check_user_pass(user, "pin47888888")
self.assertEqual(r[0], False)
self.assertEqual(r[1].get('message'), "wrong otp value")
def test_48_challenge_request_two_tokens(self):
# test the challenge request of two tokens. One token is active,
# the other is disabled
user = User("cornelius", self.realm1)
pin = "test48"
token_a = init_token({"serial": "CR2A",
"type": "hotp",
"otpkey": self.otpkey,
"pin": pin}, user)
token_b = init_token({"serial": "CR2B",
"type": "hotp",
"otpkey": self.otpkey,
"pin": pin}, user)
# disable token_b
enable_token("CR2B", False)
# Allow HOTP for chalresp
set_policy("test48", scope=SCOPE.AUTH, action="{0!s}=HOTP".format(
ACTION.CHALLENGERESPONSE))
r, r_dict = check_token_list([token_a, token_b], pin, user)
self.assertFalse(r)
self.assertTrue("message" in r_dict)
self.assertTrue("transaction_id" in r_dict)
transaction_id = r_dict.get("transaction_id")
# Now we try authenticate:
r, r_dict = check_token_list([token_a, token_b], self.valid_otp_values[1], user,
options={"transaction_id": transaction_id})
self.assertTrue(r)
# New challenge
r, r_dict = check_token_list([token_a, token_b], pin, user)
self.assertTrue("transaction_id" in r_dict)
transaction_id = r_dict.get("transaction_id")
# Now we run a bunch of failing responses to the challenge
for i in range(0, 10):
r, r_dict = check_token_list([token_a, token_b], self.valid_otp_values[1], user,
options={"transaction_id": transaction_id})
self.assertFalse(r)
# Now we try the next value, which fails
r, r_dict = check_token_list([token_a, token_b], self.valid_otp_values[2], user,
options={"transaction_id": transaction_id})
self.assertFalse(r)
self.assertEqual(r_dict.get("message"), "Challenge matches, but token is not fit for challenge. Failcounter exceeded")
remove_token("CR2A")
remove_token("CR2B")
delete_policy("test48")
def test_49_challenge_request_multiple_tokens(self):
# Test the challenges for multiple active tokens
user = User("cornelius", self.realm1)
pin = "test49"
token_a = init_token({"serial": "CR2A",
"type": "hotp",
"otpkey": OTPKE2,
"pin": pin}, user)
token_b = init_token({"serial": "CR2B",
"type": "hotp",
"otpkey": self.otpkey,
"pin": pin}, user)
set_policy("test49", scope=SCOPE.AUTH, action="{0!s}=HOTP".format(
ACTION.CHALLENGERESPONSE))
# both tokens will be a valid challenge response token!
r, r_dict = check_token_list([token_a, token_b], pin, user)
multi_challenge = r_dict.get("multi_challenge")
transaction_id = r_dict.get("transaction_id")
self.assertEqual(multi_challenge[0].get("serial"), "CR2A")
self.assertEqual(transaction_id,
multi_challenge[0].get("transaction_id"))
self.assertEqual(transaction_id,
multi_challenge[1].get("transaction_id"))
self.assertEqual(multi_challenge[1].get("serial"), "CR2B")
# There are two challenges in the database
r = Challenge.query.filter(Challenge.transaction_id ==
transaction_id).all()
self.assertEqual(len(r), 2)
# Check the second response to the challenge, the second step in
# challenge response:
r, r_dict = check_token_list([token_a, token_b], "287082", user,
options={"transaction_id": transaction_id})
# The response is successful
self.assertTrue(r)
# The matching token was CR2B
self.assertEqual(r_dict.get("serial"), "CR2B")
# All challenges of the transaction_id have been deleted on
# successful authentication
r = Challenge.query.filter(Challenge.transaction_id ==
transaction_id).all()
self.assertEqual(len(r), 0)
remove_token("CR2A")
remove_token("CR2B")
delete_policy("test49")
def test_50_otpkeyformat(self):
otpkey = b"\x01\x02\x03\x04\x05\x06\x07\x08\x0A"
checksum = hashlib.sha1(otpkey).digest()[:4]
# base32check(otpkey) = 'FIQVUTQBAIBQIBIGA4EAU==='
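# i.e. base32check(key) = base32(sha1(key)[:4] + key): the first four bytes of the
# SHA-1 digest are prepended as a checksum before base32 encoding, which is what the
# valid and deliberately corrupted encodings below exercise.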
# hex encoding
tokenobject = init_token({"serial": "NEW001", "type": "hotp",
"otpkey": binascii.hexlify(otpkey),
"otpkeyformat": "hex"},
user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(tokenobject.token.tokentype == "hotp",
tokenobject.token)
self.assertEqual(tokenobject.token.get_otpkey().getKey(),
binascii.hexlify(otpkey))
remove_token("NEW001")
# unknown encoding
self.assertRaisesRegexp(ParameterError,
"Unknown OTP key format",
init_token,
{"serial": "NEW001",
"type": "hotp",
"otpkey": binascii.hexlify(otpkey),
"otpkeyformat": "foobar"},
user=User(login="cornelius",
realm=self.realm1))
remove_token("NEW001")
# successful base32check encoding
base32check_encoding = b32encode_and_unicode(checksum + otpkey).strip("=")
tokenobject = init_token({"serial": "NEW002", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(tokenobject.token.tokentype == "hotp",
tokenobject.token)
self.assertEqual(tokenobject.token.get_otpkey().getKey(),
binascii.hexlify(otpkey))
remove_token("NEW002")
# successful base32check encoding, but lower case
base32check_encoding = b32encode_and_unicode(checksum + otpkey).strip("=")
base32check_encoding = base32check_encoding.lower()
tokenobject = init_token({"serial": "NEW002", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(tokenobject.token.tokentype == "hotp",
tokenobject.token)
self.assertEqual(tokenobject.token.get_otpkey().getKey(),
binascii.hexlify(otpkey))
remove_token("NEW002")
# base32check encoding with padding
base32check_encoding = b32encode_and_unicode(checksum + otpkey)
tokenobject = init_token({"serial": "NEW003", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius",
realm=self.realm1))
self.assertTrue(tokenobject.token.tokentype == "hotp",
tokenobject.token)
self.assertEqual(tokenobject.token.get_otpkey().getKey(),
binascii.hexlify(otpkey))
remove_token("NEW003")
# invalid base32check encoding (incorrect checksum due to typo)
base32check_encoding = b32encode_and_unicode(checksum + otpkey)
base32check_encoding = "A" + base32check_encoding[1:]
self.assertRaisesRegexp(ParameterError,
"Incorrect checksum",
init_token,
{"serial": "NEW004", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius", realm=self.realm1))
remove_token("NEW004") # TODO: Token is created anyway?
# invalid base32check encoding (missing four characters => incorrect checksum)
base32check_encoding = b32encode_and_unicode(checksum + otpkey)
base32check_encoding = base32check_encoding[:-4]
self.assertRaisesRegexp(ParameterError,
"Incorrect checksum",
init_token,
{"serial": "NEW005", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius", realm=self.realm1))
remove_token("NEW005") # TODO: Token is created anyway?
# invalid base32check encoding (too many =)
base32check_encoding = b32encode_and_unicode(checksum + otpkey)
base32check_encoding = base32check_encoding + "==="
self.assertRaisesRegexp(ParameterError,
"Invalid base32",
init_token,
{"serial": "NEW006", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius", realm=self.realm1))
remove_token("NEW006") # TODO: Token is created anyway?
# invalid base32check encoding (wrong characters)
base32check_encoding = b32encode_and_unicode(checksum + otpkey)
base32check_encoding = "1" + base32check_encoding[1:]
self.assertRaisesRegexp(ParameterError,
"Invalid base32",
init_token,
{"serial": "NEW006", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius", realm=self.realm1))
remove_token("NEW006") # TODO: Token is created anyway?
# invalid key (too short)
base32check_encoding = b32encode_and_unicode(b'Yo')
self.assertRaisesRegexp(ParameterError,
"Too short",
init_token,
{"serial": "NEW006", "type": "hotp",
"otpkey": base32check_encoding,
"otpkeyformat": "base32check"},
user=User(login="cornelius", realm=self.realm1))
remove_token("NEW006")
def test_51_tokenkind(self):
# A normal token will be of kind "software"
tok = init_token({"type": "totp", "otpkey": self.otpkey})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.SOFTWARE)
tok.delete_token()
# A token, that is imported, will be of kind "hardware"
tok = init_token({"type": "totp", "otpkey": self.otpkey},
tokenkind=TOKENKIND.HARDWARE)
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.HARDWARE)
tok.delete_token()
# A yubikey initialized as HOTP is hardware
tok = init_token({"type": "hotp", "otpkey": self.otpkey,
"serial": "UBOM111111"})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.HARDWARE)
tok.delete_token()
# A yubikey and yubicloud tokentype is hardware
tok = init_token({"type": "yubikey", "otpkey": self.otpkey})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.HARDWARE)
tok.delete_token()
tok = init_token({"type": "yubico",
"yubico.tokenid": "123456789012"})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.HARDWARE)
tok.delete_token()
# 4eyes, radius and remote are virtual tokens
tok = init_token({"type": "radius", "radius.identifier": "1",
"radius.user": "hans"})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.VIRTUAL)
tok.delete_token()
tok = init_token({"type": "remote", "remote.server": "1",
"radius.user": "hans"})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.VIRTUAL)
tok.delete_token()
tok = init_token({"type": "4eyes", "4eyes": "realm1",
"separator": ","})
kind = tok.get_tokeninfo("tokenkind")
self.assertEqual(kind, TOKENKIND.VIRTUAL)
tok.delete_token()
def test_52_import_token(self):
tok = import_token("IMP001",
{"type": "totp",
"timeShift": "-122",
"timeStep": "30",
"otpkey": self.otpkey,
"counter": "121212"})
self.assertEqual(tok.get_tokeninfo("timeShift"), "-122")
self.assertEqual(tok.get_tokeninfo("timeStep"), "30")
self.assertEqual(tok.get_otp_count(), 121212)
remove_token("IMP001")
def test_53_import_token(self):
# Import token from a file with a user object
tok = import_token("IMP002",
{"type": "totp",
"otpkey": self.otpkey,
"user": {"username": "cornelius",
"resolver": self.resolvername1,
"realm": self.realm1}})
self.assertEqual(tok.get_user_id(), "1000")
self.assertEqual(tok.get_user_displayname(), (u'cornelius_realm1', u'Cornelius '))
remove_token("IMP002")
def test_54_helper_functions(self):
user = User("cornelius", self.realm1)
# unassign all tokens of cornelius, assign S1 and S2
unassign_token(serial=None, user=user)
assign_token(serial="S1", user=user)
assign_token(serial="S2", user=user)
self.assertEqual(get_one_token(serial="S1", user=None).token.serial, "S1")
self.assertEqual(get_one_token(serial="hotptoken", user=None).token.serial, "hotptoken")
self.assertEqual(get_one_token(serial="S1", user=user).token.serial, "S1")
with self.assertRaises(ResourceNotFoundError):
get_one_token(serial="doesnotexist", user=None)
with self.assertRaises(ResourceNotFoundError):
get_one_token(serial="hotptoken", user=user)
with self.assertRaises(ResourceNotFoundError):
get_one_token(serial="doesnotexist", user=user)
with self.assertRaises(ResourceNotFoundError):
get_one_token(serial="doesnotexist", user=user)
# exact match
with self.assertRaises(ResourceNotFoundError):
get_one_token(serial="*", user=None)
# more than 1 token
with self.assertRaises(ParameterError):
get_one_token(user=user)
# get_tokens_from_serial_or_user tests
shadow = User("shadow", self.realm1)
self.assertEqual(len(get_tokens_from_serial_or_user(serial=None, user=user)), 2)
self.assertEqual(len(get_tokens_from_serial_or_user(serial=None, user=shadow)), 0)
self.assertEqual(len(get_tokens_from_serial_or_user(serial="S1", user=user)), 1)
self.assertEqual(len(get_tokens_from_serial_or_user(serial="S2", user=user)), 1)
self.assertEqual(len(get_tokens_from_serial_or_user(serial="hotptoken", user=None)), 1)
with self.assertRaises(ResourceNotFoundError):
get_tokens_from_serial_or_user(serial="S*", user=None)
with self.assertRaises(ResourceNotFoundError):
get_tokens_from_serial_or_user(serial="doesnotexist", user=None)
with self.assertRaises(ResourceNotFoundError):
get_tokens_from_serial_or_user(serial="S1", user=shadow)
unassign_token(serial=None, user=user)
def test_55_get_tokens_paginated_generator(self):
def flatten_tokens(l):
return [token.token.id for l2 in l for token in l2]
# serial72 token has invalid type. Check behavior and remove it.
self.assertEqual(list(get_tokens_paginated_generator(serial_wildcard="serial*")), [[]])
Token.query.filter_by(serial="serial72").delete()
all_matching_tokens = get_tokens(serial_wildcard="S*")
lists1 = list(get_tokens_paginated_generator(serial_wildcard="S*"))
self.assertEqual(len(lists1), 1)
self.assertEqual(len(lists1[0]), 6)
lists2 = list(get_tokens_paginated_generator(serial_wildcard="S*", psize=2))
self.assertEqual(len(lists2), 3)
self.assertEqual(len(lists2[0]), 2)
self.assertEqual(len(lists2[1]), 2)
self.assertEqual(len(lists2[2]), 2)
lists3 = list(get_tokens_paginated_generator(serial_wildcard="S*", psize=3))
self.assertEqual(len(lists3), 2)
self.assertEqual(len(lists3[0]), 3)
self.assertEqual(len(lists3[1]), 3)
lists4 = list(get_tokens_paginated_generator(serial_wildcard="S*", psize=4))
self.assertEqual(len(lists4), 2)
self.assertEqual(len(lists4[0]), 4)
self.assertEqual(len(lists4[1]), 2)
lists5 = list(get_tokens_paginated_generator(serial_wildcard="S*", psize=6))
self.assertEqual(len(lists5), 1)
self.assertEqual(len(lists5[0]), 6)
self.assertEqual(set([t.token.id for t in all_matching_tokens]), set(flatten_tokens(lists1)))
self.assertEqual(flatten_tokens(lists1), flatten_tokens(lists2))
self.assertEqual(flatten_tokens(lists2), flatten_tokens(lists3))
self.assertEqual(flatten_tokens(lists3), flatten_tokens(lists4))
self.assertEqual(flatten_tokens(lists4), flatten_tokens(lists5))
lists6 = list(get_tokens_paginated_generator(serial_wildcard="*DOESNOTEXIST*"))
self.assertEqual(lists6, [])
def test_56_get_tokens_paginated_generator_removal(self):
all_serials = set(t.token.serial for t in get_tokens(serial_wildcard="S*"))
# Test proper behavior if a matching token is deleted while paginating
gen = get_tokens_paginated_generator(serial_wildcard="S*", psize=3)
list1 = next(gen)
remove_token(list1[0].token.serial)
list2 = next(gen)
# Check that we did not miss any tokens
self.assertEqual(set(t.token.serial for t in list1 + list2), all_serials)
def test_0057_check_invalid_serial(self):
# This is an invalid serial, which will trigger an exception
self.assertRaises(Exception, reset_token, "hans wurst")
self.assertRaises(Exception, init_token,
{"serial": "invalid/chars",
"genkey": 1})
def test_57_registration_token_no_auth_counter(self):
# Test, that a registration token is deleted even if no_auth_counter is used.
from privacyidea.lib.config import set_privacyidea_config, delete_privacyidea_config
set_privacyidea_config("no_auth_counter", 1)
tok = init_token({"type": "registration"})
serial = tok.token.serial
detail = tok.get_init_detail()
reg_password = detail.get("registrationcode")
r, rdetail = check_token_list([tok], reg_password)
self.assertTrue(r)
self.assertEqual(rdetail["message"], "matching 1 tokens")
delete_privacyidea_config("no_auth_counter")
# Check that the token is deleted
toks = get_tokens(serial=serial)
self.assertEqual(len(toks), 0)
def test_58_check_old_pin(self):
serial = "pins"
tokenobject = init_token({"serial": serial,
"otpkey": "1234567890123456"})
# now set the old pin
from privacyidea.lib.crypto import hash
tokenobject.token.pin_hash = hash("1234", b"1234567890")
tokenobject.token.pin_seed = hexlify_and_unicode(b"1234567890")
tokenobject.token.save()
r = tokenobject.token.check_pin("1234")
self.assertTrue(r)
remove_token(serial)
def test_59_weigh_token_types(self):
class dummy_token(object):
def __init__(self, type):
self.type = type
self.assertEqual(1000, weigh_token_type(dummy_token("push")))
self.assertTrue(weigh_token_type(dummy_token("push")) > weigh_token_type(dummy_token("hotp")))
self.assertTrue(weigh_token_type(dummy_token("push")) > weigh_token_type(dummy_token("HOTP")))
self.assertTrue(weigh_token_type(dummy_token("PUSH")) > weigh_token_type(dummy_token("hotp")))
class TokenOutOfBandTestCase(MyTestCase):
def test_00_create_realms(self):
self.setUp_user_realms()
def test_01_failcounter_no_increase(self):
# The fail counter for tiqr tokens will not increase, since this
# is a tokenmode outofband.
user = User(login="cornelius", realm=self.realm1)
pin1 = "pin1"
token1 = init_token({"serial": pin1, "pin": pin1,
"type": "tiqr", "genkey": 1}, user=user)
r = token1.get_failcount()
self.assertEqual(r, 0)
r, r_dict = check_token_list([token1], pin1, user=user, options={})
self.assertFalse(r)
transaction_id = r_dict.get("transaction_id")
# Now we check the status of the challenge several times and verify that the
# failcounter is not increased:
for i in range(1, 10):
r, r_dict = check_token_list([token1], "", user=user, options={"transaction_id": transaction_id})
self.assertFalse(r)
self.assertEqual(r_dict.get("type"), "tiqr")
r = token1.get_failcount()
self.assertEqual(r, 0)
# Now set the challenge to be answered and recheck:
Challenge.query.filter(Challenge.transaction_id == transaction_id).update({"otp_valid": 1})
r, r_dict = check_token_list([token1], "", user=user, options={"transaction_id": transaction_id})
self.assertTrue(r)
self.assertEqual(r_dict.get("message"), "Found matching challenge")
remove_token(pin1)
class TokenFailCounterTestCase(MyTestCase):
"""
Test the lib.token on an interface level
"""
def test_00_create_realms(self):
self.setUp_user_realms()
def test_01_failcounter_max_hotp(self):
# Check that we cannot authenticate with a token that has reached the
# maximum failcounter
user = User(login="cornelius", realm=self.realm1)
token = init_token({"serial": "test47", "pin": "test47",
"type": "hotp", "otpkey": OTPKEY},
user=user)
""" Truncated
Count Hexadecimal Decimal HOTP
0 4c93cf18 1284755224 755224
1 41397eea 1094287082 287082
2 82fef30 137359152 359152
3 66ef7655 1726969429 969429
4 61c5938a 1640338314 338314
5 33c083d4 868254676 254676
6 7256c032 1918287922 287922
7 4e5b397 82162583 162583
8 2823443f 673399871 399871
9 2679dc69 645520489 520489
10 403154
11 481090
12 868912
13 736127
"""
res, reply = check_user_pass(user, "test47287082")
self.assertTrue(res)
# Set the failcounter to maximum failcount
token.set_failcount(10)
# Authentication must fail, since the failcounter is reached
res, reply = check_user_pass(user, "test47359152")
self.assertFalse(res)
self.assertEqual(reply.get("message"), "matching 1 tokens, "
"Failcounter exceeded")
remove_token("test47")
def test_02_failcounter_max_totp(self):
# Check that we cannot authenticate with a token that has reached the
# maximum failcounter
user = User(login="cornelius", realm=self.realm1)
pin = "testTOTP"
token = init_token({"serial": pin, "pin": pin,
"type": "totp", "otpkey": OTPKEY},
user=user)
"""
47251644 942826
47251645 063321
47251646 306773
47251647 722053
47251648 032819
47251649 705493
47251650 589836
"""
res, reply = check_user_pass(user, pin + "942826",
options={"initTime": 47251644 * 30})
self.assertTrue(res)
# Set the failcounter to maximum failcount
token.set_failcount(10)
# Authentication must fail, since the failcounter is reached
res, reply = check_user_pass(user, pin + "032819",
options={"initTime": 47251648 * 30})
self.assertFalse(res)
self.assertEqual(reply.get("message"), "matching 1 tokens, "
"Failcounter exceeded")
remove_token(pin)
def test_03_inc_failcounter_of_all_tokens(self):
# If a user has more than one token and authenticates with wrong OTP
# PIN, the failcounter on all tokens should be increased
user = User(login="cornelius", realm=self.realm1)
pin1 = "pin1"
pin2 = "pin2"
token1 = init_token({"serial": pin1, "pin": pin1,
"type": "hotp", "genkey": 1}, user=user)
token2 = init_token({"serial": pin2, "pin": pin2,
"type": "hotp", "genkey": 1}, user=user)
# Authenticate with pin1 will increase first failcounter
res, reply = check_user_pass(user, pin1 + "000000")
self.assertEqual(res, False)
self.assertEqual(reply.get("message"), "wrong otp value")
self.assertEqual(token1.token.failcount, 1)
self.assertEqual(token2.token.failcount, 0)
# Authenticate with a wrong PIN will increase all failcounters
res, reply = check_user_pass(user, "XXX" + "000000")
self.assertEqual(res, False)
self.assertEqual(reply.get("message"), "wrong otp pin")
self.assertEqual(token1.token.failcount, 2)
self.assertEqual(token2.token.failcount, 1)
remove_token(pin1)
remove_token(pin2)
def test_04_reset_all_failcounters(self):
from privacyidea.lib.policy import (set_policy, PolicyClass, SCOPE,
ACTION)
from flask import g
set_policy("reset_all", scope=SCOPE.AUTH,
action=ACTION.RESETALLTOKENS)
user = User(login="cornelius", realm=self.realm1)
pin1 = "pin1"
pin2 = "pin2"
token1 = init_token({"serial": pin1, "pin": pin1,
"type": "spass"}, user=user)
token2 = init_token({"serial": pin2, "pin": pin2,
"type": "spass"}, user=user)
token1.inc_failcount()
token2.inc_failcount()
token2.inc_failcount()
self.assertEqual(token1.token.failcount, 1)
self.assertEqual(token2.token.failcount, 2)
g.policy_object = PolicyClass()
g.audit_object = FakeAudit()
g.client_ip = None
g.serial = None
options = {"g": g}
check_token_list([token1, token2], pin1, user=user,
options=options, allow_reset_all_tokens=True)
self.assertEqual(token1.token.failcount, 0)
self.assertEqual(token2.token.failcount, 0)
# check with tokens without users
        unassign_token(pin1)
        unassign_token(pin2)
# After unassigning we need to set the PIN again
token1.set_pin(pin1)
token2.set_pin(pin2)
token1.inc_failcount()
token2.inc_failcount()
token2.inc_failcount()
self.assertEqual(token1.token.failcount, 1)
self.assertEqual(token2.token.failcount, 2)
check_token_list([token1, token2], pin1, options=options,
allow_reset_all_tokens=True)
self.assertEqual(token1.token.failcount, 0)
self.assertEqual(token2.token.failcount, 0)
# Clean up
remove_token(pin1)
remove_token(pin2)
def test_05_reset_failcounter(self):
tok = init_token({"type": "hotp",
"serial": "test05",
"otpkey": self.otpkey})
# Set failcounter clear timeout to 1 minute
set_privacyidea_config(FAILCOUNTER_CLEAR_TIMEOUT, 1)
tok.token.count = 10
tok.set_pin("hotppin")
tok.set_failcount(10)
exceeded_timestamp = datetime.datetime.now(tzlocal()) - datetime.timedelta(minutes=1)
tok.add_tokeninfo(FAILCOUNTER_EXCEEDED, exceeded_timestamp.strftime(DATE_FORMAT))
# OTP value #11
res, reply = check_token_list([tok], "hotppin481090")
self.assertTrue(res)
set_privacyidea_config(FAILCOUNTER_CLEAR_TIMEOUT, 0)
remove_token("test05")
def test_06_reset_failcounter_out_of_sync(self):
# Reset fail counter of a token that is out of sync
# The fail counter will reset, even if the token is out of sync, since the
# autoresync is handled in the tokenclass.authenticate.
tok = init_token({"type": "hotp",
"serial": "test06",
"otpkey": self.otpkey})
set_privacyidea_config("AutoResyncTimeout", "300")
set_privacyidea_config("AutoResync", 1)
tok.set_pin("hotppin")
tok.set_count_window(2)
res, reply = check_token_list([tok], "hotppin{0!s}".format(self.valid_otp_values[0]))
self.assertTrue(res)
        # Now we set the failcounter and the exceeded time.
tok.set_failcount(10)
exceeded_timestamp = datetime.datetime.now(tzlocal()) - datetime.timedelta(minutes=1)
tok.add_tokeninfo(FAILCOUNTER_EXCEEDED, exceeded_timestamp.strftime(DATE_FORMAT))
set_privacyidea_config(FAILCOUNTER_CLEAR_TIMEOUT, 1)
# authentication with otp value #3 will fail
res, reply = check_token_list([tok], "hotppin{0!s}".format(self.valid_otp_values[3]))
self.assertFalse(res)
# authentication with otp value #4 will resync and succeed
res, reply = check_token_list([tok], "hotppin{0!s}".format(self.valid_otp_values[4]))
self.assertTrue(res)
self.assertEqual(tok.get_failcount(), 0)
set_privacyidea_config(FAILCOUNTER_CLEAR_TIMEOUT, 0)
delete_privacyidea_config("AutoResyncTimeout")
delete_privacyidea_config("AutoResync")
remove_token("test06")
def test_07_reset_failcounter_on_pin_only(self):
tok = init_token({"type": "hotp",
"serial": "test07",
"otpkey": self.otpkey})
# Set failcounter clear timeout to 1 minute
set_privacyidea_config(FAILCOUNTER_CLEAR_TIMEOUT, 1)
tok.token.count = 10
tok.set_pin("hotppin")
tok.set_failcount(10)
exceeded_timestamp = datetime.datetime.now(tzlocal()) - datetime.timedelta(minutes=1)
tok.add_tokeninfo(FAILCOUNTER_EXCEEDED, exceeded_timestamp.strftime(DATE_FORMAT))
# by default, correct PIN + wrong OTP value does not reset the failcounter
res, reply = check_token_list([tok], "hotppin123456")
self.assertEqual(tok.get_failcount(), 10)
self.assertFalse(res)
# with the corresponding config option ...
set_privacyidea_config(SYSCONF.RESET_FAILCOUNTER_ON_PIN_ONLY, "True")
# ... correct PIN + wrong OTP resets the failcounter ...
res, reply = check_token_list([tok], "hotppin123456")
self.assertEqual(tok.get_failcount(), 0)
# ... but authentication still fails
self.assertFalse(res)
set_privacyidea_config(FAILCOUNTER_CLEAR_TIMEOUT, 0)
delete_privacyidea_config(SYSCONF.RESET_FAILCOUNTER_ON_PIN_ONLY)
remove_token("test07")
class PINChangeTestCase(MyTestCase):
"""
Test the check_token_list from lib.token on an interface level
"""
def test_00_create_realms(self):
self.setUp_user_realms()
# Set a policy to change the pin every 10d
set_policy("every10d", scope=SCOPE.ENROLL, action="{0!s}=10d".format(ACTION.CHANGE_PIN_EVERY))
# set policy for chalresp
set_policy("chalresp", scope=SCOPE.AUTH, action="{0!s}=hotp".format(ACTION.CHALLENGERESPONSE))
# Change PIN via validate
set_policy("viaValidate", scope=SCOPE.AUTH, action=ACTION.CHANGE_PIN_VIA_VALIDATE)
def test_01_successfully_change_pin(self):
"""
Authentication per challenge response with an HOTP token and then
do a successful PIN reset
"""
g = FakeFlaskG()
g.client_ip = "10.0.0.1"
g.policy_object = PolicyClass()
g.audit_object = FakeAudit()
user_obj = User("cornelius", realm=self.realm1)
# remove all tokens of cornelius
remove_token(user=user_obj)
tok = init_token({"type": "hotp",
"otpkey": self.otpkey, "pin": "test",
"serial": "PINCHANGE"}, tokenrealms=["r1"], user=user_obj)
tok2 = init_token({"type": "hotp",
"otpkey": self.otpkey, "pin": "fail",
"serial": "NOTNEEDED"}, tokenrealms=["r1"], user=user_obj)
# Set, that the token needs to change the pin
tok.set_next_pin_change("-1d")
# Check it
self.assertTrue(tok.is_pin_change())
# Trigger the first auth challenge by sending the PIN
r, reply_dict = check_token_list([tok, tok2], "test", user=user_obj, options={"g": g})
self.assertFalse(r)
self.assertEqual('please enter otp: ', reply_dict.get("message"))
transaction_id = reply_dict.get("transaction_id")
# Now send the correct OTP value
r, reply_dict = check_token_list([tok, tok2], self.valid_otp_values[1], user=user_obj,
options={"transaction_id": transaction_id,
"g": g})
self.assertFalse(r)
self.assertEqual("Please enter a new PIN", reply_dict.get("message"))
transaction_id = reply_dict.get("transaction_id")
# Now send a new PIN
newpin = "test2"
r, reply_dict = check_token_list([tok, tok2], newpin, user=user_obj,
options={"transaction_id": transaction_id,
"g": g})
self.assertFalse(r)
self.assertEqual("Please enter the new PIN again", reply_dict.get("message"))
transaction_id = reply_dict.get("transaction_id")
# Now send the new PIN a 2nd time
r, reply_dict = check_token_list([tok, tok2], newpin, user=user_obj,
options={"transaction_id": transaction_id,
"g": g})
self.assertTrue(r)
self.assertEqual("PIN successfully set.", reply_dict.get("message"))
self.assertFalse(tok.is_pin_change())
# Run an authentication with the new PIN
r, reply_dict = check_token_list([tok, tok2], "{0!s}{1!s}".format(newpin, self.valid_otp_values[2]),
user=user_obj, options={"g": g})
self.assertTrue(r)
self.assertFalse(reply_dict.get("pin_change"))
self.assertTrue("next_pin_change" in reply_dict)
def test_02_failed_change_pin(self):
"""
Authentication with an HOTP token and then fail to
change pin, since we present two different PINs.
"""
g = FakeFlaskG()
g.client_ip = "10.0.0.1"
g.policy_object = PolicyClass()
g.audit_object = FakeAudit()
user_obj = User("cornelius", realm=self.realm1)
# remove all tokens of cornelius
remove_token(user=user_obj)
tok = init_token({"type": "hotp",
"otpkey": self.otpkey, "pin": "test",
"serial": "PINCHANGE"}, tokenrealms=["r1"], user=user_obj)
tok2 = init_token({"type": "hotp",
"otpkey": self.otpkey, "pin": "fail",
"serial": "NOTNEEDED"}, tokenrealms=["r1"], user=user_obj)
# Set, that the token needs to change the pin
tok.set_next_pin_change("-1d")
# Check it
self.assertTrue(tok.is_pin_change())
# successfully authenticate, but thus trigger a PIN change
r, reply_dict = check_token_list([tok, tok2], "test{0!s}".format(self.valid_otp_values[1]),
user=user_obj, options={"g": g})
self.assertFalse(r)
self.assertEqual("Please enter a new PIN", reply_dict.get("message"))
transaction_id = reply_dict.get("transaction_id")
# Now send a new PIN
newpin = "test2"
r, reply_dict = check_token_list([tok, tok2], newpin, user=user_obj,
options={"transaction_id": transaction_id,
"g": g})
self.assertFalse(r)
self.assertEqual("Please enter the new PIN again", reply_dict.get("message"))
transaction_id = reply_dict.get("transaction_id")
# Now send the new PIN a 2nd time
r, reply_dict = check_token_list([tok, tok2], "falsePIN", user=user_obj,
options={"transaction_id": transaction_id,
"g": g})
self.assertFalse(r)
self.assertEqual("PINs do not match", reply_dict.get("message"))
# The PIN still needs to be changed!
self.assertTrue(tok.is_pin_change())
def test_03_failed_change_pin(self):
"""
Authentication with an HOTP token and then fail to
change pin, since we do not comply to the PIN policies :-)
"""
g = FakeFlaskG()
g.client_ip = "10.0.0.1"
g.policy_object = PolicyClass()
g.audit_object = FakeAudit()
user_obj = User("cornelius", realm=self.realm1)
# remove all tokens of cornelius
remove_token(user=user_obj)
tok = init_token({"type": "hotp",
"otpkey": self.otpkey, "pin": "test",
"serial": "PINCHANGE"}, tokenrealms=["r1"], user=user_obj)
tok2 = init_token({"type": "hotp",
"otpkey": self.otpkey, "pin": "fail",
"serial": "NOTNEEDED"}, tokenrealms=["r1"], user=user_obj)
# Set, that the token needs to change the pin
tok.set_next_pin_change("-1d")
# Check it
self.assertTrue(tok.is_pin_change())
# Require minimum length of 5
set_policy("minpin", scope=SCOPE.USER, action="{0!s}=5".format(ACTION.OTPPINMINLEN))
# successfully authenticate, but thus trigger a PIN change
r, reply_dict = check_token_list([tok, tok2], "test{0!s}".format(self.valid_otp_values[1]),
user=user_obj, options={"g": g})
self.assertFalse(r)
self.assertEqual("Please enter a new PIN", reply_dict.get("message"))
transaction_id = reply_dict.get("transaction_id")
# Now send a new PIN, which has only length 4 :-/
newpin = "test"
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=DeprecationWarning)
self.assertRaisesRegexp(
PolicyError, "The minimum OTP PIN length is 5", check_token_list,
[tok, tok2], newpin, user=user_obj,
options={"transaction_id": transaction_id,
"g": g})
delete_policy("minpin")
| agpl-3.0 | 3,012,626,580,595,922,400 | 44.548961 | 126 | 0.567188 | false |
ckclark/leetcode | py/count-of-smaller-numbers-after-self.py | 1 | 1182 | class Solution(object):
def deduceRemain(self, segment_tree, n):
for l in segment_tree:
if n < len(l):
l[n] -= 1
n >>= 1
def countRemainFirstN(self, segment_tree, n):
ans = 0
for l in segment_tree:
if n == 0:
break
if n & 1:
ans += l[n - 1]
n >>= 1
return ans
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
size = len(nums)
ans = [0] * size
segment_tree = []
segment_tree.append([1] * size)
t = size / 2
while t > 0:
prev = segment_tree[-1]
nxt = []
segment_tree.append(nxt)
for i in xrange(t):
nxt.append(prev[i * 2] + prev[i * 2 + 1])
t /= 2
order = list(enumerate(nums))
order.sort(key=lambda x:(-x[1], -x[0]))
for idx, _ in order:
ans[idx] = self.countRemainFirstN(segment_tree, size) - self.countRemainFirstN(segment_tree, idx + 1)
self.deduceRemain(segment_tree, idx)
return ans
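# Illustrative usage sketch (added for clarity, not part of the original file):
# the classic LeetCode 315 example, where each entry counts how many smaller
# numbers appear to its right.
if __name__ == '__main__':
    assert Solution().countSmaller([5, 2, 6, 1]) == [2, 1, 1, 0]
    assert Solution().countSmaller([]) == []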
| apache-2.0 | 578,514,358,702,732,300 | 28.55 | 113 | 0.450085 | false |
StephenLujan/Naith | game/plugins/pointlight/pointlight.py | 1 | 1351 | # Copyright Tom SF Haines, Reinier de Blois
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pandac.PandaModules import VBase4
from pandac.PandaModules import PointLight as PPointLight
class PointLight:
"""Creates a simple point light"""
def __init__(self,manager,xml):
self.light = PPointLight('plight')
self.lightNode = render.attachNewNode(self.light)
self.reload(manager,xml)
def reload(self,manager,xml):
color = xml.find('color')
if color!=None:
self.light.setColor(VBase4(float(color.get('r')), float(color.get('g')), float(color.get('b')), 1.0))
pos = xml.find('pos')
if pos!=None:
self.lightNode.setPos(render, float(pos.get('x')), float(pos.get('y')), float(pos.get('z')))
def start(self):
render.setLight(self.lightNode)
def stop(self):
render.clearLight(self.lightNode)
| apache-2.0 | 7,548,093,377,534,899,000 | 31.166667 | 107 | 0.712065 | false |
monikagrabowska/osf.io | kinto/kinto/core/authorization.py | 1 | 9913 | import functools
from pyramid.settings import aslist
from pyramid.security import IAuthorizationPolicy, Authenticated
from zope.interface import implementer
from kinto.core import utils
from kinto.core.storage import exceptions as storage_exceptions
from kinto.core.authentication import prefixed_userid
# A permission is called "dynamic" when it's computed at request time.
DYNAMIC = 'dynamic'
# When permission is set to "private", only the current user is allowed.
PRIVATE = 'private'
def groupfinder(userid, request):
"""Fetch principals from permission backend for the specified `userid`.
This is plugged by default using the ``multiauth.groupfinder`` setting.
"""
backend = getattr(request.registry, 'permission', None)
# Permission backend not configured. Ignore.
if not backend:
return []
# Safety check when Kinto-Core is used without pyramid_multiauth.
if request.prefixed_userid:
userid = request.prefixed_userid
# Query the permission backend only once per request (e.g. batch).
reify_key = userid + '_principals'
if reify_key not in request.bound_data:
principals = backend.get_user_principals(userid)
request.bound_data[reify_key] = principals
return request.bound_data[reify_key]
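# Illustrative wiring (not part of the original module): applications typically
# plug this function in through the pyramid_multiauth setting named in the
# docstring above, e.g. in an INI file:
#
#   multiauth.groupfinder = kinto.core.authorization.groupfinder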
@implementer(IAuthorizationPolicy)
class AuthorizationPolicy(object):
"""Default authorization class, that leverages the permission backend
for shareable resources.
"""
get_bound_permissions = None
"""Callable that takes an object id and a permission and returns
a list of tuples (<object id>, <permission>). Useful when objects
permission depend on others."""
def permits(self, context, principals, permission):
if permission == PRIVATE:
return Authenticated in principals
# Add prefixed user id to principals.
prefixed_userid = context.get_prefixed_userid()
if prefixed_userid and ':' in prefixed_userid:
principals = principals + [prefixed_userid]
prefix, user_id = prefixed_userid.split(':', 1)
# Remove unprefixed user id to avoid conflicts.
# (it is added via Pyramid Authn policy effective principals)
if user_id in principals:
principals.remove(user_id)
# Retro-compatibility with cliquet 2.0 '_' user id prefixes.
# Just in case it was used in permissions definitions.
principals.append('%s_%s' % (prefix, user_id))
if permission == DYNAMIC:
permission = context.required_permission
if permission == 'create':
permission = '%s:%s' % (context.resource_name, permission)
if context.allowed_principals:
allowed = bool(set(context.allowed_principals) & set(principals))
else:
object_id = context.permission_object_id
if self.get_bound_permissions is None:
bound_perms = [(object_id, permission)]
else:
bound_perms = self.get_bound_permissions(object_id, permission)
allowed = context.check_permission(principals, bound_perms)
# If not allowed on this collection, but some records are shared with
# the current user, then authorize.
# The ShareableResource class will take care of the filtering.
is_list_operation = (context.on_collection and
not permission.endswith('create'))
if not allowed and is_list_operation:
shared = context.fetch_shared_records(permission,
principals,
self.get_bound_permissions)
allowed = shared is not None
return allowed
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError() # PRAGMA NOCOVER
class RouteFactory(object):
resource_name = None
on_collection = False
required_permission = None
allowed_principals = None
permission_object_id = None
current_record = None
shared_ids = None
method_permissions = {
"head": "read",
"get": "read",
"post": "create",
"delete": "write",
"patch": "write"
}
def __init__(self, request):
# Make it available for the authorization policy.
self.get_prefixed_userid = functools.partial(prefixed_userid, request)
# Store some shortcuts.
permission = request.registry.permission
self.check_permission = permission.check_permission
self._get_accessible_objects = permission.get_accessible_objects
# Store current resource and required permission.
service = utils.current_service(request)
is_on_resource = (service is not None and
hasattr(service, 'viewset') and
hasattr(service, 'resource'))
if is_on_resource:
self.resource_name = request.current_resource_name
self.on_collection = getattr(service, "type", None) == "collection"
self.permission_object_id, self.required_permission = (
self._find_required_permission(request, service))
# To obtain shared records on a collection endpoint, use a match:
self._object_id_match = self.get_permission_object_id(request, '*')
# Check if principals are allowed explicitly from settings.
settings = request.registry.settings
setting = '%s_%s_principals' % (self.resource_name,
self.required_permission)
self.allowed_principals = aslist(settings.get(setting, ''))
def fetch_shared_records(self, perm, principals, get_bound_permissions):
"""Fetch records that are readable or writable for the current
principals.
See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`
If no record is shared, it returns None.
.. warning::
This sets the ``shared_ids`` attribute to the context with the
return value. The attribute is then read by
:class:`kinto.core.resource.ShareableResource`
"""
if get_bound_permissions:
bound_perms = get_bound_permissions(self._object_id_match, perm)
else:
bound_perms = [(self._object_id_match, perm)]
by_obj_id = self._get_accessible_objects(principals, bound_perms)
ids = by_obj_id.keys()
if len(ids) > 0:
# Store for later use in ``ShareableResource``.
self.shared_ids = [self._extract_object_id(id_) for id_ in ids]
else:
self.shared_ids = None
return self.shared_ids
def get_permission_object_id(self, request, object_id=None):
"""Returns the permission object id for the current request.
In the nominal case, it is just the current URI without version prefix.
For collections, it is the related record URI using the specified
`object_id`.
See :meth:`kinto.core.resource.model.SharableModel` and
:meth:`kinto.core.authorization.RouteFactory.__init__`
"""
object_uri = utils.strip_uri_prefix(request.path)
if self.on_collection and object_id is not None:
# With the current request on a collection, the record URI must
# be found out by inspecting the collection service and its sibling
# record service.
matchdict = request.matchdict.copy()
matchdict['id'] = object_id
try:
object_uri = utils.instance_uri(request,
self.resource_name,
**matchdict)
if object_id == '*':
object_uri = object_uri.replace('%2A', '*')
except KeyError:
# Maybe the resource has no single record endpoint.
# We consider that object URIs in permissions backend will
# be stored naively:
object_uri = object_uri + '/' + object_id
return object_uri
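    # Illustrative example (resource name is hypothetical): for a record
    # endpoint such as /v1/mushrooms/abc the version prefix is stripped and
    # the permission object id is "/mushrooms/abc"; on the matching collection
    # endpoint, get_permission_object_id(request, '*') yields "/mushrooms/*".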
def _extract_object_id(self, object_uri):
# XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']
return object_uri.split('/')[-1]
def _find_required_permission(self, request, service):
"""Find out what is the permission object id and the required
permission.
.. note::
This method saves an attribute ``self.current_record`` used
in :class:`kinto.core.resource.UserResource`.
"""
# By default, it's a URI a and permission associated to the method.
permission_object_id = self.get_permission_object_id(request)
method = request.method.lower()
required_permission = self.method_permissions.get(method)
# In the case of a "PUT", check if the targetted record already
# exists, return "write" if it does, "create" otherwise.
if request.method.lower() == "put":
resource = service.resource(request=request, context=self)
try:
record = resource.model.get_record(resource.record_id)
# Save a reference, to avoid refetching from storage in
# resource.
self.current_record = record
except storage_exceptions.RecordNotFoundError:
# The record does not exist, the permission to create on
# the related collection is required.
permission_object_id = service.collection_path.format(
**request.matchdict)
required_permission = "create"
else:
required_permission = "write"
return (permission_object_id, required_permission)
| apache-2.0 | 5,708,944,877,151,772,000 | 39.96281 | 79 | 0.616261 | false |
nightstrike/blender_nif_plugin | testframework/integration/collisions/bhkshape/test_collision.py | 1 | 1776 | """Import and export collision data"""
class TestBhkCollisionSphereShape(TestBaseGeometry, TestBhkCollision):
n_name = "collisions/base_bhkcollision_sphere" #name of nif
b_name = "Cube" #name of blender mesh object
def b_create_object(self):
b_obj = TestBaseGeometry.b_create_object(self)
bpy.ops.mesh.primitive_uv_sphere_add()
b_coll = bpy.data.objects["Sphere"]
b_coll.data.show_double_sided = False
b_coll.name = "CollisionSphere"
b_coll = bpy.data.objects["CollisionSphere"]
class TestBhkCollisionTriangleShape(TestBaseGeometry, TestBhkCollision):
n_name = "collisions/base_bhkcollision_triangle" #name of nif
b_name = "CubeObject" #name of blender mesh object
def b_create_object(self):
b_obj = TestBaseGeometry.b_create_object(self, self.b_name)
bpy.ops.mesh.primitive_cube_add()
b_coll = bpy.data.objects["Cube"]
b_coll.data.show_double_sided = False
b_coll.name = "CollisionTriangles"
b_coll = bpy.data.objects["CollisionTriangles"]
def b_check_geom(self, b_mesh):
if b_mesh.name == "poly0":
nose.tools.assert_equal(len(b_mesh.vertices), 8)
class TestBhkCapsuleObject(TestBaseGeometry, TestBhkCollision):
n_name = "collisions/base_bhkcollision_capsule" #name of nif
b_name = "Cube" #name of blender mesh object
def b_create_object(self):
b_obj = TestBaseGeometry.b_create_object(self, self.b_name)
bpy.ops.mesh.primitive_cylinder_add(vertices=8,radius=1.2,depth=2)
b_coll = bpy.context.active_object
b_coll.data.show_double_sided = False
b_coll.name = "CollisionCapsule"
b_coll = bpy.data.objects["CollisionCapsule"]
b_coll.draw_type = 'WIRE'
| bsd-3-clause | 6,808,437,871,557,297,000 | 36 | 74 | 0.671171 | false |
ducted/duct | duct/service.py | 1 | 10144 | """
.. module:: service
:synopsis: Core service classes
.. moduleauthor:: Colin Alston <[email protected]>
"""
import time
import sys
import os
import importlib
import re
import copy
from twisted.application import service
from twisted.internet import task, reactor, defer
from twisted.python import log
class DuctService(service.Service):
"""Duct service
Runs timers, configures sources and and manages the queue
"""
def __init__(self, config):
self.running = 0
self.sources = []
self.lastEvents = {}
self.outputs = {}
self.evCache = {}
self.critical = {}
self.warn = {}
self.hostConnectorCache = {}
self.eventCounter = 0
self.factory = None
self.protocol = None
self.watchdog = None
self.config = config
if os.path.exists('/var/lib/duct'):
sys.path.append('/var/lib/duct')
# Read some config stuff
self.debug = float(self.config.get('debug', False))
self.ttl = float(self.config.get('ttl', 60.0))
self.stagger = float(self.config.get('stagger', 0.2))
# Backward compatibility
self.server = self.config.get('server', None)
self.port = int(self.config.get('port', 5555))
self.proto = self.config.get('proto', 'tcp')
self.inter = self.config.get('interval', 60.0)
if self.debug:
print("config:", repr(config))
self.setupSources(self.config)
def setupOutputs(self, config):
"""Setup output processors"""
if self.server:
if self.proto == 'tcp':
defaultOutput = {
'output': 'duct.outputs.riemann.RiemannTCP',
'server': self.server,
'port': self.port
}
else:
defaultOutput = {
'output': 'duct.outputs.riemann.RiemannUDP',
'server': self.server,
'port': self.port
}
outputs = config.get('outputs', [defaultOutput])
else:
outputs = config.get('outputs', [])
for output in outputs:
if 'debug' not in output:
output['debug'] = self.debug
cl = output['output'].split('.')[-1] # class
path = '.'.join(output['output'].split('.')[:-1]) # import path
# Import the module and construct the output object
outputObj = getattr(
importlib.import_module(path), cl)(output, self)
name = output.get('name', None)
# Add the output to our routing hash
if name in self.outputs:
self.outputs[name].append(outputObj)
else:
self.outputs[name] = [outputObj]
# connect the output
reactor.callLater(0, outputObj.createClient)
def createSource(self, source):
"""Construct the source object as defined in the configuration
"""
if source.get('path'):
path = source['path']
if path not in sys.path:
sys.path.append(path)
# Resolve the source
cl = source['source'].split('.')[-1] # class
path = '.'.join(source['source'].split('.')[:-1]) # import path
# Import the module and get the object source we care about
sourceObj = getattr(importlib.import_module(path), cl)
if 'debug' not in source:
source['debug'] = self.debug
if 'ttl' not in source.keys():
source['ttl'] = self.ttl
if 'interval' not in source.keys():
source['interval'] = self.inter
return sourceObj(source, self.sendEvent, self)
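    # Sketch of the kind of source entry this method consumes (the dotted
    # 'source' path, service name and values below are hypothetical examples,
    # not taken from a real configuration):
    #
    #   sources:
    #       - service: load
    #         source: duct.sources.linux.basic.LoadAverage
    #         interval: 30.0
    #         route: riemann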
def setupTriggers(self, source, sobj):
"""Setup trigger actions for a source
"""
if source.get('critical'):
self.critical[sobj] = [(re.compile(key), val)
for key, val in source['critical'].items()]
if source.get('warning'):
self.warn[sobj] = [(re.compile(key), val)
for key, val in source['warning'].items()]
def setupSources(self, config):
"""Sets up source objects from the given config"""
sources = config.get('sources', [])
for source in sources:
src = self.createSource(source)
self.setupTriggers(source, src)
self.sources.append(src)
def _aggregateQueue(self, events):
# Handle aggregation for each event
queue = []
for ev in events:
if ev.aggregation:
eid = ev.eid()
thisM = ev.metric
if eid in self.evCache:
lastM, lastTime = self.evCache[eid]
tDelta = ev.time - lastTime
metric = ev.aggregation(
lastM, ev.metric, tDelta)
if metric:
ev.metric = metric
queue.append(ev)
self.evCache[eid] = (thisM, ev.time)
else:
queue.append(ev)
return queue
def setStates(self, source, queue):
"""
Check Event triggers against the configured source and apply the
corresponding state
"""
for ev in queue:
if ev.state == 'ok':
for key, val in self.warn.get(source, []):
if key.match(ev.service):
state = eval("service %s" % val, {'service': ev.metric})
if state:
ev.state = 'warning'
for key, val in self.critical.get(source, []):
if key.match(ev.service):
state = eval("service %s" % val, {'service': ev.metric})
if state:
ev.state = 'critical'
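    # For illustration (hypothetical thresholds): a source configured with
    #
    #   warning:  {"cpu": "> 0.7"}
    #   critical: {"cpu": "> 0.9"}
    #
    # has each key compiled to a regex matched against ev.service, and each
    # value evaluated as "service > 0.9" with the event metric bound to the
    # name 'service'.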
def routeEvent(self, source, events):
"""Route event to the queue of the output configured for it
"""
routes = source.config.get('route', None)
if not isinstance(routes, list):
routes = [routes]
for route in routes:
if self.debug:
log.msg("Sending events %s to %s" % (events, route))
if not route in self.outputs:
# Non existant route
log.msg('Could not route %s -> %s.' % (
source.config['service'], route))
else:
for output in self.outputs[route]:
reactor.callLater(0, output.eventsReceived, events)
def sendEvent(self, source, events):
"""Callback that all event sources call when they have a new event
or list of events
"""
if isinstance(events, list):
self.eventCounter += len(events)
else:
self.eventCounter += 1
events = [events]
queue = self._aggregateQueue(events)
if queue:
if (source in self.critical) or (source in self.warn):
self.setStates(source, queue)
self.routeEvent(source, queue)
queue = []
self.lastEvents[source] = time.time()
@defer.inlineCallbacks
def _startSource(self, source):
yield defer.maybeDeferred(source.startTimer)
@defer.inlineCallbacks
def startService(self):
yield self.setupOutputs(self.config)
if self.debug:
log.msg("Starting service")
stagger = 0
# Start sources internal timers
for source in self.sources:
if self.debug:
log.msg("Starting source " + source.config['service'])
# Stagger source timers, or use per-source start_delay
start_delay = float(source.config.get('start_delay', stagger))
reactor.callLater(start_delay, self._startSource, source)
stagger += self.stagger
reactor.callLater(stagger, self.startWatchdog)
self.running = 1
def startWatchdog(self):
"""Start source watchdog
"""
self.watchdog = task.LoopingCall(self.sourceWatchdog)
self.watchdog.start(10)
@defer.inlineCallbacks
def sourceWatchdog(self):
"""Watchdog timer function.
Recreates sources which have not generated events in 10*interval if
they have watchdog set to true in their configuration
"""
for i, source in enumerate(self.sources):
if not source.config.get('watchdog', False):
continue
last = self.lastEvents.get(source, None)
if last:
sn = repr(source)
try:
if last < (time.time()-(source.inter*10)):
log.msg("Trying to restart stale source %s: %ss" % (
sn, int(time.time() - last)
))
source = self.sources.pop(i)
try:
yield source.stopTimer()
except Exception as ex:
log.msg("Could not stop timer for %s: %s" % (
sn, ex))
config = copy.deepcopy(source.config)
del self.lastEvents[source]
del source
source = self.createSource(config)
reactor.callLater(0, self._startSource, source)
except Exception as ex:
log.msg("Could not reset source %s: %s" % (
sn, ex))
@defer.inlineCallbacks
def stopService(self):
self.running = 0
if self.watchdog and self.watchdog.running:
self.watchdog.stop()
for source in self.sources:
yield defer.maybeDeferred(source.stopTimer)
for _, outputs in self.outputs.items():
for output in outputs:
yield defer.maybeDeferred(output.stop)
| mit | 7,231,771,732,266,448,000 | 30.799373 | 80 | 0.51735 | false |
interlegis/sigi | sigi/apps/parlamentares/views.py | 1 | 4616 | # coding: utf-8
import datetime
import csv
from django.template import Context, loader
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.conf import settings
from django.shortcuts import render, get_list_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from django.template import RequestContext
from sigi.apps.casas.models import Orgao
from sigi.apps.parlamentares.models import Parlamentar
from sigi.apps.parlamentares.reports import ParlamentaresLabels
from geraldo.generators import PDFGenerator
from django.contrib.auth.decorators import login_required
def adicionar_parlamentar_carrinho(request, queryset=None, id=None):
if request.method == 'POST':
ids_selecionados = request.POST.getlist('_selected_action')
        if 'carrinho_parlamentar' not in request.session:
request.session['carrinho_parlamentar'] = ids_selecionados
else:
lista = request.session['carrinho_parlamentar']
            # Check that the id has not already been added
for id in ids_selecionados:
if id not in lista:
lista.append(id)
request.session['carrinho_parlamentar'] = lista
@login_required
@csrf_protect
def visualizar_carrinho(request):
qs = carrinhoOrGet_for_qs(request)
paginator = Paginator(qs, 100)
    # Make sure the page request is an int. If not, deliver the first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
    # If the page request (e.g. 9999) is out of range, deliver the last page.
try:
paginas = paginator.page(page)
except (EmptyPage, InvalidPage):
paginas = paginator.page(paginator.num_pages)
    carrinhoIsEmpty = 'carrinho_parlamentar' not in request.session
return render(
request,
'parlamentares/carrinho.html',
{
'carIsEmpty': carrinhoIsEmpty,
'paginas': paginas,
'query_str': '?' + request.META['QUERY_STRING']
}
)
def carrinhoOrGet_for_qs(request):
"""
    Check whether there are parliamentarians in the session; if not, check GET and return the corresponding queryset.
"""
if 'carrinho_parlamentar' in request.session:
ids = request.session['carrinho_parlamentar']
qs = Parlamentar.objects.filter(pk__in=ids)
else:
qs = Parlamentar.objects.all()
if request.GET:
qs = get_for_qs(request.GET, qs)
return qs
def query_ordena(qs, o, ot):
list_display = ('nome_completo',)
aux = list_display[(int(o) - 1)]
if ot == 'asc':
qs = qs.order_by(aux)
else:
qs = qs.order_by("-" + aux)
return qs
def get_for_qs(get, qs):
"""
    Check the GET attributes and return the corresponding queryset
"""
kwargs = {}
for k, v in get.iteritems():
if not (k == 'page' or k == 'pop' or k == 'q'):
if not k == 'o':
if k == "ot":
qs = query_ordena(qs, get["o"], get["ot"])
else:
kwargs[str(k)] = v
qs = qs.filter(**kwargs)
return qs
@login_required
def deleta_itens_carrinho(request):
"""
    Delete the selected items from the cart
"""
if request.method == 'POST':
ids_selecionados = request.POST.getlist('_selected_action')
if 'carrinho_parlamentar' in request.session:
lista = request.session['carrinho_parlamentar']
for item in ids_selecionados:
lista.remove(item)
if lista:
request.session['carrinho_parlamentar'] = lista
else:
del lista
del request.session['carrinho_parlamentar']
return HttpResponseRedirect('.')
@login_required
def labels_report(request, id=None, formato='3x9_etiqueta'):
""" TODO: adicionar suporte para resultado de pesquisa do admin.
"""
if request.POST:
if 'tipo_etiqueta' in request.POST:
tipo = request.POST['tipo_etiqueta']
if id:
qs = Parlamentar.objects.filter(pk=id)
else:
qs = carrinhoOrGet_for_qs(request)
if not qs:
return HttpResponseRedirect('../')
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=casas.pdf'
report = ParlamentaresLabels(queryset=qs, formato=formato)
report.generate_by(PDFGenerator, filename=response)
return response
| gpl-2.0 | 6,510,522,160,181,261,000 | 29.496689 | 98 | 0.636699 | false |
tfXYZ/tfXYZ | core/losses.py | 1 | 1634 | import tensorflow as tf
from .blocks import gather_nd
tf.app.flags.DEFINE_float('alpha', 1.0, '')
tf.app.flags.DEFINE_float('beta', 1.0, '')
FLAGS = tf.app.flags.FLAGS
def ce_loss(logits, labels, **kwargs):
"""
The standard classification loss. Applies softmax on the logits and computes the loss.
"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
return tf.reduce_mean(cross_entropy)
def ce_loss_from_softmax(softmax_out, labels, avg=True, **kwargs):
"""
The standard classification loss. Takes the softmax output and computes the loss.
"""
indices = tf.transpose(tf.stack([tf.constant(range(0, softmax_out.get_shape()[0].value)), labels]), [1,0])
correct_probs = gather_nd(softmax_out, indices)
loss = -tf.reduce_mean(tf.log(correct_probs)) if avg else -tf.log(correct_probs)
return loss
def binary_ce_loss(logits, labels, n_classes, **kwargs):
"""
Binary CE loss, for multilabel classification and other applications.
"""
one_hot_labels = tf.one_hot(labels, n_classes)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
return tf.reduce_mean(loss)
def MSE_loss(tensor, targets):
"""
Standard MSE loss.
"""
loss = tf.reduce_mean(tf.pow(tensor - targets, 2))
return loss
def mix_sigmoid_ce_loss(logits, labels, n_classes, **kwargs):
"""
A mix between the standard CE and binary CE loss, according to alpha and beta.
"""
print('alpha, beta:', FLAGS.alpha, FLAGS.beta)
loss = ce_loss(logits, labels) * FLAGS.alpha + binary_ce_loss(logits, labels, n_classes) * FLAGS.beta
return loss
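# Illustrative usage sketch (names and shapes are assumptions, not from the
# original file): with `logits` of shape [batch, n_classes] and integer class
# `labels` of shape [batch],
#
#   loss = mix_sigmoid_ce_loss(logits, labels, n_classes=10)
#
# yields alpha * softmax cross-entropy + beta * sigmoid cross-entropy, with
# alpha and beta taken from the flags defined at the top of this module.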
| gpl-3.0 | 3,675,278,564,448,912,000 | 31.039216 | 108 | 0.70257 | false |
klahnakoski/esReplicate | pyLibrary/sql/redshift.py | 1 | 5408 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# FOR WINDOWS INSTALL OF psycopg2
# http://stickpeople.com/projects/python/win-psycopg/2.6.0/psycopg2-2.6.0.win32-py2.7-pg9.4.1-release.exe
import psycopg2
from psycopg2.extensions import adapt
from pyLibrary import convert
from mo_logs.exceptions import suppress_exception
from mo_logs import Log
from mo_kwargs import override
from pyLibrary.queries import jx
from pyLibrary.sql import SQL
from mo_logs.strings import expand_template
from mo_threads import Lock
class Redshift(object):
@override
def __init__(self, host, user, password, database=None, port=5439, kwargs=None):
self.settings=kwargs
self.locker = Lock()
self.connection = None
def _connect(self):
self.connection=psycopg2.connect(
database=self.settings.database,
user=self.settings.user,
password=self.settings.password,
host=self.settings.host,
port=self.settings.port
)
def query(self, sql, param=None):
return self.execute(sql, param)
def execute(
self,
command,
param=None,
retry=True # IF command FAILS, JUST THROW ERROR
):
if param:
command = expand_template(command, self.quote_param(param))
output = None
done = False
while not done:
try:
with self.locker:
if not self.connection:
self._connect()
with Closer(self.connection.cursor()) as curs:
curs.execute(command)
if curs.rowcount >= 0:
output = curs.fetchall()
self.connection.commit()
done = True
except Exception as e:
with suppress_exception:
self.connection.rollback()
# TODO: FIGURE OUT WHY rollback() DOES NOT HELP
self.connection.close()
self.connection = None
self._connect()
if not retry:
Log.error("Problem with command:\n{{command|indent}}", command= command, cause=e)
return output
def insert(self, table_name, record):
keys = record.keys()
try:
command = "INSERT INTO " + self.quote_column(table_name) + "(" + \
",".join([self.quote_column(k) for k in keys]) + \
") VALUES (" + \
",".join([self.quote_value(record[k]) for k in keys]) + \
")"
self.execute(command)
except Exception as e:
Log.error("problem with record: {{record}}", record= record, cause=e)
def insert_list(self, table_name, records):
if not records:
return
columns = set()
for r in records:
columns |= set(r.keys())
columns = jx.sort(columns)
try:
self.execute(
"DELETE FROM " + self.quote_column(table_name) + " WHERE _id IN {{ids}}",
{"ids": self.quote_column([r["_id"] for r in records])}
)
command = \
"INSERT INTO " + self.quote_column(table_name) + "(" + \
",".join([self.quote_column(k) for k in columns]) + \
") VALUES " + ",\n".join([
"(" + ",".join([self.quote_value(r.get(k, None)) for k in columns]) + ")"
for r in records
])
self.execute(command)
except Exception as e:
Log.error("problem with insert", e)
def quote_param(self, param):
output={}
for k, v in param.items():
if isinstance(v, SQL):
output[k]=v.sql
else:
output[k]=self.quote_value(v)
return output
def quote_column(self, name):
if isinstance(name, basestring):
return SQL('"' + name.replace('"', '""') + '"')
return SQL("(" + (", ".join(self.quote_value(v) for v in name)) + ")")
def quote_value(self, value):
        if value is None:
return SQL("NULL")
if isinstance(value, list):
json = convert.value2json(value)
return self.quote_value(json)
if isinstance(value, basestring) and len(value) > 256:
value = value[:256]
return SQL(adapt(value))
def es_type2pg_type(self, es_type):
return PG_TYPES.get(es_type, "character varying")
PG_TYPES = {
"boolean": "boolean",
"double": "double precision",
"float": "double precision",
"string": "VARCHAR",
"long": "bigint"
}
class Closer(object):
def __init__(self, resource):
self.resource = resource
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with suppress_exception:
self.resource.close()
def __getattr__(self, item):
return getattr(self.resource, item)
| mpl-2.0 | -9,127,470,044,056,634,000 | 29.382022 | 105 | 0.54253 | false |
8l/beri | cheritest/trunk/tests/cp0/test_tge_lt.py | 2 | 1334 | #-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_tge_lt(BaseBERITestCase):
def test_tge_handled(self):
self.assertRegisterEqual(self.MIPS.a2, 0, "tge trapped when less than")
| apache-2.0 | -5,449,927,444,243,743,000 | 40.6875 | 79 | 0.76012 | false |
jimsize/PySolFC | pysollib/kivy/toolbar.py | 1 | 10827 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------#
# Copyright (C) 2016-2017 LB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------#
# imports
import os
# PySol imports
from pysollib.util import IMAGE_EXTENSIONS
from pysollib.settings import TITLE
from pysollib.winsystems import TkSettings
from pysollib.mygettext import _, n_
# ************************************************************************
# *
# ************************************************************************
class AbstractToolbarButton:
def __init__(self, parent, toolbar, toolbar_name, position):
self.toolbar = toolbar
self.toolbar_name = toolbar_name
self.position = position
self.visible = False
def show(self, orient, force=False):
if self.visible and not force:
return
self.visible = True
padx, pady = 2, 2
if orient == 'horizontal':
self.grid(row=0,
column=self.position,
ipadx=padx, ipady=pady,
sticky='nsew')
else:
self.grid(row=self.position,
column=0,
ipadx=padx, ipady=pady,
sticky='nsew')
def hide(self):
if not self.visible:
return
self.visible = False
self.grid_forget()
# ************************************************************************
if True:
from pysollib.kivy.LApp import LImage
from pysollib.kivy.LApp import LBase
# from LApp import LMainWindow
from kivy.uix.boxlayout import BoxLayout
# from kivy.uix.button import Button
from kivy.uix.behaviors import ButtonBehavior
# from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image as KivyImage
# ************************************************************************
class MyButton(ButtonBehavior, KivyImage, LBase):
def __init__(self, **kwargs):
super(MyButton, self).__init__(**kwargs)
# super(MyButton, self).__init__()
self.src = None
if ('image' in kwargs):
self.src = kwargs['image'].source
self.command = None
if ('command' in kwargs):
self.command = kwargs['command']
self.source = self.src
self.allow_stretch = True
def on_press(self):
self.allow_stretch = False
def on_release(self):
self.allow_stretch = True
if (self.command is not None):
self.command()
class MyCheckButton(ButtonBehavior, KivyImage, LBase):
def __init__(self, **kwargs):
super(MyCheckButton, self).__init__(**kwargs)
# super(MyCheckButton, self).__init__()
self.src = None
if ('image' in kwargs):
self.src = kwargs['image'].source
self.command = None
if ('command' in kwargs):
self.command = kwargs['command']
self.variable = None
if ('variable' in kwargs):
self.variable = kwargs['variable']
self.win = None
if ('win' in kwargs):
self.win = kwargs['win']
self.source = self.src
self.allow_stretch = True
self.checked = False
# self.variable = self.win.app.menubar.tkopt.pause
if self.variable:
self.variable.bind(value=self.updateState)
def updateState(self, obj, val):
if (val):
self.allow_stretch = False
else:
self.allow_stretch = True
def isChecked(self):
return self.checked
def on_press(self):
if self.win is None:
return
if self.win.app is None:
return
if self.win.app.game is None:
return
game = self.win.app.game
if game.finished:
return
if game.demo:
return
# if self.win.app.menubar == None: return
# mb = self.win.app.menubar
if game.pause:
self.allow_stretch = True
self.checked = False
if (self.command is not None):
self.command()
else:
self.allow_stretch = False
self.checked = True
if (self.command is not None):
self.command()
def on_release(self):
pass
# ************************************************************************
# * Note: Applications should call show/hide after constructor.
# ************************************************************************
class PysolToolbarTk(BoxLayout):
def __init__(
self,
top,
menubar,
dir,
size=0,
relief='flat',
compound='none'):
super(PysolToolbarTk, self).__init__(orientation='vertical')
self.size_hint = (0.05, 1.0)
# self.size_hint=(None, 1.0)
# self.width = 50
self.win = top
self.menubar = menubar
self.dir = dir
self.win.setTool(self, 3)
for label, f, t in (
(n_("New"), self.mNewGame, _("New game")),
(n_("Restart"), self.mRestart, _("Restart the\ncurrent game")),
(None, None, None),
# (n_("Open"), self.mOpen, _("Open a\nsaved game")),
# (n_("Save"), self.mSave, _("Save game")),
(None, None, None),
(n_("Undo"), self.mUndo, _("Undo last move")),
(n_("Redo"), self.mRedo, _("Redo last move")),
(n_("Autodrop"), self.mDrop, _("Auto drop cards")),
(n_("Shuffle"), self.mShuffle, _("Shuffle tiles")),
(n_("Pause"), self.mPause, _("Pause game")),
(None, None, None),
# (n_("Statistics"), self.mPlayerStats, _("View statistics")),
(n_("Rules"), self.mHelpRules, _("Rules for this game")),
(None, None, None),
(n_("Quit"), self.mHoldAndQuit, _("Quit ") + TITLE),
):
if label is None:
# sep = self._createSeparator()
# sep.bind("<1>", self.clickHandler)
# sep.bind("<3>", self.rightclickHandler)
pass
elif label == 'Pause':
self._createButton(label, f, check=True, tooltip=t)
else:
self._createButton(label, f, tooltip=t)
        # There is also a 'player label' with a context menu here, where the
        # player name could be chosen and the game statistics etc. could be
        # displayed (TBD):
'''
sep = self._createFlatSeparator()
sep.bind("<1>", self.clickHandler)
sep.bind("<3>", self.rightclickHandler)
self._createLabel("player", label=n_('Player'),
tooltip=_("Player options"))
#
self.player_label.bind("<1>", self.mOptPlayerOptions)
# self.player_label.bind("<3>", self.mOptPlayerOptions)
self.popup = MfxMenu(master=None, label=n_('Toolbar'), tearoff=0)
createToolbarMenu(menubar, self.popup)
self.frame.bind("<1>", self.clickHandler)
self.frame.bind("<3>", self.rightclickHandler)
#
self.setCompound(compound, force=True)
'''
def show(self, on, **kw):
side = self.menubar.tkopt.toolbar.get()
self.win.setTool(None, side)
return False
def mHoldAndQuit(self, *args):
if not self._busy():
self.menubar.mHoldAndQuit()
return 1
def getSize(self):
return 0
def updateText(self, **kw):
pass
def config(self, w, v):
print('PysolToolbarTk: config %s, %s' % (w, v))
# y = self.yy
pass
    # Local helpers.
def _loadImage(self, name):
file = os.path.join(self.dir, name)
image = None
for ext in IMAGE_EXTENSIONS:
file = os.path.join(self.dir, name + ext)
if os.path.isfile(file):
image = LImage(source=file)
# print('_loadImage: file=%s' % file)
# image = Tkinter.PhotoImage(file=file)
break
return image
def _createButton(self, label, command, check=False, tooltip=None):
name = label.lower()
image = self._loadImage(name)
# position = len(self._widgets)
button_relief = TkSettings.toolbar_button_relief
bd = TkSettings.toolbar_button_borderwidth
padx, pady = TkSettings.toolbar_button_padding
kw = {
'toolbar': self,
'toolbar_name': name,
'command': command,
'takefocus': 0,
'text': _(label),
'bd': bd,
'relief': button_relief,
'padx': padx,
'pady': pady,
'overrelief': 'raised',
}
# print ('toolbar: print %s' % self.win)
# print ('toolbar: print %s' % self.win.app)
kw['win'] = self.win
if image:
kw['image'] = image
if check:
kw['offrelief'] = button_relief
kw['indicatoron'] = False
kw['selectcolor'] = ''
button = MyCheckButton(**kw)
else:
button = MyButton(**kw)
# button.show(orient=self.orient)
setattr(self, name + "_image", image)
setattr(self, name + "_button", button)
# self._widgets.append(button)
self.add_widget(button)
# TBD: tooltip ev. auf basis einer statuszeile implementieren
# if tooltip:
# b = MfxTooltip(button)
# self._tooltips.append(b)
# b.setText(tooltip)
return button
def _busy(self):
# if not self.side or not self.game or not self.menubar:
# return 1
if not self.game or not self.menubar:
return 1
print('_busy:')
self.game.stopDemo()
self.game.interruptSleep()
return self.game.busy
| gpl-3.0 | 5,466,700,252,095,288,000 | 32.103976 | 78 | 0.506328 | false |
treyhunner/django-simple-history | docs/conf.py | 1 | 8369 | # -*- coding: utf-8 -*-
#
# django-simple-history documentation build configuration file, created by
# sphinx-quickstart on Sun May 5 16:10:02 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from pkg_resources import get_distribution
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "django-simple-history"
copyright = "2013, Corey Bertram"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = get_distribution("django-simple-history").version
# for example take major/minor
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "django-simple-historydoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"django-simple-history.tex",
"django-simple-history Documentation",
"Corey Bertram",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"django-simple-history",
"django-simple-history Documentation",
["Corey Bertram"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"django-simple-history",
"django-simple-history Documentation",
"Corey Bertram",
"django-simple-history",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| bsd-3-clause | -8,592,172,093,522,146,000 | 30.700758 | 80 | 0.690644 | false |
umuzungu/zipline | tests/test_examples.py | 1 | 3717 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tarfile
import matplotlib
from nose_parameterized import parameterized
import pandas as pd
from zipline import examples, run_algorithm
from zipline.data.bundles import register, unregister
from zipline.testing import test_resource_path
from zipline.testing.fixtures import WithTmpDir, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.cache import dataframe_cache
# Otherwise the next line sometimes complains about being run too late.
_multiprocess_can_split_ = False
matplotlib.use('Agg')
class ExamplesTests(WithTmpDir, ZiplineTestCase):
    # Only compare columns whose values are deterministic across runs; columns
    # holding run-specific unique ids are left out of cols_to_check.
cols_to_check = [
'algo_volatility',
'algorithm_period_return',
'alpha',
'benchmark_period_return',
'benchmark_volatility',
'beta',
'capital_used',
'ending_cash',
'ending_exposure',
'ending_value',
'excess_return',
'gross_leverage',
'long_exposure',
'long_value',
'longs_count',
'max_drawdown',
'max_leverage',
'net_leverage',
'period_close',
'period_label',
'period_open',
'pnl',
'portfolio_value',
'positions',
'returns',
'short_exposure',
'short_value',
'shorts_count',
'sortino',
'starting_cash',
'starting_exposure',
'starting_value',
'trading_days',
'treasury_period_return',
]
@classmethod
def init_class_fixtures(cls):
super(ExamplesTests, cls).init_class_fixtures()
register('test', lambda *args: None)
cls.add_class_callback(partial(unregister, 'test'))
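        # NOTE: the 'test' bundle registered above is a no-op placeholder -- it
        # only has to exist by name so run_algorithm() can resolve it; the actual
        # data comes from the pre-ingested example_data tree extracted below and
        # exposed to the example algorithms through ZIPLINE_ROOT.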
with tarfile.open(test_resource_path('example_data.tar.gz')) as tar:
tar.extractall(cls.tmpdir.path)
cls.expected_perf = dataframe_cache(
cls.tmpdir.getpath(
'example_data/expected_perf/%s' %
pd.__version__.replace('.', '-'),
),
serialization='pickle',
)
@parameterized.expand(e for e in dir(examples) if not e.startswith('_'))
def test_example(self, example):
mod = getattr(examples, example)
actual_perf = run_algorithm(
handle_data=mod.handle_data,
initialize=mod.initialize,
before_trading_start=getattr(mod, 'before_trading_start', None),
analyze=getattr(mod, 'analyze', None),
bundle='test',
environ={
'ZIPLINE_ROOT': self.tmpdir.getpath('example_data/root'),
},
capital_base=1e7,
**mod._test_args()
)
assert_equal(
actual_perf[self.cols_to_check],
self.expected_perf[example][self.cols_to_check],
# There is a difference in the datetime columns in pandas
# 0.16 and 0.17 because in 16 they are object and in 17 they are
# datetime[ns, UTC]. We will just ignore the dtypes for now.
check_dtype=False,
)
| apache-2.0 | -8,221,779,380,375,644,000 | 31.605263 | 76 | 0.620393 | false |
ecreall/lagendacommun | lac/views/services_processes/moderation_service/see_service.py | 1 | 2407 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from dace.util import getSite
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from lac.content.processes.services_processes.behaviors import (
SeeModerationService, SeeModerationUnitService)
from lac.content.service import (
ModerationService, ModerationServiceUnit)
from lac.utilities.utils import (
ObjectRemovedException, generate_navbars)
@view_config(
name='seemoderationservice',
context=ModerationService,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeModerationServiceView(BasicView):
title = ''
name = 'seemoderationservice'
behaviors = [SeeModerationService]
template = 'lac:views/services_processes/moderation_service/templates/see_moderation_service.pt'
viewid = 'seemoderationservice'
def update(self):
self.execute(None)
result = {}
try:
navbars = generate_navbars(self, self.context, self.request)
except ObjectRemovedException:
return HTTPFound(self.request.resource_url(getSite(), ''))
values = {'object': self.context,
'navbar_body': navbars['navbar_body']}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
item['messages'] = navbars['messages']
item['isactive'] = navbars['isactive']
result.update(navbars['resources'])
result['coordinates'] = {self.coordinates: [item]}
return result
@view_config(
name='seemoderationserviceunit',
context=ModerationServiceUnit,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeModerationServiceUnitView(SeeModerationServiceView):
title = ''
name = 'seemoderationserviceunit'
behaviors = [SeeModerationUnitService]
template = 'lac:views/services_processes/moderation_service/templates/see_moderation_service.pt'
viewid = 'seemoderationserviceunit'
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{SeeModerationService: SeeModerationServiceView})
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{SeeModerationUnitService: SeeModerationServiceUnitView})
| agpl-3.0 | -3,885,177,443,963,789,300 | 33.385714 | 100 | 0.726215 | false |
sujitpal/dl-models-for-qa | src/qa-blstm-attn.py | 1 | 3916 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from gensim.models import Word2Vec
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Merge, Dropout, Reshape, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import Bidirectional
from keras.models import Sequential
from sklearn.cross_validation import train_test_split
import numpy as np
import os
import kaggle
DATA_DIR = "../data/comp_data"
MODEL_DIR = "../data/models"
WORD2VEC_BIN = "GoogleNews-vectors-negative300.bin.gz"
WORD2VEC_EMBED_SIZE = 300
QA_TRAIN_FILE = "8thGr-NDMC-Train.csv"
QA_EMBED_SIZE = 64
BATCH_SIZE = 32
NBR_EPOCHS = 20
## extract data
print("Loading and formatting data...")
qapairs = kaggle.get_question_answer_pairs(
os.path.join(DATA_DIR, QA_TRAIN_FILE))
question_maxlen = max([len(qapair[0]) for qapair in qapairs])
answer_maxlen = max([len(qapair[1]) for qapair in qapairs])
seq_maxlen = max([question_maxlen, answer_maxlen])
word2idx = kaggle.build_vocab([], qapairs, [])
vocab_size = len(word2idx) + 1 # include mask character 0
Xq, Xa, Y = kaggle.vectorize_qapairs(qapairs, word2idx, seq_maxlen)
Xqtrain, Xqtest, Xatrain, Xatest, Ytrain, Ytest = \
train_test_split(Xq, Xa, Y, test_size=0.3, random_state=42)
print(Xqtrain.shape, Xqtest.shape, Xatrain.shape, Xatest.shape,
Ytrain.shape, Ytest.shape)
# get embeddings from word2vec
# see https://github.com/fchollet/keras/issues/853
print("Loading Word2Vec model and generating embedding matrix...")
word2vec = Word2Vec.load_word2vec_format(
os.path.join(DATA_DIR, WORD2VEC_BIN), binary=True)
embedding_weights = np.zeros((vocab_size, WORD2VEC_EMBED_SIZE))
for word, index in word2idx.items():
try:
embedding_weights[index, :] = word2vec[word.lower()]
except KeyError:
pass # keep as zero (not ideal, but what else can we do?)
print("Building model...")
qenc = Sequential()
qenc.add(Embedding(output_dim=WORD2VEC_EMBED_SIZE, input_dim=vocab_size,
input_length=seq_maxlen,
weights=[embedding_weights]))
qenc.add(Bidirectional(LSTM(QA_EMBED_SIZE, return_sequences=True),
merge_mode="sum"))
qenc.add(Dropout(0.3))
aenc = Sequential()
aenc.add(Embedding(output_dim=WORD2VEC_EMBED_SIZE, input_dim=vocab_size,
input_length=seq_maxlen,
weights=[embedding_weights]))
aenc.add(Bidirectional(LSTM(QA_EMBED_SIZE, return_sequences=True),
merge_mode="sum"))
aenc.add(Dropout(0.3))
# attention model
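# The dot-product Merge below contracts the question and answer encodings
# along the sequence axis (dot_axes=[1, 1]), yielding a feature-interaction
# matrix; the following Dense + Reshape project it back to
# (seq_maxlen, QA_EMBED_SIZE) so it can later be summed with the question
# encoding.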
attn = Sequential()
attn.add(Merge([qenc, aenc], mode="dot", dot_axes=[1, 1]))
attn.add(Flatten())
attn.add(Dense((seq_maxlen * QA_EMBED_SIZE)))
attn.add(Reshape((seq_maxlen, QA_EMBED_SIZE)))
model = Sequential()
model.add(Merge([qenc, attn], mode="sum"))
model.add(Flatten())
model.add(Dense(2, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy",
metrics=["accuracy"])
print("Training...")
checkpoint = ModelCheckpoint(
filepath=os.path.join(MODEL_DIR, "qa-blstm-attn-best.hdf5"),
verbose=1, save_best_only=True)
model.fit([Xqtrain, Xatrain], Ytrain, batch_size=BATCH_SIZE,
nb_epoch=NBR_EPOCHS, validation_split=0.1,
callbacks=[checkpoint])
print("Evaluation...")
loss, acc = model.evaluate([Xqtest, Xatest], Ytest, batch_size=BATCH_SIZE)
print("Test loss/accuracy final model = %.4f, %.4f" % (loss, acc))
model.save_weights(os.path.join(MODEL_DIR, "qa-blstm-attn-final.hdf5"))
with open(os.path.join(MODEL_DIR, "qa-blstm-attn.json"), "wb") as fjson:
fjson.write(model.to_json())
model.load_weights(filepath=os.path.join(MODEL_DIR, "qa-blstm-attn-best.hdf5"))
loss, acc = model.evaluate([Xqtest, Xatest], Ytest, batch_size=BATCH_SIZE)
print("\nTest loss/accuracy best model = %.4f, %.4f" % (loss, acc))
| apache-2.0 | -2,891,411,434,089,374,000 | 35.598131 | 79 | 0.699183 | false |
rv816/serrano_night | serrano/resources/stats.py | 1 | 3399 | from django.core.urlresolvers import reverse
from django.conf.urls import patterns, url
from django.views.decorators.cache import never_cache
from restlib2.params import Parametizer, BoolParam, StrParam
from avocado.models import DataContext, DataField
from avocado.query import pipeline
from .base import BaseResource, ThrottledResource
class StatsResource(BaseResource):
def get(self, request):
uri = request.build_absolute_uri
return {
'title': 'Serrano Stats Endpoint',
'_links': {
'self': {
'href': uri(reverse('serrano:stats:root')),
},
'counts': {
'href': uri(reverse('serrano:stats:counts')),
},
}
}
class CountStatsParametizer(Parametizer):
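    # 'aware' switches between counts filtered through the request's DataContext
    # and counts over a blank context; 'processor' selects which registered
    # query processor builds the per-model querysets in CountStatsResource.get().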
aware = BoolParam(False)
processor = StrParam('default', choices=pipeline.query_processors)
class CountStatsResource(ThrottledResource):
parametizer = CountStatsParametizer
def get(self, request):
params = self.get_params(request)
if params['aware']:
context = self.get_context(request)
else:
context = DataContext()
# Get all published app/model pairs to produce counts for.
model_names = DataField.objects.published()\
.values_list('app_name', 'model_name')\
.order_by('model_name').distinct()
data = []
models = set()
QueryProcessor = pipeline.query_processors[params['processor']]
for app_name, model_name in model_names:
# DataField used here to resolve foreign key-based fields.
model = DataField(app_name=app_name, model_name=model_name).model
# Foreign-key based fields may resolve to models that are already
# accounted for.
if model in models:
continue
models.add(model)
# Build a queryset through the context which is toggled by
# the parameter.
processor = QueryProcessor(context=context, tree=model)
queryset = processor.get_queryset(request=request)
count = queryset.values('pk').distinct().count()
opts = model._meta
# Format is called to resolve Django's internal proxy wrapper.
verbose_name = opts.verbose_name.format()
verbose_name_plural = opts.verbose_name_plural.format()
            # Assume no custom verbose_name has been set in the Meta class, so
# apply a minimal title-case.
if verbose_name.islower():
verbose_name = verbose_name.title()
if verbose_name_plural.islower():
verbose_name_plural = verbose_name_plural.title()
data.append({
'count': count,
'app_name': app_name,
'model_name': model_name,
'verbose_name': verbose_name,
'verbose_name_plural': verbose_name_plural,
})
return data
# Same logic, but supports submitting context via a POST.
post = get
stats_resource = never_cache(StatsResource())
counts_resource = never_cache(CountStatsResource())
# Resource endpoints
urlpatterns = patterns(
'',
url(r'^$', stats_resource, name='root'),
url(r'^counts/$', counts_resource, name='counts'),
)
| bsd-2-clause | 6,553,799,984,254,183,000 | 30.472222 | 77 | 0.596646 | false |
radjkarl/appBase | appbase/Launcher.py | 1 | 22087 | # coding=utf-8
from __future__ import print_function
from builtins import str
# -*- coding: utf-8 -*-
###############
# The launcher class is not updated any more
# I might remove it
###############
# own
import appbase
from fancytools.os.PathStr import PathStr
from fancywidgets.pyQtBased.Dialogs import Dialogs
# foreign
from qtpy import QtGui, QtWidgets, QtCore, QtSvg
# built-in
import os
from zipfile import ZipFile
import distutils
from distutils import spawn
import subprocess
import sys
import tempfile
CONFIG_FILE = PathStr.home().join(__name__)
class Launcher(QtWidgets.QMainWindow):
"""
A graphical starter for *.pyz files created by the save-method from
appbase.MainWindow
NEEDS AN OVERHAUL ... after that's done it will be able to:
* show all *.pyz-files in a filetree
* show the session specific ...
* icon
* description
* author etc.
* start, remove, rename, modify a session
* modify, start a certain state of a session
"""
def __init__(self,
title='PYZ-Launcher',
icon=None,
start_script=None,
left_header=None,
right_header=None,
file_type='pyz'
):
self.dialogs = Dialogs()
_path = PathStr.getcwd()
_default_text_color = '#3c3c3c'
if icon is None:
icon = os.path.join(_path, 'media', 'launcher_logo.svg')
if start_script is None:
start_script = os.path.join(_path, 'test_session.py')
if left_header is None:
_description = "<a href=%s style='color: %s'>%s</a>" % (
appbase.__url__, _default_text_color, appbase.__doc__)
left_header = """<b>%s</b><br>
version
<a href=%s style='color: %s'>%s</a><br>
			author
<a href=mailto:%s style='color: %s'>%s</a> """ % ( # text-decoration:underline
_description,
os.path.join(_path, 'media', 'recent_changes.txt'),
_default_text_color,
appbase.__version__,
appbase.__email__,
_default_text_color,
appbase.__author__
)
if right_header is None:
# if no header is given, list all pdfs in folder media as link
d = _path
right_header = ''
for f in os.listdir(os.path.join(d, 'media')):
if f.endswith('.pdf'):
_guidePath = os.path.join(d, 'media', f)
right_header += "<a href=%s style='color: %s'>%s</a><br>" % (
_guidePath, _default_text_color, f[:-4])
right_header = right_header[:-4]
QtWidgets.QMainWindow.__init__(self)
self._start_script = start_script
self.setWindowTitle(title)
self.setWindowIcon(QtGui.QIcon(icon))
self.resize(900, 500)
# BASE STRUTURE
area = QtWidgets.QWidget()
self.setCentralWidget(area)
layout = QtWidgets.QVBoxLayout()
area.setLayout(layout)
#header = QtWidgets.QHBoxLayout()
# layout.addLayout(header)
		# grab the default text color of a QLabel so the links below can use it
		# instead of the default blue:
# LEFT TEXT
info = QtWidgets.QLabel(left_header)
info.setOpenExternalLinks(True)
# LOGO
header = QtWidgets.QWidget()
header.setFixedHeight(70)
headerlayout = QtWidgets.QHBoxLayout()
header.setLayout(headerlayout)
logo = QtSvg.QSvgWidget(icon)
logo.setFixedWidth(50)
logo.setFixedHeight(50)
headerlayout.addWidget(logo)
headerlayout.addWidget(info)
layout.addWidget(header)
# RIGHT_HEADER
userGuide = QtWidgets.QLabel(right_header)
userGuide.setOpenExternalLinks(True)
userGuide.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignRight)
headerlayout.addWidget(userGuide)
# ROOT-PATH OF THE SESSIONS
rootLayout = QtWidgets.QHBoxLayout()
rootFrame = QtWidgets.QFrame()
rootFrame.setFrameStyle(
QtWidgets.QFrame.StyledPanel | QtWidgets.QFrame.Plain)
rootFrame.setFixedHeight(45)
rootFrame.setLineWidth(0)
rootFrame.setLayout(rootLayout)
layout.addWidget(rootFrame)
self.rootDir = QtWidgets.QLabel()
self.rootDir.setAutoFillBackground(True)
self.rootDir.setStyleSheet("QLabel { background-color: white; }")
# FILE-BROWSER
self.treeView = _TreeView()
self.fileSystemModel = _FileSystemModel(self.treeView, file_type)
self.fileSystemModel.setNameFilters(['*.%s' % file_type])
self.fileSystemModel.setNameFilterDisables(False)
self.treeView.setModel(self.fileSystemModel)
treelayout = QtWidgets.QHBoxLayout()
splitter = QtWidgets.QSplitter(QtCore.Qt.Orientation(1))
self.fileInfo = _PyzInfo(splitter, self.fileSystemModel, self.treeView)
self.treeView.clicked.connect(self.fileInfo.update)
splitter.addWidget(self.treeView)
splitter.addWidget(self.fileInfo)
treelayout.addWidget(splitter)
layout.addLayout(treelayout)
# get last root-path
self._path = PathStr('')
if CONFIG_FILE:
try:
self._path = PathStr(
open(
CONFIG_FILE,
'r').read().decode('unicode-escape'))
except IOError:
				pass  # config file does not exist yet
if not self._path or not self._path.exists():
msgBox = QtWidgets.QMessageBox()
			msgBox.setText("Please choose your project directory.")
msgBox.exec_()
self._changeRootDir()
self.treeView.setPath(self._path)
abspath = os.path.abspath(self._path)
self.rootDir.setText(abspath)
rootLayout.addWidget(self.rootDir)
# GO UPWARDS ROOT-PATH BUTTON
btnUpRootDir = QtWidgets.QPushButton('up')
btnUpRootDir.clicked.connect(self._goUpRootDir)
rootLayout.addWidget(btnUpRootDir)
# DEFINE CURRENT DIR AS ROOT-PATH
btnDefineRootDir = QtWidgets.QPushButton('set')
btnDefineRootDir.clicked.connect(self._defineRootDir)
rootLayout.addWidget(btnDefineRootDir)
# SELECT ROOT-PATH BUTTON
buttonRootDir = QtWidgets.QPushButton('select')
buttonRootDir.clicked.connect(self._changeRootDir)
rootLayout.addWidget(buttonRootDir)
# NEW-BUTTON
if self._start_script:
newButton = QtWidgets.QPushButton('NEW')
newButton.clicked.connect(self._openNew)
layout.addWidget(newButton)
@staticmethod
def rootDir():
try:
return PathStr(
open(CONFIG_FILE, 'r').read().decode('unicode-escape'))
except IOError: # create starter
return PathStr.home()
def _goUpRootDir(self):
self._setRootDir(self._path.dirname())
def _defineRootDir(self):
i = self.treeView.selectedIndexes()
# if not self.treeView.isIndexHidden(i):
if i:
if self.fileSystemModel.isDir(i[0]):
self._setRootDir(PathStr(self.fileSystemModel.filePath(i[0])))
def _changeRootDir(self):
path = self.dialogs.getExistingDirectory()
if path:
self._setRootDir(path)
def _setRootDir(self, path):
self._path = path
self.rootDir.setText(self._path)
root = self.fileSystemModel.setRootPath(self._path)
self.treeView.setRootIndex(root)
# save last path to file
if CONFIG_FILE:
open(CONFIG_FILE, 'w').write(self._path.encode('unicode-escape'))
def _openNew(self):
p = spawn.find_executable("python")
os.spawnl(os.P_NOWAIT, p, 'python', '%s' % self._start_script)
class _FileEditMenu(QtWidgets.QWidget):
def __init__(self, treeView):
QtWidgets.QWidget.__init__(self)
self._treeView = treeView
self._menu = QtWidgets.QMenu(self)
d = PathStr.getcwd()
iconpath = os.path.join(d, 'media', 'icons', 'approve.svg')
self._actionStart = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Start', self._treeView,
triggered=self._treeView.openProject)
iconpath = os.path.join(d, 'media', 'icons', 'delete.svg')
delete = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Delete', self._treeView,
triggered=self._treeView.deleteSelected)
iconpath = os.path.join(d, 'media', 'icons', 'rename.svg')
rename = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Rename', self._treeView,
triggered=self._treeView.editSelected)
iconpath = os.path.join(d, 'media', 'icons', 'new.svg')
newDir = QtWidgets.QAction(QtGui.QIcon(iconpath),
'New Directory', self._treeView,
triggered=self._treeView.newDirInSelected)
iconpath = os.path.join(d, 'media', 'icons', 'findReplace.svg')
self._editStartScript = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Edit start script', self._treeView,
triggered=self._treeView.editStartScriptInSelected)
iconpath = os.path.join(d, 'media', 'icons', 'bug.png')
self._actionInDebugMode = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Run in debug mode', self._treeView,
triggered=self._treeView.runInDebugMode)
self._menu.addAction(self._actionStart)
self._menu.addAction(rename)
self._menu.addAction(newDir)
self._menu.addAction(self._editStartScript)
self._menu.addAction(delete)
self._menu.addAction(self._actionInDebugMode)
# TODO: does not match signature
def show(self, evt):
isDir = self._treeView.selectedIsDir(evt.pos())
self._actionStart.setVisible(not isDir)
self._editStartScript.setVisible(not isDir)
self._actionInDebugMode.setVisible(not isDir)
self._menu.popup(evt.globalPos())
class _TreeView(QtWidgets.QTreeView):
def __init__(self):
super(_TreeView, self).__init__()
self.setHeaderHidden(False)
# connect own function for doubleclick
self.setExpandsOnDoubleClick(False)
self._menu = _FileEditMenu(self)
# no editing of the items when clicked, rightclicked, doubleclicked:
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.sortByColumn(0, QtCore.Qt.AscendingOrder) # sort by name
self.setSortingEnabled(True)
self.setAnimated(True) # expanding/collapsing animated
self.setIconSize(QtCore.QSize(60, 60))
# DRAG/DROP
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.doubleClicked.connect(self._doubleClicked)
def keyPressEvent(self, event):
if event.matches(QtGui.QKeySequence.Delete):
self.deleteSelected()
def selectionChanged(self, selected, deselected):
for index in deselected.indexes():
# print index
self.closePersistentEditor(index)
super(_TreeView, self).selectionChanged(selected, deselected)
def mousePressEvent(self, event):
mouseBtn = event.button()
if mouseBtn == QtCore.Qt.RightButton:
self._menu.show(event)
super(_TreeView, self).mousePressEvent(event)
def deleteSelected(self):
msgBox = QtWidgets.QMessageBox()
msgBox.setText("Are you sure?")
msgBox.addButton('Yes', QtWidgets.QMessageBox.YesRole)
msgBox.addButton('No', QtWidgets.QMessageBox.RejectRole)
ret = msgBox.exec_()
if ret == 0: # yes
self.fileSystemModel.remove(self.currentIndex())
def selectedIsDir(self, pos):
index = self.indexAt(pos)
return self.fileSystemModel.isDir(index)
def editSelected(self):
self.openPersistentEditor(self.currentIndex())
def newDirInSelected(self):
index = self.currentIndex()
if not self.fileSystemModel.isDir(index):
index = index.parent()
else:
self.setExpanded(index, True)
self.fileSystemModel.mkdir(index, 'NEW')
def editStartScriptInSelected(self):
index = self.currentIndex()
self.fileSystemModel.editStartScript(index)
def dropEvent(self, e):
index = self.indexAt(e.pos())
# only insert into directories
if self.fileSystemModel.isDir(index):
super(_TreeView, self).dropEvent(e)
def setModel(self, model):
self.fileSystemModel = model
super(_TreeView, self).setModel(model)
self.setColumnWidth(0, 300)
self.hideColumn(1) # type
self.hideColumn(2) # size
def setPath(self, path):
self._path = path
root = self.fileSystemModel.setRootPath(self._path)
self.setRootIndex(root)
def _doubleClicked(self, index):
# if folder->toggle expanding
if self.fileSystemModel.isDir(index):
self.setExpanded(index, not self.isExpanded(index))
else:
self.openProject(index)
def runInDebugMode(self):
index = self.currentIndex()
#term = os.environ.get('TERM')
self.fileSystemModel.updateStartStript(index)
if os.name == 'posix': # linux
term = 'xterm'
else:
sys.exit('debug mode not supported on windows yet')
subprocess.call([term, '-e',
'python %s -d' % self.fileSystemModel.filePath(index)])
def openProject(self, index=None):
if not index:
index = self.currentIndex()
self.fileSystemModel.updateStartStript(index)
p = distutils.spawn.find_executable("python")
# start an indepentent python-process
os.spawnl(
os.P_NOWAIT, p, 'python', '%s' %
self.fileSystemModel.filePath(index))
class _PyzInfo(QtWidgets.QWidget):
def __init__(self, vsplitter, filesystemmodel, treeView):
QtWidgets.QWidget.__init__(self)
self.layout = QtWidgets.QVBoxLayout()
self._filesystemmodel = filesystemmodel
self._treeView = treeView
self.vsplitter = vsplitter
self.hsplitter = QtWidgets.QSplitter(QtCore.Qt.Orientation(0))
self.vsplitter.splitterMoved.connect(self.scaleImgV)
self.hsplitter.splitterMoved.connect(self.scaleImgH)
self.layout.addWidget(self.hsplitter)
self._sizeDefined = False
self.setLayout(self.layout)
self.img = QtWidgets.QLabel()
self.text = QtWidgets.QTextEdit()
self.text.setReadOnly(True)
self.hsplitter.addWidget(self.img)
self.hsplitter.addWidget(self.text)
btnStart = QtWidgets.QPushButton('start')
self._btnDebug = QtWidgets.QCheckBox('debug mode')
#labelOpen = QtWidgets.QLabel('open/edit')
openBox = QtWidgets.QGroupBox('open/edit')
openBox.setAlignment(QtCore.Qt.AlignHCenter)
btnCode = QtWidgets.QPushButton('startscript')
btnActivities = QtWidgets.QPushButton('activities')
btnLogs = QtWidgets.QPushButton('logs')
btnStart.clicked.connect(self._startPYZ)
btnCode.clicked.connect(self._treeView.editStartScriptInSelected)
lBtn = QtWidgets.QHBoxLayout()
lStart = QtWidgets.QVBoxLayout()
lOpen = QtWidgets.QHBoxLayout()
# lOpen.addWidget(openBox)
openBox.setLayout(lOpen)
lBtn.addLayout(lStart)
lBtn.addWidget(openBox)
lStart.addWidget(btnStart)
lStart.addWidget(self._btnDebug)
#lOpen.addWidget(labelOpen, alignment=QtCore.Qt.AlignCenter)
# lOpenBtn = QtWidgets.QHBoxLayout()
# lOpen.addLayout(lOpenBtn)
lOpen.addWidget(btnCode)
lOpen.addWidget(btnActivities)
lOpen.addWidget(btnLogs)
self.layout.addLayout(lBtn)
self.hide()
def _startPYZ(self):
if self._btnDebug.isChecked():
self._treeView.runInDebugMode()
else:
self._treeView.openProject()
def scaleImgV(self, sizeTreeView, pos):
width = self.vsplitter.sizes()[1] - 30
self.img.setPixmap(QtGui.QPixmap(self.imgpath).scaledToWidth(width))
def scaleImgH(self, sizeTreeView, pos):
height = self.hsplitter.sizes()[0] - 30
self.img.setPixmap(QtGui.QPixmap(self.imgpath).scaledToHeight(height))
def update(self, index):
if self._filesystemmodel.isPyz(index):
(self.imgpath, description_path) = self._filesystemmodel.extractFiles(
index, 'screenshot.png', 'description.html')
# if not self.imgpath:
# self.imgpath = self.filesystemmodel.extractFiles(index,'icon')[0]
# print self.imgpath
if self.imgpath:
if not self._sizeDefined:
self._sizeDefined = True
width = 400
# self.splitter.sizes()[0]*0.5,1)
self.vsplitter.moveSplitter(400, 1)
self.img.setPixmap(
QtGui.QPixmap(
self.imgpath).scaledToWidth(width))
self.img.show()
else:
self.img.hide()
if description_path:
				self.text.setText(open(description_path).read())
else:
self.text.setText('<b>No Description found</b>')
self.show()
else:
self.hide()
class _FileSystemModel(QtWidgets.QFileSystemModel):
def __init__(self, view, file_type):
QtWidgets.QFileSystemModel.__init__(self, view)
self.view = view
self.file_type = file_type
self.setReadOnly(False)
self._editedSessions = {}
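		# _editedSessions maps a tree index to (zip path, temp dir, script name)
		# for start scripts currently opened for editing; the temp dir created
		# below holds icons, screenshots and __main__.py files extracted from
		# the *.pyz archives.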
self._tmp_dir_work = tempfile.mkdtemp('PYZ-launcher')
def isPyz(self, index):
return str(self.fileName(index)).endswith('.%s' % self.file_type)
def extractFiles(self, index, *fnames):
extnames = []
with ZipFile(str(self.filePath(index)), 'r') as myzip:
for name in fnames:
try:
myzip.extract(name, self._tmp_dir_work)
extnames.append(os.path.join(self._tmp_dir_work, name))
except KeyError:
extnames.append(None)
return extnames
# TODO: does not match signature
def data(self, index, role):
"""use zipped icon.png as icon"""
if index.column() == 0 and role == QtCore.Qt.DecorationRole:
if self.isPyz(index):
with ZipFile(str(self.filePath(index)), 'r') as myzip:
# print myzip.namelist()
try:
myzip.extract('icon', self._tmp_dir_work)
p = os.path.join(self._tmp_dir_work, 'icon')
return QtGui.QIcon(p)
except KeyError:
pass
return super(_FileSystemModel, self).data(index, role)
def editStartScript(self, index):
"""open, edit, replace __main__.py"""
f = str(self.fileName(index))
if f.endswith('.%s' % self.file_type):
zipname = str(self.filePath(index))
with ZipFile(zipname, 'a') as myzip:
# extract+save script in tmp-dir:
myzip.extract('__main__.py', self._tmp_dir_work)
tempfilename = f[:-4]
tempfilepath = os.path.join(self._tmp_dir_work, tempfilename)
os.rename(
os.path.join(
self._tmp_dir_work,
'__main__.py'),
tempfilepath)
self.openTxt(tempfilepath)
self._editedSessions[index] = (
zipname, self._tmp_dir_work, tempfilename)
def openTxt(self, path):
# open and editor (depending on platform):
if sys.platform.startswith('darwin'):
subprocess.call(('open', path))
elif os.name == 'nt':
os.startfile(path)
elif os.name == 'posix':
subprocess.call(('xdg-open', path))
def updateStartStript(self, index):
if index in self._editedSessions:
zipname, dirname, tempfilename = self._editedSessions[index]
tempfilepath = os.path.join(dirname, tempfilename)
# print dirname, tempfilename
if os.path.exists(tempfilepath):
print("adopt changed startScript '%s'" % tempfilename)
with ZipFile(zipname, 'a') as myzip:
myzip.write(tempfilepath, '__main__.py')
os.remove(tempfilepath)
if __name__ == '__main__':
app = QtWidgets.QApplication([])
a = Launcher()
a.show()
sys.exit(app.exec_())
| gpl-3.0 | -3,223,180,019,371,425,300 | 35.950172 | 101 | 0.568841 | false |
Qusic/ycmd | ycmd/tests/cs/get_completions_test.py | 1 | 17273 | # coding: utf-8
#
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import ( assert_that, calling, empty, greater_than, has_item,
has_items, has_entries, raises )
from nose.tools import eq_
from webtest import AppError
from ycmd.tests.cs import PathToTestFile, SharedYcmd, WrapOmniSharpServer
from ycmd.tests.test_utils import BuildRequest, CompletionEntryMatcher
from ycmd.utils import ReadFile
@SharedYcmd
def GetCompletions_Basic_test( app ):
filepath = PathToTestFile( 'testy', 'Program.cs' )
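  # WrapOmniSharpServer (imported above) is assumed to be a context manager
  # that keeps an OmniSharp instance running for the solution containing
  # |filepath| while the block executes.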
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 10,
column_num = 12 )
response_data = app.post_json( '/completions', completion_data ).json
assert_that( response_data[ 'completions' ],
has_items( CompletionEntryMatcher( 'CursorLeft' ),
CompletionEntryMatcher( 'CursorSize' ) ) )
eq_( 12, response_data[ 'completion_start_column' ] )
@SharedYcmd
def GetCompletions_Unicode_test( app ):
filepath = PathToTestFile( 'testy', 'Unicode.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 43,
column_num = 26 )
response_data = app.post_json( '/completions', completion_data ).json
assert_that( response_data[ 'completions' ],
has_items(
CompletionEntryMatcher( 'DoATest()' ),
CompletionEntryMatcher( 'an_int' ),
CompletionEntryMatcher( 'a_unicøde' ),
CompletionEntryMatcher( 'øøø' ) ) )
eq_( 26, response_data[ 'completion_start_column' ] )
@SharedYcmd
def GetCompletions_MultipleSolution_test( app ):
filepaths = [ PathToTestFile( 'testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-named-like-folder',
'testy',
'Program.cs' ) ]
lines = [ 10, 9 ]
for filepath, line in zip( filepaths, lines ):
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = line,
column_num = 12 )
response_data = app.post_json( '/completions',
completion_data ).json
assert_that( response_data[ 'completions' ],
has_items( CompletionEntryMatcher( 'CursorLeft' ),
CompletionEntryMatcher( 'CursorSize' ) ) )
eq_( 12, response_data[ 'completion_start_column' ] )
@SharedYcmd
def GetCompletions_PathWithSpace_test( app ):
filepath = PathToTestFile( u'неприличное слово', 'Program.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 12 )
response_data = app.post_json( '/completions', completion_data ).json
assert_that( response_data[ 'completions' ],
has_items( CompletionEntryMatcher( 'CursorLeft' ),
CompletionEntryMatcher( 'CursorSize' ) ) )
eq_( 12, response_data[ 'completion_start_column' ] )
@SharedYcmd
def GetCompletions_HasBothImportsAndNonImport_test( app ):
filepath = PathToTestFile( 'testy', 'ImportTest.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 12,
force_semantic = True,
query = 'Date' )
response_data = app.post_json( '/completions', completion_data ).json
assert_that(
response_data[ 'completions' ],
has_items( CompletionEntryMatcher( 'DateTime' ),
CompletionEntryMatcher( 'DateTimeStyles' ) )
)
@SharedYcmd
def GetCompletions_ImportsOrderedAfter_test( app ):
filepath = PathToTestFile( 'testy', 'ImportTest.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 12,
force_semantic = True,
query = 'Date' )
response_data = app.post_json( '/completions', completion_data ).json
min_import_index = min(
loc for loc, val
in enumerate( response_data[ 'completions' ] )
if val[ 'extra_data' ][ 'required_namespace_import' ]
)
max_nonimport_index = max(
loc for loc, val
in enumerate( response_data[ 'completions' ] )
if not val[ 'extra_data' ][ 'required_namespace_import' ]
)
assert_that( min_import_index, greater_than( max_nonimport_index ) ),
@SharedYcmd
def GetCompletions_ForcedReturnsResults_test( app ):
filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 21,
force_semantic = True,
query = 'Date' )
response_data = app.post_json( '/completions', completion_data ).json
assert_that( response_data[ 'completions' ],
has_items( CompletionEntryMatcher( 'String' ),
CompletionEntryMatcher( 'StringBuilder' ) ) )
@SharedYcmd
def GetCompletions_NonForcedReturnsNoResults_test( app ):
filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
event_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
app.post_json( '/event_notification', event_data )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 21,
force_semantic = False,
query = 'Date' )
results = app.post_json( '/completions', completion_data ).json
# There are no semantic completions. However, we fall back to identifier
# completer in this case.
assert_that( results, has_entries( {
'completions': has_item( has_entries( {
'typed_text' : 'String',
'extra_menu_info': '[ID]',
} ) ),
'errors': empty(),
} ) )
@SharedYcmd
def GetCompletions_ForcedDividesCache_test( app ):
filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
event_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
app.post_json( '/event_notification', event_data )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 21,
force_semantic = True,
query = 'Date' )
results = app.post_json( '/completions', completion_data ).json
assert_that( results[ 'completions' ], not( empty() ) )
assert_that( results[ 'errors' ], empty() )
completion_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
line_num = 9,
column_num = 21,
force_semantic = False,
query = 'Date' )
results = app.post_json( '/completions', completion_data ).json
# There are no semantic completions. However, we fall back to identifier
# completer in this case.
assert_that( results, has_entries( {
'completions': has_item( has_entries( {
'typed_text' : 'String',
'extra_menu_info': '[ID]',
} ) ),
'errors': empty(),
} ) )
@SharedYcmd
def GetCompletions_ReloadSolution_Basic_test( app ):
filepath = PathToTestFile( 'testy', 'Program.cs' )
with WrapOmniSharpServer( app, filepath ):
result = app.post_json(
'/run_completer_command',
BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'ReloadSolution' ],
filepath = filepath,
filetype = 'cs' ) ).json
eq_( result, True )
@SharedYcmd
def GetCompletions_ReloadSolution_MultipleSolution_test( app ):
filepaths = [ PathToTestFile( 'testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-named-like-folder',
'testy',
'Program.cs' ) ]
for filepath in filepaths:
with WrapOmniSharpServer( app, filepath ):
result = app.post_json(
'/run_completer_command',
BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'ReloadSolution' ],
filepath = filepath,
filetype = 'cs' ) ).json
eq_( result, True )
def SolutionSelectCheck( app, sourcefile, reference_solution,
extra_conf_store = None ):
# reusable test: verify that the correct solution (reference_solution) is
# detected for a given source file (and optionally a given extra_conf)
if extra_conf_store:
app.post_json( '/load_extra_conf_file',
{ 'filepath': extra_conf_store } )
result = app.post_json( '/run_completer_command',
BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'SolutionFile' ],
filepath = sourcefile,
filetype = 'cs' ) ).json
# Now that cleanup is done, verify solution file
eq_( reference_solution, result)
@SharedYcmd
def GetCompletions_UsesSubfolderHint_test( app ):
SolutionSelectCheck( app,
PathToTestFile( 'testy-multiple-solutions',
'solution-named-like-folder',
'testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-named-like-folder',
'testy.sln' ) )
@SharedYcmd
def GetCompletions_UsesSuperfolderHint_test( app ):
SolutionSelectCheck( app,
PathToTestFile( 'testy-multiple-solutions',
'solution-named-like-folder',
'not-testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-named-like-folder',
'solution-named-like-folder.sln' ) )
@SharedYcmd
def GetCompletions_ExtraConfStoreAbsolute_test( app ):
SolutionSelectCheck( app,
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-abs',
'testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'testy2.sln' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-abs',
'.ycm_extra_conf.py' ) )
@SharedYcmd
def GetCompletions_ExtraConfStoreRelative_test( app ):
SolutionSelectCheck( app,
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-rel',
'testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-rel',
'testy2.sln' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-rel',
'.ycm_extra_conf.py' ) )
@SharedYcmd
def GetCompletions_ExtraConfStoreNonexisting_test( app ):
SolutionSelectCheck( app,
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-bad',
'testy', 'Program.cs' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-bad',
'testy2.sln' ),
PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'extra-conf-bad',
'testy', '.ycm_extra_conf.py' ) )
@SharedYcmd
def GetCompletions_DoesntStartWithAmbiguousMultipleSolutions_test( app ):
filepath = PathToTestFile( 'testy-multiple-solutions',
'solution-not-named-like-folder',
'testy', 'Program.cs' )
contents = ReadFile( filepath )
event_data = BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
assert_that(
calling( app.post_json ).with_args( '/event_notification', event_data ),
raises( AppError, 'Autodetection of solution file failed' ),
"The Omnisharp server started, despite us not being able to find a "
"suitable solution file to feed it. Did you fiddle with the solution "
"finding code in cs_completer.py? Hopefully you've enhanced it: you need "
"to update this test then :)" )
| gpl-3.0 | 4,409,994,234,565,300,000 | 41.286765 | 79 | 0.513592 | false |
CospanDesign/nysa | nysa/tools/nysa_cli.py | 1 | 7013 | #! /usr/bin/python
#Distributed under the MIT licesnse.
#Copyright (c) 2014 Dave McCoy ([email protected])
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import argparse
import collections
from nysa.common import status
import image_builder
import nysa_utils
import device_list
import generate_slave
import list_boards
import reset_board
import ping_board
import board_programmed
import program_board
import upload_board
import list_platforms
import sdb_viewer
import init
import install_platform
import install_verilog_modules
import install_examples
import nysa_status
import nysa_paths
from completer_extractor import completer_extractor as ce
__author__ = "[email protected] (Dave McCoy)"
SCRIPT_NAME = os.path.basename(__file__)
DESCRIPTION = "Nysa Tool"
COMPLETER_EXTRACTOR = False
TEMP_BASH_COMPLETER_FILEPATH = "nsya"
EPILOG = "Enter the toolname with a -h to find help about that specific tool\n"
TYPE_DICT = collections.OrderedDict()
TYPE_DICT["cbuilder"] = "Functions to help create code to go into platforms"
TYPE_DICT["ibuilder"] = "Functions to generate an entire image (or binary) to be downloaded into a platform"
TYPE_DICT["host"] = "Functions to view and control boards"
TYPE_DICT["utility"] = "Functions to update and/or upgrade the nysa tool including adding new platforms and verilog packages"
TOOL_DICT = collections.OrderedDict([
(generate_slave.NAME,{
"type": "cbuilder",
"module": generate_slave,
"tool": generate_slave.generate_slave
}),
(device_list.NAME,{
"type": "cbuilder",
"module": device_list,
"tool": device_list.device_list
}),
(image_builder.NAME,{
"type": "ibuilder",
"module": image_builder,
"tool": image_builder.image_builder
}),
(reset_board.NAME,{
"type": "host",
"module": reset_board,
"tool": reset_board.reset_board
}),
(ping_board.NAME,{
"type": "host",
"module": ping_board,
"tool": ping_board.ping_board
}),
(board_programmed.NAME,{
"type": "host",
"module": board_programmed,
"tool": board_programmed.board_programmed
}),
(program_board.NAME,{
"type": "host",
"module": program_board,
"tool": program_board.program_board
}),
(upload_board.NAME,{
"type": "host",
"module": upload_board,
"tool": upload_board.upload_board
}),
(sdb_viewer.NAME,{
"type": "host",
"module": sdb_viewer,
"tool": sdb_viewer.view_sdb
}),
(init.NAME,{
"type": "utility",
"module": init,
"tool": init.init
}),
(nysa_utils.NAME,{
"type": "utility",
"module": nysa_utils,
"tool": nysa_utils.nysa_utils
}),
(list_boards.NAME,{
"type": "utility",
"module": list_boards,
"tool": list_boards.list_boards
}),
(list_platforms.NAME,{
"type": "utility",
"module": list_platforms,
"tool": list_platforms.list_platforms
}),
(install_platform.NAME,{
"type": "utility",
"module": install_platform,
"tool": install_platform.install
}),
(install_verilog_modules.NAME,{
"type": "utility",
"module": install_verilog_modules,
"tool": install_verilog_modules.install
}),
(install_examples.NAME,{
"type": "utility",
"module": install_examples,
"tool": install_examples.install
}),
(nysa_status.NAME,{
"type": "utility",
"module": nysa_status,
"tool": nysa_status.nysa_status
}),
(nysa_paths.NAME,{
"type": "utility",
"module": nysa_paths,
"tool": nysa_paths.nysa_paths
})
])
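# Each TOOL_DICT entry ties a sub-command name to its category (used to group
# the epilog below), the module that provides DESCRIPTION and setup_parser(),
# and the callable that main() invokes with the parsed args and a Status object.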
def update_epilog():
global EPILOG
tool_type_dict = collections.OrderedDict()
for type_d in TYPE_DICT:
tool_type_dict[type_d] = {}
tool_type_dict[type_d]["description"] = TYPE_DICT[type_d]
tool_type_dict[type_d]["tools"] = []
for tool in TOOL_DICT:
tool_type_dict[TOOL_DICT[tool]["type"]]["tools"].append(tool)
EPILOG += "\n"
EPILOG += "Tools:\n\n"
for tool_type in tool_type_dict:
EPILOG += "{0:25}{1}\n\n".format(tool_type, tool_type_dict[tool_type]["description"])
for tool in tool_type_dict[tool_type]["tools"]:
EPILOG += "{0:5}{1:20}{2}\n".format("", tool, TOOL_DICT[tool]["module"].DESCRIPTION)
EPILOG += "\n"
EPILOG += "\n"
def main():
update_epilog()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
#Setup the status message
s = status.Status()
s.set_level(status.StatusLevel.INFO)
#Global Flags
parser.add_argument("-v", "--verbose", action='store_true', help="Output verbose information")
parser.add_argument("-d", "--debug", action='store_true', help="Output test debug information")
subparsers = parser.add_subparsers( title = "Tools",
description = "Nysa Tools",
metavar = None,
dest = "tool")
for tool in TOOL_DICT:
p = subparsers.add_parser(tool,
description=TOOL_DICT[tool]["module"].DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
TOOL_DICT[tool]["module"].setup_parser(p)
TOOL_DICT[tool]["parser"] = p
#Parse the arguments
if COMPLETER_EXTRACTOR:
ce(parser, TEMP_BASH_COMPLETER_FILEPATH)
return
args = parser.parse_args()
if args.debug:
s.set_level(status.StatusLevel.DEBUG)
if args.verbose:
s.set_level(status.StatusLevel.VERBOSE)
#print "args: %s" % str(args)
#print "args dict: %s" % str(dir(args))
TOOL_DICT[args.tool]["tool"](args, s)
| mit | 2,666,145,066,565,960,000 | 28.970085 | 125 | 0.62541 | false |
torebutlin/cued_datalogger | docs/source/conf.py | 1 | 5181 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CUED DataLogger documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 16 12:51:29 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../cued_datalogger'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
#'sphinx.ext.napoleon']
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CUED DataLogger'
copyright = '2017, Theo Brown, En Yi Tee'
author = 'Theo Brown, En Yi Tee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'python3'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CUEDDataLoggerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CUEDDataLogger.tex', 'CUED DataLogger Documentation',
'Theo Brown, En Yi Tee', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cued_datalogger', 'CUED DataLogger Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CUEDDataLogger', 'CUED DataLogger Documentation',
author, 'CUEDDataLogger', 'One line description of project.',
'Miscellaneous'),
]
#-----------Options put in by Theo--------------------------------------------
autodoc_mock_imports = ['pyaudio', 'pyDAQmx']
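# Mocking these hardware-dependent modules lets autodoc import cued_datalogger
# on machines without PyAudio or NI-DAQmx installed (e.g. a docs build server).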
autodoc_default_flags = ['show-inheritance']
autodoc_member_order = 'alphabetical'
| bsd-3-clause | -1,848,729,279,356,678,000 | 30.023952 | 79 | 0.670334 | false |
smyrman/django-jquery-widgets | jquery_widgets/admin.py | 1 | 3264 | # -*- coding: utf-8 -*-
# Based on code from: Jannis Leidal, 2008 (http://jannisleidel.com/),
# Copyright (C) 2010: Sindre Røkenes Myren,
# This file is part of KiKrit which is distributed under GPLv3. See the file
# COPYING.txt for more details.
import operator
from django.db import models
from django.http import HttpResponse, HttpResponseNotFound
from django.contrib import admin
from django.contrib.admin.options import BaseModelAdmin
from django.utils.encoding import smart_str
from jquery_widgets.widgets import *
__all__ = ('JQWAdminMixin', 'ExtendedModelAdmin')
class JQWAdminMixin(object):
"""Enables you to configure jQury UI widgets in the admin.
jqw_autocomplete_fields
=======================
For fields of type 'ForeignKey' and 'ManyToMany', you can configure the
'jqw_autocomplete_fields' with entries of type::
'<field_name>' : ('<lookup_field1>', '<lookup_field2>'),
or::
'<field_name>' : JQWAdminMixin.LOOKUP_CHOICES,
	For any other field type where you have configured 'choices', you may add
	entries of the latter type only.
Example
-------
::
jqw_autocomplete_fields = {
'user': ('username', 'email'),
'group': JQWAdminMixin.LOOKUP_CHOICES,
}
jqw_radio_fields
================
	WARNING: Currently works rather poorly in the admin!
	Any field with a choices attribute can be listed in 'jqw_radio_fields'
	with entries of type::
	'<field_name>': <alignment>,
	Note that this syntax is identical to the existing ModelAdmin's
	'radio_fields'. Also note that currently, the <alignment> parameter is
	ignored.
Example
-------
::
jqw_radio_fields = {
'gender': JQWAdminMixin.HORIZONTAL
}
"""
LOOKUP_CHOICES = 1
HORIZONTAL = admin.HORIZONTAL
VERTICAL = admin.VERTICAL
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.jqw_autocomplete_fields:
lookup = self.jqw_autocomplete_fields[db_field.name]
if lookup == self.LOOKUP_CHOICES:
kwargs['widget'] = JQWAutocompleteSelect(
choices=db_field.get_choices(include_blank=False),
theme='ui-admin',
#theme=settings.JQUERY_UI_THEME['admin'],
use_admin_icons=True,
)
elif isinstance(db_field, models.ForeignKey):
kwargs['widget'] = JQWAutocompleteFKSelect(
rel=db_field.rel,
lookup_fields=self.jqw_autocomplete_fields[db_field.name],
theme='ui-admin',
#theme=settings.JQUERY_UI_THEME['admin'],
use_admin_icons=True,
)
elif isinstance(db_field, models.ManyToManyField):
# FIXME
pass
elif db_field.name in self.jqw_radio_fields:
align = self.jqw_radio_fields[db_field.name]
kwargs['widget'] = JQWRadioSelect(
theme='ui-admin',
#theme=settings.JQUERY_UI_THEME['admin'],
)
return BaseModelAdmin.formfield_for_dbfield(self, db_field, **kwargs)
#### Classes kept for backward compatibility only ###
class ExtendedModelAdmin(JQWAdminMixin, admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
# 'related_search_fields' has been deprecated in favour of
# 'jqw_autocomplete_fields'.
if hasattr(self, "related_search_fields"):
self.jqw_autocomplete_fields = self.related_search_fields
return super(ExtendedModelAdmin, self).formfield_for_dbfield(db_field,
**kwargs)
| mit | -4,657,023,323,136,276,000 | 28.133929 | 75 | 0.703953 | false |
openstack/os-win | os_win/utils/network/networkutils.py | 1 | 42102 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for network related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import functools
import re
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import units
import six
from os_win._i18n import _
from os_win import conf
from os_win import constants
from os_win import exceptions
from os_win.utils import _wqlutils
from os_win.utils import baseutils
from os_win.utils import jobutils
CONF = conf.CONF
LOG = logging.getLogger(__name__)
_PORT_PROFILE_ATTR_MAP = {
"profile_id": "ProfileId",
"profile_data": "ProfileData",
"profile_name": "ProfileName",
"net_cfg_instance_id": "NetCfgInstanceId",
"cdn_label_id": "CdnLabelId",
"cdn_label_string": "CdnLabelString",
"vendor_id": "VendorId",
"vendor_name": "VendorName",
}
class NetworkUtils(baseutils.BaseUtilsVirt):
EVENT_TYPE_CREATE = "__InstanceCreationEvent"
EVENT_TYPE_DELETE = "__InstanceDeletionEvent"
_VNIC_SET_DATA = 'Msvm_SyntheticEthernetPortSettingData'
_EXTERNAL_PORT = 'Msvm_ExternalEthernetPort'
_ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort'
_PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData'
_PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData'
_PORT_PROFILE_SET_DATA = 'Msvm_EthernetSwitchPortProfileSettingData'
_PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData'
_PORT_HW_OFFLOAD_SET_DATA = 'Msvm_EthernetSwitchPortOffloadSettingData'
_PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_PORT_BANDWIDTH_SET_DATA = 'Msvm_EthernetSwitchPortBandwidthSettingData'
_PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA
_LAN_ENDPOINT = 'Msvm_LANEndpoint'
_STATE_DISABLED = 3
_VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_VM_SUMMARY_ENABLED_STATE = 100
_HYPERV_VM_STATE_ENABLED = 2
_OFFLOAD_ENABLED = 100
_OFFLOAD_DISABLED = 0
_ACL_DIR_IN = 1
_ACL_DIR_OUT = 2
_ACL_TYPE_IPV4 = 2
_ACL_TYPE_IPV6 = 3
_ACL_ACTION_ALLOW = 1
_ACL_ACTION_DENY = 2
_ACL_ACTION_METER = 3
_ACL_APPLICABILITY_LOCAL = 1
_ACL_APPLICABILITY_REMOTE = 2
_ACL_DEFAULT = 'ANY'
_IPV4_ANY = '0.0.0.0/0'
_IPV6_ANY = '::/0'
_TCP_PROTOCOL = 'tcp'
_UDP_PROTOCOL = 'udp'
_ICMP_PROTOCOL = '1'
_ICMPV6_PROTOCOL = '58'
_MAX_WEIGHT = 65500
# 2 directions x 2 address types = 4 ACLs
_REJECT_ACLS_COUNT = 4
_VNIC_LISTENER_TIMEOUT_MS = 2000
_switches = {}
_switch_ports = {}
_vlan_sds = {}
_profile_sds = {}
_hw_offload_sds = {}
_vsid_sds = {}
_sg_acl_sds = {}
_bandwidth_sds = {}
def __init__(self):
super(NetworkUtils, self).__init__()
self._jobutils = jobutils.JobUtils()
self._enable_cache = CONF.os_win.cache_temporary_wmi_objects
def init_caches(self):
if not self._enable_cache:
LOG.info('WMI caching is disabled.')
return
for vswitch in self._conn.Msvm_VirtualEthernetSwitch():
self._switches[vswitch.ElementName] = vswitch
# map between switch port ID and switch port WMI object.
for port in self._conn.Msvm_EthernetPortAllocationSettingData():
self._switch_ports[port.ElementName] = port
# VLAN and VSID setting data's InstanceID will contain the switch
# port's InstanceID.
switch_port_id_regex = re.compile(
"Microsoft:[0-9A-F-]*\\\\[0-9A-F-]*\\\\[0-9A-F-]",
flags=re.IGNORECASE)
# map between switch port's InstanceID and their Port Profile settings
# data WMI objects.
for profile in self._conn.Msvm_EthernetSwitchPortProfileSettingData():
match = switch_port_id_regex.match(profile.InstanceID)
if match:
self._profile_sds[match.group()] = profile
# map between switch port's InstanceID and their VLAN setting data WMI
# objects.
for vlan_sd in self._conn.Msvm_EthernetSwitchPortVlanSettingData():
match = switch_port_id_regex.match(vlan_sd.InstanceID)
if match:
self._vlan_sds[match.group()] = vlan_sd
# map between switch port's InstanceID and their VSID setting data WMI
# objects.
for vsid_sd in self._conn.Msvm_EthernetSwitchPortSecuritySettingData():
match = switch_port_id_regex.match(vsid_sd.InstanceID)
if match:
self._vsid_sds[match.group()] = vsid_sd
# map between switch port's InstanceID and their bandwidth setting
# data WMI objects.
bandwidths = self._conn.Msvm_EthernetSwitchPortBandwidthSettingData()
for bandwidth_sd in bandwidths:
match = switch_port_id_regex.match(bandwidth_sd.InstanceID)
if match:
self._bandwidth_sds[match.group()] = bandwidth_sd
# map between switch port's InstanceID and their HW offload setting
# data WMI objects.
hw_offloads = self._conn.Msvm_EthernetSwitchPortOffloadSettingData()
for hw_offload_sd in hw_offloads:
match = switch_port_id_regex.match(hw_offload_sd.InstanceID)
if match:
self._hw_offload_sds[match.group()] = hw_offload_sd
def update_cache(self):
if not self._enable_cache:
return
# map between switch port ID and switch port WMI object.
self._switch_ports.clear()
for port in self._conn.Msvm_EthernetPortAllocationSettingData():
self._switch_ports[port.ElementName] = port
def clear_port_sg_acls_cache(self, switch_port_name):
self._sg_acl_sds.pop(switch_port_name, None)
def get_vswitch_id(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
return vswitch.Name
def get_vswitch_extensions(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
extensions = self._conn.Msvm_EthernetSwitchExtension(
SystemName=vswitch.Name)
dict_ext_list = [
{'name': ext.ElementName,
'version': ext.Version,
'vendor': ext.Vendor,
'description': ext.Description,
'enabled_state': ext.EnabledState,
'extension_type': ext.ExtensionType}
for ext in extensions]
return dict_ext_list
def get_vswitch_external_network_name(self, vswitch_name):
ext_port = self._get_vswitch_external_port(vswitch_name)
if ext_port:
return ext_port.ElementName
def _get_vswitch(self, vswitch_name):
if vswitch_name in self._switches:
return self._switches[vswitch_name]
vswitch = self._conn.Msvm_VirtualEthernetSwitch(
ElementName=vswitch_name)
if not vswitch:
raise exceptions.HyperVvSwitchNotFound(vswitch_name=vswitch_name)
if self._enable_cache:
self._switches[vswitch_name] = vswitch[0]
return vswitch[0]
def _get_vswitch_external_port(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
ext_ports = self._conn.Msvm_ExternalEthernetPort()
for ext_port in ext_ports:
lan_endpoint_assoc_list = (
self._conn.Msvm_EthernetDeviceSAPImplementation(
Antecedent=ext_port.path_()))
if lan_endpoint_assoc_list:
lan_endpoint_assoc_list = self._conn.Msvm_ActiveConnection(
Dependent=lan_endpoint_assoc_list[0].Dependent.path_())
if lan_endpoint_assoc_list:
lan_endpoint = lan_endpoint_assoc_list[0].Antecedent
if lan_endpoint.SystemName == vswitch.Name:
return ext_port
def vswitch_port_needed(self):
return False
def get_switch_ports(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
vswitch_ports = self._conn.Msvm_EthernetSwitchPort(
SystemName=vswitch.Name)
return set(p.Name for p in vswitch_ports)
def get_port_by_id(self, port_id, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
switch_ports = self._conn.Msvm_EthernetSwitchPort(
SystemName=vswitch.Name)
for switch_port in switch_ports:
if (switch_port.ElementName == port_id):
return switch_port
def vnic_port_exists(self, port_id):
try:
self._get_vnic_settings(port_id)
except Exception:
return False
return True
def get_vnic_ids(self):
return set(
p.ElementName
for p in self._conn.Msvm_SyntheticEthernetPortSettingData()
if p.ElementName is not None)
def get_vnic_mac_address(self, switch_port_name):
vnic = self._get_vnic_settings(switch_port_name)
return vnic.Address
def _get_vnic_settings(self, vnic_name):
vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=vnic_name)
if not vnic_settings:
raise exceptions.HyperVvNicNotFound(vnic_name=vnic_name)
return vnic_settings[0]
def get_vnic_event_listener(self, event_type):
query = self._get_event_wql_query(cls=self._VNIC_SET_DATA,
event_type=event_type,
timeframe=2)
listener = self._conn.Msvm_SyntheticEthernetPortSettingData.watch_for(
query)
def _poll_events(callback):
if patcher.is_monkey_patched('thread'):
listen = functools.partial(tpool.execute, listener,
self._VNIC_LISTENER_TIMEOUT_MS)
else:
listen = functools.partial(listener,
self._VNIC_LISTENER_TIMEOUT_MS)
while True:
# Retrieve one by one all the events that occurred in
# the checked interval.
try:
event = listen()
if event.ElementName:
callback(event.ElementName)
else:
LOG.warning("Ignoring port event. "
"The port name is missing.")
except exceptions.x_wmi_timed_out:
# no new event published.
pass
return _poll_events
def _get_event_wql_query(self, cls, event_type, timeframe=2, **where):
"""Return a WQL query used for polling WMI events.
:param cls: the Hyper-V class polled for events.
:param event_type: the type of event expected.
:param timeframe: check for events that occurred in
the specified timeframe.
:param where: key-value arguments which are to be included in the
query. For example: like=dict(foo="bar").
"""
like = where.pop('like', {})
like_str = " AND ".join("TargetInstance.%s LIKE '%s%%'" % (k, v)
for k, v in like.items())
like_str = "AND " + like_str if like_str else ""
query = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' %(like)s" % {
'class': cls,
'event_type': event_type,
'like': like_str,
'timeframe': timeframe})
return query
def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
port, found = self._get_switch_port_allocation(
switch_port_name, create=True, expected=False)
if found and port.HostResource and port.HostResource[0]:
# vswitch port already exists and is connected to vswitch.
return
vswitch = self._get_vswitch(vswitch_name)
vnic = self._get_vnic_settings(switch_port_name)
port.HostResource = [vswitch.path_()]
port.Parent = vnic.path_()
if not found:
vm = self._get_vm_from_res_setting_data(vnic)
self._jobutils.add_virt_resource(port, vm)
else:
self._jobutils.modify_virt_resource(port)
def _get_vm_from_res_setting_data(self, res_setting_data):
vmsettings_instance_id = res_setting_data.InstanceID.split('\\')[0]
sd = self._conn.Msvm_VirtualSystemSettingData(
InstanceID=vmsettings_instance_id)
vm = self._conn.Msvm_ComputerSystem(Name=sd[0].ConfigurationID)
return vm[0]
def remove_switch_port(self, switch_port_name, vnic_deleted=False):
"""Removes the switch port."""
sw_port, found = self._get_switch_port_allocation(switch_port_name,
expected=False)
if not sw_port:
# Port not found. It happens when the VM was already deleted.
return
if not vnic_deleted:
try:
self._jobutils.remove_virt_resource(sw_port)
except exceptions.x_wmi:
# port may have already been destroyed by Hyper-V
pass
self._switch_ports.pop(switch_port_name, None)
self._profile_sds.pop(sw_port.InstanceID, None)
self._vlan_sds.pop(sw_port.InstanceID, None)
self._vsid_sds.pop(sw_port.InstanceID, None)
self._bandwidth_sds.pop(sw_port.InstanceID, None)
self._hw_offload_sds.pop(sw_port.InstanceID, None)
def set_vswitch_port_profile_id(self, switch_port_name, profile_id,
profile_data, profile_name, vendor_name,
**kwargs):
"""Sets up the port profile id.
:param switch_port_name: The ElementName of the vSwitch port.
:param profile_id: The profile id to be set for the given switch port.
:param profile_data: Additional data for the Port Profile.
:param profile_name: The name of the Port Profile.
:param net_cfg_instance_id: Unique device identifier of the
sub-interface.
:param cdn_label_id: The CDN Label Id.
:param cdn_label_string: The CDN label string.
:param vendor_id: The id of the Vendor defining the profile.
:param vendor_name: The name of the Vendor defining the profile.
"""
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
port_profile = self._get_profile_setting_data_from_port_alloc(
port_alloc)
new_port_profile = self._prepare_profile_sd(
profile_id=profile_id, profile_data=profile_data,
profile_name=profile_name, vendor_name=vendor_name, **kwargs)
if port_profile:
# Removing the feature because it cannot be modified
# due to a wmi exception.
self._jobutils.remove_virt_feature(port_profile)
# remove from cache.
self._profile_sds.pop(port_alloc.InstanceID, None)
try:
self._jobutils.add_virt_feature(new_port_profile, port_alloc)
except Exception as ex:
raise exceptions.HyperVException(
'Unable to set port profile settings %(port_profile)s '
'for port %(port)s. Error: %(error)s' %
dict(port_profile=new_port_profile, port=port_alloc, error=ex))
def set_vswitch_port_vlan_id(self, vlan_id=None, switch_port_name=None,
**kwargs):
"""Sets up operation mode, VLAN ID and VLAN trunk for the given port.
:param vlan_id: the VLAN ID to be set for the given switch port.
:param switch_port_name: the ElementName of the vSwitch port.
:param operation_mode: the VLAN operation mode. The acceptable values
are:
os_win.constants.VLAN_MODE_ACCESS, os_win.constants.VLAN_TRUNK_MODE
If not given, VLAN_MODE_ACCESS is used by default.
:param trunk_vlans: an array of VLAN IDs to be set in trunk mode.
:raises AttributeError: if an unsupported operation_mode is given, or
the given operation mode is VLAN_MODE_ACCESS and the given
trunk_vlans is not None.
"""
operation_mode = kwargs.get('operation_mode',
constants.VLAN_MODE_ACCESS)
trunk_vlans = kwargs.get('trunk_vlans')
if operation_mode not in [constants.VLAN_MODE_ACCESS,
constants.VLAN_MODE_TRUNK]:
msg = _('Unsupported VLAN operation mode: %s')
raise AttributeError(msg % operation_mode)
if (operation_mode == constants.VLAN_MODE_ACCESS and
trunk_vlans is not None):
raise AttributeError(_('The given operation mode is ACCESS, '
'cannot set given trunk_vlans.'))
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
if operation_mode == constants.VLAN_MODE_ACCESS:
new_vlan_settings = self._prepare_vlan_sd_access_mode(
vlan_settings, vlan_id)
else:
new_vlan_settings = self._prepare_vlan_sd_trunk_mode(
vlan_settings, vlan_id, trunk_vlans)
if not new_vlan_settings:
# if no object was returned, it means that the VLAN Setting Data
# was already added with the desired attributes.
return
if vlan_settings:
# Removing the feature because it cannot be modified
# due to a wmi exception.
self._jobutils.remove_virt_feature(vlan_settings)
# remove from cache.
self._vlan_sds.pop(port_alloc.InstanceID, None)
self._jobutils.add_virt_feature(new_vlan_settings, port_alloc)
# TODO(claudiub): This will help solve the missing VLAN issue, but it
# comes with a performance cost. The root cause of the problem must
# be solved.
vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
if not vlan_settings:
raise exceptions.HyperVException(
_('Port VLAN not found: %s') % switch_port_name)
def _prepare_profile_sd(self, **kwargs):
profile_id_settings = self._create_default_setting_data(
self._PORT_PROFILE_SET_DATA)
for argument_name, attr_name in _PORT_PROFILE_ATTR_MAP.items():
attribute = kwargs.pop(argument_name, None)
if attribute is None:
continue
setattr(profile_id_settings, attr_name, attribute)
if kwargs:
raise TypeError("Unrecognized attributes %r" % kwargs)
return profile_id_settings
def _prepare_vlan_sd_access_mode(self, vlan_settings, vlan_id):
if vlan_settings:
# the given vlan_id might be None.
vlan_id = vlan_id or vlan_settings.AccessVlanId
if (vlan_settings.OperationMode == constants.VLAN_MODE_ACCESS and
vlan_settings.AccessVlanId == vlan_id):
# VLAN already set to correct value, no need to change it.
return None
vlan_settings = self._create_default_setting_data(
self._PORT_VLAN_SET_DATA)
vlan_settings.AccessVlanId = vlan_id
vlan_settings.OperationMode = constants.VLAN_MODE_ACCESS
return vlan_settings
def _prepare_vlan_sd_trunk_mode(self, vlan_settings, vlan_id, trunk_vlans):
if vlan_settings:
# the given vlan_id might be None.
vlan_id = vlan_id or vlan_settings.NativeVlanId
trunk_vlans = trunk_vlans or vlan_settings.TrunkVlanIdArray or []
trunk_vlans = sorted(trunk_vlans)
if (vlan_settings.OperationMode == constants.VLAN_MODE_TRUNK and
vlan_settings.NativeVlanId == vlan_id and
sorted(vlan_settings.TrunkVlanIdArray) == trunk_vlans):
# VLAN already set to correct value, no need to change it.
return None
vlan_settings = self._create_default_setting_data(
self._PORT_VLAN_SET_DATA)
vlan_settings.NativeVlanId = vlan_id
vlan_settings.TrunkVlanIdArray = trunk_vlans
vlan_settings.OperationMode = constants.VLAN_MODE_TRUNK
return vlan_settings
def set_vswitch_port_vsid(self, vsid, switch_port_name):
self._set_switch_port_security_settings(switch_port_name,
VirtualSubnetId=vsid)
def set_vswitch_port_mac_spoofing(self, switch_port_name, state):
"""Sets the given port's MAC spoofing to the given state.
:param switch_port_name: the name of the port which will have MAC
spoofing set to the given state.
:param state: boolean, if MAC spoofing should be turned on or off.
"""
self._set_switch_port_security_settings(switch_port_name,
AllowMacSpoofing=state)
def _set_switch_port_security_settings(self, switch_port_name, **kwargs):
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
sec_settings = self._get_security_setting_data_from_port_alloc(
port_alloc)
exists = sec_settings is not None
if exists:
if all(getattr(sec_settings, k) == v for k, v in kwargs.items()):
# All desired properties already properly set. Nothing to do.
return
else:
sec_settings = self._create_default_setting_data(
self._PORT_SECURITY_SET_DATA)
for k, v in kwargs.items():
setattr(sec_settings, k, v)
if exists:
self._jobutils.modify_virt_feature(sec_settings)
else:
self._jobutils.add_virt_feature(sec_settings, port_alloc)
# TODO(claudiub): This will help solve the missing VSID issue, but it
# comes with a performance cost. The root cause of the problem must
# be solved.
sec_settings = self._get_security_setting_data_from_port_alloc(
port_alloc)
if not sec_settings:
raise exceptions.HyperVException(
_('Port Security Settings not found: %s') % switch_port_name)
def set_vswitch_port_sriov(self, switch_port_name, enabled):
"""Enables / Disables SR-IOV for the given port.
:param switch_port_name: the name of the port which will have SR-IOV
enabled or disabled.
:param enabled: boolean, if SR-IOV should be turned on or off.
"""
# TODO(claudiub): We have added a different method that sets all sorts
# of offloading options on a vswitch port, including SR-IOV.
# Remove this method in S.
self.set_vswitch_port_offload(switch_port_name, sriov_enabled=enabled)
def set_vswitch_port_offload(self, switch_port_name, sriov_enabled=None,
iov_queues_requested=None, vmq_enabled=None,
offloaded_sa=None):
"""Enables / Disables different offload options for the given port.
Optional parameters are ignored if they are None.
:param switch_port_name: the name of the port which will have VMQ
enabled or disabled.
:param sriov_enabled: if SR-IOV should be turned on or off.
:param iov_queues_requested: the number of IOV queues to use. (> 1)
:param vmq_enabled: if VMQ should be turned on or off.
:param offloaded_sa: the number of IPsec SA offloads to use. (> 1)
:raises os_win.exceptions.InvalidParameterValue: if an invalid value
is passed for the iov_queues_requested or offloaded_sa parameters.
"""
if iov_queues_requested is not None and iov_queues_requested < 1:
raise exceptions.InvalidParameterValue(
param_name='iov_queues_requested',
param_value=iov_queues_requested)
if offloaded_sa is not None and offloaded_sa < 1:
raise exceptions.InvalidParameterValue(
param_name='offloaded_sa',
param_value=offloaded_sa)
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
# NOTE(claudiub): All ports have a HW offload SD.
hw_offload_sd = self._get_hw_offload_sd_from_port_alloc(port_alloc)
sd_changed = False
if sriov_enabled is not None:
desired_state = (self._OFFLOAD_ENABLED if sriov_enabled else
self._OFFLOAD_DISABLED)
if hw_offload_sd.IOVOffloadWeight != desired_state:
hw_offload_sd.IOVOffloadWeight = desired_state
sd_changed = True
if iov_queues_requested is not None:
if hw_offload_sd.IOVQueuePairsRequested != iov_queues_requested:
hw_offload_sd.IOVQueuePairsRequested = iov_queues_requested
sd_changed = True
if vmq_enabled is not None:
desired_state = (self._OFFLOAD_ENABLED if vmq_enabled else
self._OFFLOAD_DISABLED)
if hw_offload_sd.VMQOffloadWeight != desired_state:
hw_offload_sd.VMQOffloadWeight = desired_state
sd_changed = True
if offloaded_sa is not None:
if hw_offload_sd.IPSecOffloadLimit != offloaded_sa:
hw_offload_sd.IPSecOffloadLimit = offloaded_sa
sd_changed = True
# NOTE(claudiub): The HW offload SD can simply be modified. No need to
# remove it and create a new one.
if sd_changed:
self._jobutils.modify_virt_feature(hw_offload_sd)
def _get_profile_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._profile_sds, self._PORT_PROFILE_SET_DATA)
def _get_vlan_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._vlan_sds, self._PORT_VLAN_SET_DATA)
def _get_security_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._vsid_sds, self._PORT_SECURITY_SET_DATA)
def _get_hw_offload_sd_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._hw_offload_sds, self._PORT_HW_OFFLOAD_SET_DATA)
def _get_bandwidth_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._bandwidth_sds, self._PORT_BANDWIDTH_SET_DATA)
def _get_setting_data_from_port_alloc(self, port_alloc, cache, data_class):
if port_alloc.InstanceID in cache:
return cache[port_alloc.InstanceID]
setting_data = self._get_first_item(
_wqlutils.get_element_associated_class(
self._conn, data_class,
element_instance_id=port_alloc.InstanceID))
if setting_data and self._enable_cache:
cache[port_alloc.InstanceID] = setting_data
return setting_data
def _get_switch_port_allocation(self, switch_port_name, create=False,
expected=True):
if switch_port_name in self._switch_ports:
return self._switch_ports[switch_port_name], True
switch_port, found = self._get_setting_data(
self._PORT_ALLOC_SET_DATA,
switch_port_name, create)
if found:
# newly created setting data cannot be cached, they do not
# represent real objects yet.
# if it was found, it means that it was not created.
if self._enable_cache:
self._switch_ports[switch_port_name] = switch_port
elif expected:
raise exceptions.HyperVPortNotFoundException(
port_name=switch_port_name)
return switch_port, found
def _get_setting_data(self, class_name, element_name, create=True):
element_name = element_name.replace("'", '"')
q = self._compat_conn.query("SELECT * FROM %(class_name)s WHERE "
"ElementName = '%(element_name)s'" %
{"class_name": class_name,
"element_name": element_name})
data = self._get_first_item(q)
found = data is not None
if not data and create:
data = self._get_default_setting_data(class_name)
data.ElementName = element_name
return data, found
def _get_default_setting_data(self, class_name):
return self._compat_conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _create_default_setting_data(self, class_name):
return getattr(self._compat_conn, class_name).new()
def _get_first_item(self, obj):
if obj:
return obj[0]
def add_metrics_collection_acls(self, switch_port_name):
port = self._get_switch_port_allocation(switch_port_name)[0]
# Add the ACLs only if they don't already exist
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_ALLOC_ACL_SET_DATA,
element_instance_id=port.InstanceID)
for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]:
for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
_acls = self._filter_acls(
acls, self._ACL_ACTION_METER, acl_dir, acl_type)
if not _acls:
acl = self._create_acl(
acl_dir, acl_type, self._ACL_ACTION_METER)
self._jobutils.add_virt_feature(acl, port)
def is_metrics_collection_allowed(self, switch_port_name):
port = self._get_switch_port_allocation(switch_port_name)[0]
if not self._is_port_vm_started(port):
return False
# all 4 meter ACLs must be existent first. (2 x direction)
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_ALLOC_ACL_SET_DATA,
element_instance_id=port.InstanceID)
acls = [a for a in acls if a.Action == self._ACL_ACTION_METER]
if len(acls) < 2:
return False
return True
def _is_port_vm_started(self, port):
vmsettings_instance_id = port.InstanceID.split('\\')[0]
vmsettings = self._conn.Msvm_VirtualSystemSettingData(
InstanceID=vmsettings_instance_id)
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = self._vs_man_svc.GetSummaryInformation(
[self._VM_SUMMARY_ENABLED_STATE],
[v.path_() for v in vmsettings])
if ret_val or not summary_info:
raise exceptions.HyperVException(_('Cannot get VM summary data '
'for: %s') % port.ElementName)
return summary_info[0].EnabledState == self._HYPERV_VM_STATE_ENABLED
def create_security_rules(self, switch_port_name, sg_rules):
port = self._get_switch_port_allocation(switch_port_name)[0]
self._bind_security_rules(port, sg_rules)
def remove_security_rules(self, switch_port_name, sg_rules):
port = self._get_switch_port_allocation(switch_port_name)[0]
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
remove_acls = []
for sg_rule in sg_rules:
filtered_acls = self._filter_security_acls(sg_rule, acls)
remove_acls.extend(filtered_acls)
if remove_acls:
self._jobutils.remove_multiple_virt_features(remove_acls)
# remove the old ACLs from the cache.
new_acls = [a for a in acls if a not in remove_acls]
self._sg_acl_sds[port.ElementName] = new_acls
def remove_all_security_rules(self, switch_port_name):
port = self._get_switch_port_allocation(switch_port_name)[0]
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
filtered_acls = [a for a in acls if
a.Action != self._ACL_ACTION_METER]
if filtered_acls:
self._jobutils.remove_multiple_virt_features(filtered_acls)
# clear the cache.
self._sg_acl_sds[port.ElementName] = []
def _bind_security_rules(self, port, sg_rules):
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
# Add the ACL only if it doesn't already exist.
add_acls = []
processed_sg_rules = []
weights = self._get_new_weights(sg_rules, acls)
index = 0
for sg_rule in sg_rules:
filtered_acls = self._filter_security_acls(sg_rule, acls)
if filtered_acls:
# ACL already exists.
continue
acl = self._create_security_acl(sg_rule, weights[index])
add_acls.append(acl)
index += 1
# append sg_rule to the acls list, to make sure that the same rule
# is not processed twice.
processed_sg_rules.append(sg_rule)
if add_acls:
self._jobutils.add_multiple_virt_features(add_acls, port)
# caching the Security Group Rules that have been processed and
# added to the port. The list should only be used to check the
# existence of rules, nothing else.
acls.extend(processed_sg_rules)
def _get_port_security_acls(self, port):
"""Returns a mutable list of Security Group Rule objects.
Returns the list of Security Group Rule objects from the cache,
otherwise it fetches and caches from the port's associated class.
"""
if port.ElementName in self._sg_acl_sds:
return self._sg_acl_sds[port.ElementName]
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
if self._enable_cache:
self._sg_acl_sds[port.ElementName] = acls
return acls
def _create_acl(self, direction, acl_type, action):
acl = self._create_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA)
acl.set(Direction=direction,
AclType=acl_type,
Action=action,
Applicability=self._ACL_APPLICABILITY_LOCAL)
return acl
def _create_security_acl(self, sg_rule, weight):
# Acl instance can be created new each time, the object should be
# of type ExtendedEthernetSettingsData.
acl = self._create_default_setting_data(self._PORT_EXT_ACL_SET_DATA)
acl.set(**sg_rule.to_dict())
return acl
def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""):
return [v for v in acls
if v.Action == action and
v.Direction == direction and
v.AclType == acl_type and
v.RemoteAddress == remote_addr]
def _filter_security_acls(self, sg_rule, acls):
return [a for a in acls if sg_rule == a]
def _get_new_weights(self, sg_rules, existent_acls):
"""Computes the weights needed for given sg_rules.
:param sg_rules: ACLs to be added. They must have the same Action.
:existent_acls: ACLs already bound to a switch port.
:return: list of weights which will be used to create ACLs. List will
have the recommended order for sg_rules' Action.
"""
return [0] * len(sg_rules)
def set_port_qos_rule(self, port_id, qos_rule):
"""Sets the QoS rule for the given port.
:param port_id: the port's ID to which the QoS rule will be applied to.
:param qos_rule: a dictionary containing the following keys:
min_kbps, max_kbps, max_burst_kbps, max_burst_size_kb.
:raises exceptions.HyperVInvalidException: if
- min_kbps is smaller than 10MB.
- max_kbps is smaller than min_kbps.
- max_burst_kbps is smaller than max_kbps.
:raises exceptions.HyperVException: if the QoS rule cannot be set.
"""
# Hyper-V stores bandwidth limits in bytes.
min_bps = qos_rule.get("min_kbps", 0) * units.Ki
max_bps = qos_rule.get("max_kbps", 0) * units.Ki
max_burst_bps = qos_rule.get("max_burst_kbps", 0) * units.Ki
max_burst_sz = qos_rule.get("max_burst_size_kb", 0) * units.Ki
if not (min_bps or max_bps or max_burst_bps or max_burst_sz):
# no limits need to be set
return
if min_bps and min_bps < 10 * units.Mi:
raise exceptions.InvalidParameterValue(
param_name="min_kbps", param_value=min_bps)
if max_bps and max_bps < min_bps:
raise exceptions.InvalidParameterValue(
param_name="max_kbps", param_value=max_bps)
if max_burst_bps and max_burst_bps < max_bps:
raise exceptions.InvalidParameterValue(
param_name="max_burst_kbps", param_value=max_burst_bps)
port_alloc = self._get_switch_port_allocation(port_id)[0]
bandwidth = self._get_bandwidth_setting_data_from_port_alloc(
port_alloc)
if bandwidth:
# Removing the feature because it cannot be modified
# due to a wmi exception.
self._jobutils.remove_virt_feature(bandwidth)
# remove from cache.
self._bandwidth_sds.pop(port_alloc.InstanceID, None)
bandwidth = self._get_default_setting_data(
self._PORT_BANDWIDTH_SET_DATA)
bandwidth.Reservation = min_bps
bandwidth.Limit = max_bps
bandwidth.BurstLimit = max_burst_bps
bandwidth.BurstSize = max_burst_sz
try:
self._jobutils.add_virt_feature(bandwidth, port_alloc)
except Exception as ex:
if '0x80070057' in six.text_type(ex):
raise exceptions.InvalidParameterValue(
param_name="qos_rule", param_value=qos_rule)
raise exceptions.HyperVException(
'Unable to set qos rule %(qos_rule)s for port %(port)s. '
'Error: %(error)s' %
dict(qos_rule=qos_rule, port=port_alloc, error=ex))
def remove_port_qos_rule(self, port_id):
"""Removes the QoS rule from the given port.
:param port_id: the port's ID from which the QoS rule will be removed.
"""
port_alloc = self._get_switch_port_allocation(port_id)[0]
bandwidth = self._get_bandwidth_setting_data_from_port_alloc(
port_alloc)
if bandwidth:
self._jobutils.remove_virt_feature(bandwidth)
# remove from cache.
self._bandwidth_sds.pop(port_alloc.InstanceID, None)
class NetworkUtilsR2(NetworkUtils):
_PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData'
_MAX_WEIGHT = 65500
# 2 directions x 2 address types x 4 protocols = 16 ACLs
_REJECT_ACLS_COUNT = 16
def _create_security_acl(self, sg_rule, weight):
acl = super(NetworkUtilsR2, self)._create_security_acl(sg_rule,
weight)
acl.Weight = weight
sg_rule.Weight = weight
return acl
def _get_new_weights(self, sg_rules, existent_acls):
sg_rule = sg_rules[0]
num_rules = len(sg_rules)
existent_acls = [a for a in existent_acls
if a.Action == sg_rule.Action]
if not existent_acls:
if sg_rule.Action == self._ACL_ACTION_DENY:
return list(range(1, 1 + num_rules))
else:
return list(range(self._MAX_WEIGHT - 1,
self._MAX_WEIGHT - 1 - num_rules, - 1))
# there are existent ACLs.
weights = [a.Weight for a in existent_acls]
if sg_rule.Action == self._ACL_ACTION_DENY:
return [i for i in list(range(1, self._REJECT_ACLS_COUNT + 1))
if i not in weights][:num_rules]
min_weight = min(weights)
last_weight = min_weight - num_rules - 1
if last_weight > self._REJECT_ACLS_COUNT:
return list(range(min_weight - 1, last_weight, - 1))
# not enough weights. Must search for available weights.
# if this is the case, num_rules is a small number.
current_weight = self._MAX_WEIGHT - 1
new_weights = []
for i in list(range(num_rules)):
while current_weight in weights:
current_weight -= 1
new_weights.append(current_weight)
return new_weights
| apache-2.0 | 3,369,183,737,832,306,000 | 40.155425 | 79 | 0.600898 | false |
google-research/language | language/emql/cm_sketch.py | 1 | 5160 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of Count-Min Sketch.
Implement a count-min sketch module that can create count-min sketch, check
membership of an element, and compute intersection and union of the sketches
of two sets.
"""
from absl import app
from absl import flags
import numpy as np
from tqdm import tqdm
FLAGS = flags.FLAGS
np.random.seed(0)
class CountMinContext(object):
"""Definition of countmin sketch context.
A CountMinContext hold the information needed to construct a count-min
sketch. It caches the hash values of observed elements.
"""
def __init__(self, width, depth, n = -1):
"""Initialize the count-min sketch context.
Pre-compute the hashes of all elements if the number of elements is
known (n>0).
Args:
width: width of the cm-sketch
depth: depth of the cm-sketch
n: number of elements, -1 if it's unknown
"""
self.width = width
self.depth = depth
self.cache = dict() # cache of hash value to a list of ids
if n != -1:
for e in tqdm(range(n)):
e = str(e)
self.cache[e] = [self._hash(e, i) for i in range(self.depth)]
def _hash(self, x, i):
"""Get the i'th hash value of element x.
Args:
x: name or id in string
i: the i'th hash function
Returns:
hash result
"""
assert isinstance(x, str)
assert isinstance(i, int)
assert i >= 0 and i < self.depth
hash_val = hash((i, x))
return hash_val % self.width
def get_hashes(self, x):
"""Get the hash values of x.
Each element is hashed d times, where d is the depth of the count-min
sketch specified in the constructor of CountMinContext. This function
returns d hash values of element x.
Args:
x: name or id in string
Returns:
a list of hash values with the length of depth
"""
x = str(x)
if x not in self.cache:
self.cache[x] = [self._hash(x, i) for i in range(self.depth)]
return self.cache[x]
def get_sketch(self, xs = None):
"""Return a sketch for set xs (all zeros if xs not specified).
This function takes a set of elements xs, takes their hash values, and
sets 1.0 at the corresponding positions. It returns a 2d numpy array
with width and depth declared in the constructor of CountMinContext.
Values at unassigned positions remain 0.
Args:
xs: a set of name or id in string
Returns:
a sketch np.array()
"""
sketch = np.zeros((self.depth, self.width), dtype=np.float32)
if xs is not None:
self.add_set(sketch, xs)
return sketch
def add(self, sketch, x):
"""Add an element to the sketch.
Args:
sketch: sketch to add x to
x: name or id in string
"""
assert isinstance(x, str)
assert (self.depth, self.width) == sketch.shape
if x not in self.cache:
self.cache[x] = [self._hash(x, i) for i in range(self.depth)]
for i in range(self.depth):
sketch[i, self.cache[x][i]] += 1.0
def add_set(self, sketch, xs):
"""Add a set of elements to the sketch.
Args:
sketch: sketch to add xs to
xs: a set of name or id in string
"""
assert (self.depth, self.width) == sketch.shape
for x in xs:
x = str(x)
if not self.contain(sketch, x):
self.add(sketch, x)
def contain(self, sketch, x):
"""Check if the sketch contains x.
Args:
sketch: sketch to add xs to
x: name or id in string
Returns:
True or False
"""
assert (self.depth, self.width) == sketch.shape
x = str(x)
if x not in self.cache:
self.cache[x] = [self._hash(x, i) for i in range(self.depth)]
for i in range(self.depth):
if sketch[i, self.cache[x][i]] == 0.0:
return False
return True
def intersection(self, sk1, sk2):
"""Intersect two sketches.
Args:
sk1: first sketch
sk2: second sketch
Returns:
a countmin sketch for intersection
"""
assert sk1.shape == sk2.shape
assert (self.depth, self.width) == sk1.shape
sk_intersection = sk1 * sk2
return sk_intersection
def union(self, sk1, sk2):
"""Union two sketches.
Args:
sk1: first sketch
sk2: second sketch
Returns:
a countmin sketch for union
"""
assert sk1.shape == sk2.shape
assert (self.depth, self.width) == sk1.shape
sk_union = sk1 + sk2
return sk_union
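# The helper below is an illustrative addition (not part of the original
# module): a minimal, hedged sketch of how CountMinContext might be used.
# The width/depth values and the function name are arbitrary assumptions.
def _example_usage():
  """Builds two sketches, combines them, and checks membership."""
  ctx = CountMinContext(width=2000, depth=5)
  sk_a = ctx.get_sketch({'1', '2', '3'})
  sk_b = ctx.get_sketch({'2', '3', '4'})
  sk_both = ctx.intersection(sk_a, sk_b)
  sk_either = ctx.union(sk_a, sk_b)
  # '2' was added to both sets, so it is reported present in the
  # intersection; count-min sketches never yield false negatives.
  assert ctx.contain(sk_both, '2')
  # '1' was added to both inputs of the union, so it is present there.
  assert ctx.contain(sk_either, '1')
  # '1' was only added to the first set; checking it against the
  # intersection will almost certainly report absence (false positives
  # are possible at small widths, but unlikely here).
  likely_absent = not ctx.contain(sk_both, '1')
  return likely_absent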
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if __name__ == '__main__':
app.run(main)
| apache-2.0 | 4,624,487,439,419,646,000 | 25.326531 | 76 | 0.642636 | false |
scottbri/PyVMAX | tests/test_symmRestApi.py | 1 | 9940 | #!/usr/bin/python
import pytest
from symmRestApi import Restful
######################################
## ADMINISTRATION Resource group
######################################
def test_administration(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_app_list(variables['URL']), list)
assert isinstance(api.start_system_backup(variables['URL']), dict)
assert isinstance(api.auth_cirrus_user(variables['URL']), dict)
assert isinstance(api.enroll_cirrus_user(variables['URL'], 'cirrus_id', True), dict)
assert isinstance(api.get_sharding_info(variables['URL']), dict)
assert isinstance(api.unenroll_cirrus_user(variables['URL'], 'token'), dict)
######################################
## COMMON Resource group
######################################
def test_common(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_iterator(variables['URL'], 'iterator_id'), dict)
assert isinstance(api.delete_iterator(variables['URL'], 'iterator_id'), dict)
assert isinstance(api.get_iterator_page(variables['URL'], 'iterator_id'), dict)
######################################
## MANAGEMENT Resource group
######################################
def test_management(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_usage_stats(variables['URL']), dict)
######################################
## PERFORMANCE Resource group
######################################
######################################
## SLOPROVISIONING and PROVISIONING Resource groups
######################################
def test_provisioning(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_arrays(variables['URL'], 'SLO'), list)
assert isinstance(api.get_array_directors(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_director(variables['URL'], 'SLO', 'array_id', 'director_id'), dict)
assert isinstance(api.get_array_director_ports(variables['URL'], 'SLO', 'array_id', 'director_id'), list)
assert isinstance(api.get_array_director_port(variables['URL'], 'SLO', 'array_id', 'director_id', 'port_id'), dict)
assert isinstance(api.get_array_fastpolicies(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_fastpolicy(variables['URL'], 'SLO', 'array_id', 'policy_id'), dict)
assert isinstance(api.get_array_hosts(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_host(variables['URL'], 'SLO', 'array_id', 'host_id'), dict)
assert isinstance(api.get_array_hostgroups(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_hostgroup(variables['URL'], 'SLO', 'array_id', 'hostgroup_id'), dict)
assert isinstance(api.get_array_initiators(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_initiator(variables['URL'], 'SLO', 'array_id', 'initiator_id'), dict)
assert isinstance(api.get_array_maskingviews(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_maskingview(variables['URL'], 'SLO', 'array_id', 'maskingview_id'), dict)
assert isinstance(api.get_array_maskingview_connections(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_ports(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_portgoups(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_portgroup(variables['URL'], 'SLO', 'array_id', 'portgroup_id'), dict)
assert isinstance(api.get_array_slos(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_slo(variables['URL'], 'SLO', 'array_id', 'slo_id'), dict)
assert isinstance(api.get_array_srps(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_srp(variables['URL'], 'SLO', 'array_id', 'srp_id'), dict)
assert isinstance(api.get_array_storagegroups(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_array_storagegroup(variables['URL'], 'SLO', 'array_id', 'storagegroup_id'), dict)
assert isinstance(api.get_array_volumes(variables['URL'], 'SLO', 'array_id'), list)
assert isinstance(api.get_arrays(variables['URL'], 'NOTSLO'), list)
assert isinstance(api.get_array_directors(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_director(variables['URL'], 'NOTSLO', 'array_id', 'director_id'), dict)
assert isinstance(api.get_array_director_ports(variables['URL'], 'NOTSLO', 'array_id', 'director_id'), list)
assert isinstance(api.get_array_director_port(variables['URL'], 'NOTSLO', 'array_id', 'director_id', 'port_id'), dict)
assert isinstance(api.get_array_fastpolicies(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_fastpolicy(variables['URL'], 'NOTSLO', 'array_id', 'policy_id'), dict)
assert isinstance(api.get_array_hosts(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_host(variables['URL'], 'NOTSLO', 'array_id', 'host_id'), dict)
assert isinstance(api.get_array_hostgroups(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_hostgroup(variables['URL'], 'NOTSLO', 'array_id', 'hostgroup_id'), dict)
assert isinstance(api.get_array_initiators(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_initiator(variables['URL'], 'NOTSLO', 'array_id', 'initiator_id'), dict)
assert isinstance(api.get_array_maskingviews(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_maskingview(variables['URL'], 'NOTSLO', 'array_id', 'maskingview_id'), dict)
assert isinstance(api.get_array_maskingview_connections(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_ports(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_portgoups(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_portgroup(variables['URL'], 'NOTSLO', 'array_id', 'portgroup_id'), dict)
assert isinstance(api.get_array_storagegroups(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_storagegroup(variables['URL'], 'NOTSLO', 'array_id', 'storagegroup_id'), dict)
assert isinstance(api.get_array_thinpools(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_thinpool(variables['URL'], 'NOTSLO', 'array_id', 'thinpool_id'), dict)
assert isinstance(api.get_array_tiers(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_tier(variables['URL'], 'NOTSLO', 'array_id', 'tier_id'), dict)
assert isinstance(api.get_array_volumes(variables['URL'], 'NOTSLO', 'array_id'), list)
assert isinstance(api.get_array_volume(variables['URL'], 'NOTSLO', 'array_id', 'volume_id'), dict)
######################################
## REPLICATION Resource group
######################################
def test_replication(variables):
# in the following, 'resource_id' or similar string is a valid type string but
# will never be successfully found by the api
api = Restful(variables['URL'], variables['user'], variables['pass'])
assert isinstance(api.get_replica_abilities(variables['URL']), list)
assert isinstance(api.get_replica_devicegroups(variables['URL']), list)
assert isinstance(api.get_replica_devicegroup(variables['URL'], 'array_id', 'devicegroup_id'), dict)
assert isinstance(api.get_replica_devicegroup_number(variables['URL'], 'array_id', 'devicegroup_id'), dict)
assert isinstance(api.get_replica_arrays(variables['URL']), list)
assert isinstance(api.get_replica_array(variables['URL'], 'array_id'), dict)
assert isinstance(api.get_replica_rdfgroups(variables['URL'], 'array_id'), list)
assert isinstance(api.get_replica_rdfgroup(variables['URL'], 'array_id', 'rdfg_num'), dict)
assert isinstance(api.get_replica_storagegroups(variables['URL'], 'array_id'), list)
assert isinstance(api.get_replica_storagegroup(variables['URL'], 'array_id', 'storagegroup_id'), dict)
assert isinstance(api.get_replica_storagegroup_snaps(variables['URL'], 'array_id', 'storagegroup_id'), list)
assert isinstance(api.get_replica_storagegroup_snap(variables['URL'], 'array_id', 'storagegroup_id', 'snap_id'), dict)
assert isinstance(api.get_replica_storagegroup_snap_generations(variables['URL'], 'array_id', 'storagegroup_id', 'snap_id'), list)
assert isinstance(api.get_replica_storagegroup_snap_generation(variables['URL'], 'array_id', 'storagegroup_id', 'snap_id', 'generation_num'), dict)
'''
######################################
## SYSTEM Resource group
######################################
def getAlerts(self, URL):
def getAlert(self, URL, resourceId):
def getJobs(self, URL):
def getJob(self, URL, resourceId):
def getSymms(self, URL):
def getSymm(self, URL, resourceId):
def getSymmAlerts(self, URL, resourceId):
def getSymmAlert(self, URL, symId, alertId):
def getSymmJobs(self, URL, resourceId):
def getSymmJob(self, URL, symId, jobId):
def getVersion(self, URL):
######################################
## WORKLOAD Resource group
######################################
'''
| apache-2.0 | 8,647,825,385,781,915,000 | 57.470588 | 151 | 0.659759 | false |
AmericanResearchInstitute/ari-backup | ari_backup/__init__.py | 1 | 16849 | import os
import settings
import subprocess
import shlex
from logger import Logger
'''Wrapper around rdiff-backup
This module provides facilites for centrally managing a large set of
rdiff-backup backup jobs. Backup job management is built around common tools
like cron, run-parts, and xargs. The base features include:
* central configuration file
* backup jobs for local and remote hosts
* configurable job parallelization
* ability to run arbitrary commands locally or remotely before and after
backup jobs (something especially handy for preparing databases pre-backup)
* logging to syslog
The base features are designed to be extended and we include an extension to
manage the setup and tear down of LVM snapshots for backup.
'''
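# Illustrative usage (a minimal sketch; the label, hostname, volume names and
# retention period below are placeholders, not part of this module):
#
#   from ari_backup import LVMBackup
#
#   backup = LVMBackup(label='web01', source_hostname='web01.example.com',
#                      remove_older_than_timespec='30D')
#   backup.lv_list.append(('vg00/root', '/', 'noatime'))
#   backup.include_dir_list.append('/')
#   backup.run_backup()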
class ARIBackup(object):
'''Base class includes core features and basic rdiff-backup functionality
This class can be used if all that is needed is to leverage the basic
rdiff-backup features. The pre and post hook functionality as well as
command execution is also part of this class.
'''
def __init__(self, label, source_hostname, remove_older_than_timespec=None):
# The name of the backup job (this will be the name of the directory in the backup store
# that has the data).
self.label = label
# This is the host that has the source data.
self.source_hostname = source_hostname
# We'll bring in the remote_user from our settings, but it is a var
# that the end-user is welcome to override.
self.remote_user = settings.remote_user
# setup logging
self.logger = Logger('ARIBackup ({label})'.format(label=label), settings.debug_logging)
# Include nothing by default
self.include_dir_list = []
self.include_file_list = []
# Exclude nothing by default
# We'll put the '**' exclude on the end of the arg_list later
self.exclude_dir_list = []
self.exclude_file_list = []
# initialize hook lists
self.pre_job_hook_list = []
self.post_job_hook_list = []
if remove_older_than_timespec != None:
self.post_job_hook_list.append((
self._remove_older_than,
{'timespec': remove_older_than_timespec}))
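# Additional hooks can be registered as (callable, kwargs) tuples, for
# example (hypothetical callable and job variable):
#   job.pre_job_hook_list.append((dump_database, {'db_name': 'mysql'}))
# Post-job hooks are called with an extra 'error_case' keyword argument.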
def _process_pre_job_hooks(self):
self.logger.info('processing pre-job hooks...')
for task in self.pre_job_hook_list:
# Let's do some assignments for readability
hook = task[0]
kwargs = task[1]
hook(**kwargs)
def _process_post_job_hooks(self, error_case):
if error_case:
self.logger.error('processing post-job hooks for error case...')
else:
self.logger.info('processing post-job hooks...')
for task in self.post_job_hook_list:
# Let's do some assignments for readability
hook = task[0]
kwargs = task[1]
kwargs.update({'error_case': error_case})
hook(**kwargs)
def _run_command(self, command, host='localhost'):
'''Runs an arbitrary command on host.
Given an input string or list, we attempt to execute it on the host via
SSH unless host is "localhost".
Returns a tuple with (stdout, stderr) if the exitcode is zero,
otherwise an Exception is raised.
'''
# make args a list if it's not already so
if isinstance(command, basestring):
args = shlex.split(command)
elif isinstance(command, list):
args = command
else:
raise Exception('_run_command: command arg must be str or list')
# add SSH arguments if this is a remote command
if host != 'localhost':
ssh_args = shlex.split('%s %s@%s' % (settings.ssh_path, self.remote_user, host))
args = ssh_args + args
try:
self.logger.debug('_run_command %r' % args)
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# We really want to block until our subprocess exits or
# KeyboardInterrupt. If we don't, clean up tasks can likely fail.
try:
stdout, stderr = p.communicate()
except KeyboardInterrupt:
# TODO terminate() doesn't block, so we'll need to poll
p.terminate()
raise KeyboardInterrupt
if stdout:
self.logger.debug(stdout)
if stderr:
# Warning level should be fine here since we'll also look at
# the exitcode.
self.logger.warning(stderr)
exitcode = p.returncode
except IOError:
raise Exception('Unable to execute/find {args}'.format(args=args))
if exitcode > 0:
error_message = ('[{host}] A command terminated with errors and likely requires intervention. The '
'command attempted was "{command}".').format(
host=host, command=command)
raise Exception(error_message)
return (stdout, stderr)
def run_backup(self):
self.logger.info('started')
try:
error_case = False
self._process_pre_job_hooks()
self.logger.info('data backup started...')
self._run_backup()
self.logger.info('data backup complete')
except Exception, e:
error_case = True
self.logger.error((str(e)))
self.logger.info("let's try to clean up...")
except KeyboardInterrupt:
error_case = True
# using error level here so that these messages will
# print to the console
self.logger.error('backup job cancelled by user')
self.logger.error("let's try to clean up...")
finally:
self._process_post_job_hooks(error_case)
self.logger.info('stopped')
def _run_backup(self, top_level_src_dir='/'):
'''Run rdiff-backup job.
Builds an argument list for a full rdiff-backup command line based on
the settings in the instance and optionally the top_level_src_dir
parameter. Said parameter is used to define the context for the backup
mirror. This is especially handy when backing up mounted snapshots so
that the mirror doesn't contain the directory where the snapshot is
mounted.
'''
self.logger.debug('_run_backup started')
# Init our arguments list with the path to rdiff-backup.
# This will be in the format we'd normally pass to the command-line
# e.g. [ '--include', '/dir/to/include', '--exclude', '/dir/to/exclude']
arg_list = [settings.rdiff_backup_path]
# setup some default rdiff-backup options
# TODO provide a way to override these
arg_list.append('--exclude-device-files')
arg_list.append('--exclude-fifos')
arg_list.append('--exclude-sockets')
# Bring the terminal verbosity down so that we only see errors
arg_list += ['--terminal-verbosity', '1']
# This conditional reads strangely, but that's because rdiff-backup
# not only defaults to having SSH compression enabled, it also doesn't
# have an option to explicitly enable it -- only one to disable it.
if not settings.ssh_compression:
arg_list.append('--ssh-no-compression')
# Populate the argument list
for exclude_dir in self.exclude_dir_list:
arg_list.append('--exclude')
arg_list.append(exclude_dir)
for exclude_file in self.exclude_file_list:
arg_list.append('--exclude-filelist')
arg_list.append(exclude_file)
for include_dir in self.include_dir_list:
arg_list.append('--include')
arg_list.append(include_dir)
for include_file in self.include_file_list:
arg_list.append('--include-filelist')
arg_list.append(include_file)
# Exclude everything else
arg_list.append('--exclude')
arg_list.append('**')
# Add a source argument
if self.source_hostname == 'localhost':
arg_list.append(top_level_src_dir)
else:
arg_list.append(
'{remote_user}@{source_hostname}::{top_level_src_dir}'.format(
remote_user=self.remote_user,
source_hostname=self.source_hostname,
top_level_src_dir=top_level_src_dir
)
)
# Add a destination argument
arg_list.append(
'{backup_store_path}/{label}'.format(
backup_store_path=settings.backup_store_path,
label=self.label
)
)
# Rdiff-backup GO!
self._run_command(arg_list)
self.logger.debug('_run_backup completed')
def _remove_older_than(self, timespec, error_case):
'''Trims increments older than timespec
Post-job hook that uses rdiff-backup's --remove-older-than feature to
trim old increments from the backup history
'''
if not error_case:
self.logger.info('remove_older_than %s started' % timespec)
arg_list = [settings.rdiff_backup_path]
arg_list.append('--force')
arg_list.append('--remove-older-than')
arg_list.append(timespec)
arg_list.append('%s/%s' % (settings.backup_store_path, self.label))
self._run_command(arg_list)
self.logger.info('remove_older_than %s completed' % timespec)
class LVMBackup(ARIBackup):
def __init__(self, label, source_hostname, remove_older_than_timespec=None):
super(LVMBackup, self).__init__(label, source_hostname, remove_older_than_timespec)
# This is a list of 2-tuples, where each inner 2-tuple expresses the LV
# to back up, the mount point for that LV any mount options necessary.
# For example: [('hostname/root, '/', 'noatime'),]
# TODO I wonder if noatime being used all the time makes sense to
# improve read performance and reduce writes to the snapshots.
self.lv_list = []
# a list of dicts with the snapshot paths and where they should be
# mounted
self.lv_snapshots = []
# mount the snapshots in a directory named for this job's label
self.snapshot_mount_point_base_path = os.path.join(settings.snapshot_mount_root, self.label)
# setup pre and post job hooks to manage snapshot work flow
self.pre_job_hook_list.append((self._create_snapshots, {}))
self.pre_job_hook_list.append((self._mount_snapshots, {}))
self.post_job_hook_list.append((self._umount_snapshots, {}))
self.post_job_hook_list.append((self._delete_snapshots, {}))
def _create_snapshots(self):
        '''Creates snapshots of all the volumes listed in self.lv_list'''
self.logger.info('creating LVM snapshots...')
for volume in self.lv_list:
try:
lv_path, src_mount_path, mount_options = volume
except ValueError:
lv_path, src_mount_path = volume
mount_options = None
vg_name, lv_name = lv_path.split('/')
new_lv_name = lv_name + settings.snapshot_suffix
mount_path = '{snapshot_mount_point_base_path}{src_mount_path}'.format(
snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
src_mount_path=src_mount_path
)
# TODO Is it really OK to always make a 1GB exception table?
self._run_command('lvcreate -s -L 1G %s -n %s' % (lv_path, new_lv_name), self.source_hostname)
self.lv_snapshots.append({
'lv_path': vg_name + '/' + new_lv_name,
'mount_path': mount_path,
'mount_options': mount_options,
'created': True,
'mount_point_created': False,
'mounted': False,
})
def _delete_snapshots(self, error_case=None):
'''Deletes snapshots in self.lv_snapshots
This method behaves the same in the normal and error cases.
'''
self.logger.info('deleting LVM snapshots...')
for snapshot in self.lv_snapshots:
if snapshot['created']:
lv_path = snapshot['lv_path']
# -f makes lvremove not interactive
self._run_command('lvremove -f %s' % lv_path, self.source_hostname)
snapshot.update({'created': False})
def _mount_snapshots(self):
self.logger.info('mounting LVM snapshots...')
for snapshot in self.lv_snapshots:
lv_path = snapshot['lv_path']
device_path = '/dev/' + lv_path
mount_path = snapshot['mount_path']
mount_options = snapshot['mount_options']
# mkdir the mount point
self._run_command('mkdir -p %s' % mount_path, self.source_hostname)
snapshot.update({'mount_point_created': True})
# If where we want to mount our LV is already a mount point then
# let's back out.
if os.path.ismount(mount_path):
raise Exception("{mount_path} is already a mount point".format(mount_path=mount_path))
# mount the LV, possibly with mount options
if mount_options:
command = 'mount -o {mount_options} {device_path} {mount_path}'.format(
mount_options=mount_options,
device_path=device_path,
mount_path=mount_path
)
else:
command = 'mount {device_path} {mount_path}'.format(
device_path=device_path,
mount_path=mount_path
)
self._run_command(command, self.source_hostname)
snapshot.update({'mounted': True})
def _umount_snapshots(self, error_case=None):
'''Umounts mounted snapshots in self.lv_snapshots
This method behaves the same in the normal and error cases.
'''
# TODO If the user doesn't put '/' in their include_dir_list, then
# we'll end up with directories around where the snapshots are mounted
# that will not get cleaned up. We should probably add functionality
# to make sure the "label" directory is recursively removed.
# Check out shutil.rmtree() to help resolve this issue.
self.logger.info('umounting LVM snapshots...')
# We need a local copy of the lv_snapshots list to muck with in
# this method.
local_lv_snapshots = self.lv_snapshots
# We want to umount these LVs in reverse order as this should ensure
# that we umount the deepest paths first.
local_lv_snapshots.reverse()
for snapshot in local_lv_snapshots:
mount_path = snapshot['mount_path']
if snapshot['mounted']:
self._run_command('umount %s' % mount_path, self.source_hostname)
snapshot.update({'mounted': False})
if snapshot['mount_point_created']:
self._run_command('rmdir %s' % mount_path, self.source_hostname)
snapshot.update({'mount_point_created': False})
def _run_backup(self):
'''Run backup of LVM snapshots'''
self.logger.debug('LVMBackup._run_backup started')
# Cook the self.include_dir_list and self.exclude_dir_list so that the
# src paths include the mount path for the LV(s).
local_include_dir_list = []
for include_dir in self.include_dir_list:
local_include_dir_list.append('{snapshot_mount_point_base_path}{include_dir}'.format(
                snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
include_dir=include_dir
))
local_exclude_dir_list = []
for exclude_dir in self.exclude_dir_list:
local_exclude_dir_list.append('{snapshot_mount_point_base_path}{exclude_dir}'.format(
                snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
exclude_dir=exclude_dir
))
self.include_dir_list = local_include_dir_list
self.exclude_dir_list = local_exclude_dir_list
# We don't support include_file_list and exclude_file_list in this
# class as it would take extra effort and it's not likely to be used.
# Have the base class perform an rdiff-backup
super(LVMBackup, self)._run_backup(self.snapshot_mount_point_base_path)
self.logger.debug('LVMBackup._run_backup completed')
| bsd-3-clause | 7,867,966,649,929,638,000 | 38.366822 | 111 | 0.599442 | false |
stccenter/datadiscovery | ranking/evaluation.py | 1 | 12251 | # -*- coding: utf-8 -*-
"""
Created on Tue May 30 13:13:44 2017
@author: larakamal
total evaluation evaluates the sorted documents
using precision and NDCG
change the directory of the sorted documents from lines 79-87
change the directory of the precision and NDCG graphs from
lines 345 and 352
"""
from math import log10
import csv
import numpy as np
import pandas as pd
def getNDCG(list, k):
#convert to double
dcg = float(getDCG(list,k))
idcg = float(getIDCG(list,k))
ndcg = 0.0
if (idcg > 0.0):
ndcg = dcg/idcg
return ndcg
def getPrecision(list, k):
size = len(list)
if (size == 0 or k == 0):
return 0.0
if(k > size):
k = size
rel_doc_num = getRelevantDocNum(list,k)
#convert to double
precision = float(float(rel_doc_num)/float(k))
return precision
def getRelevantDocNum(list,k):
size = len(list)
if (size == 0 or k == 0):
return 0
if (k > size):
k = size
rel_num = 0
for i in range(k):
if list[i] > 5:
rel_num = rel_num + 1
return rel_num
def getDCG(list,k):
size = len(list)
if (size == 0 or k == 0):
return 0.0
if (k > size):
k = size
#convert to double
dcg = list[0]
dcg = float(dcg)
for i in range(1,k):
rel = list[i]
pos = i+1
rel_log = log10(pos)/log10(2)
rel_log = float(rel_log)
dcg = dcg + (rel/rel_log)
return dcg
def getIDCG(list, k):
# sort list
sortedList = list
sortedList = sorted(sortedList, key=int, reverse=True)
idcg = getDCG(sortedList, k)
return float(idcg)
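# Worked example (added for illustration, not part of the original script).
# The relevance grades are the 1-7 values produced in evaluate() below, and
# grades above 5 count as relevant for precision:
#   getDCG([3, 7, 5], 3)       -> 3 + 7/log2(2) + 5/log2(3) ~= 13.15
#   getIDCG([3, 7, 5], 3)      -> DCG of [7, 5, 3]          ~= 13.89
#   getNDCG([3, 7, 5], 3)      -> 13.15 / 13.89             ~= 0.95
#   getPrecision([3, 7, 5], 3) -> 1 relevant doc out of 3   ~= 0.33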
def evaluate(algorithm):
path = "data/results/test/" + algorithm + "/"
#change directory of the ranked documents
dataframe1 = pd.read_csv(path + "gravity_sorted.csv")
dataframe2 = pd.read_csv(path + "ocean pressure_sorted.csv")
dataframe3 = pd.read_csv(path + "ocean temperature_sorted.csv")
dataframe4 = pd.read_csv(path + "ocean wind_sorted.csv")
dataframe5 = pd.read_csv(path + "pathfinder_sorted.csv")
dataframe6 = pd.read_csv(path + "quikscat_sorted.csv")
dataframe7 = pd.read_csv(path + "radar_sorted.csv")
dataframe8 = pd.read_csv(path + "saline density_sorted.csv")
dataframe9 = pd.read_csv(path + "sea ice_sorted.csv")
label1 = dataframe1.ix[:,10:11]
label2 = dataframe2.ix[:,10:11]
label3 = dataframe3.ix[:,10:11]
label4 = dataframe4.ix[:,10:11]
label5 = dataframe5.ix[:,10:11]
label6 = dataframe6.ix[:,10:11]
label7 = dataframe7.ix[:,10:11]
label8 = dataframe8.ix[:,10:11]
label9 = dataframe9.ix[:,10:11]
temp_list1 = label1['label'].tolist()
temp_list2 = label2['label'].tolist()
temp_list3 = label3['label'].tolist()
temp_list4 = label4['label'].tolist()
temp_list5 = label5['label'].tolist()
temp_list6 = label6['label'].tolist()
temp_list7 = label7['label'].tolist()
temp_list8 = label8['label'].tolist()
temp_list9 = label9['label'].tolist()
    label_list1 = []
    label_list2 = []
    label_list3 = []
    label_list4 = []
    label_list5 = []
    label_list6 = []
    label_list7 = []
    label_list8 = []
    label_list9 = []
for i in range(len(temp_list1)):
if temp_list1[i] == 'Excellent':
label_list1.append(7)
elif temp_list1[i] == 'Very good':
label_list1.append(6)
elif temp_list1[i] == 'Good':
label_list1.append(5)
elif temp_list1[i] == 'Ok':
label_list1.append(4)
elif temp_list1[i] == 'Bad':
label_list1.append(3)
elif temp_list1[i] == 'Very bad':
label_list1.append(2)
elif temp_list1[i] == 'Terrible':
label_list1.append(1)
else:
label_list1.append(0)
for i in range(len(temp_list2)):
if temp_list2[i] == 'Excellent':
label_list2.append(7)
elif temp_list2[i] == 'Very good':
label_list2.append(6)
elif temp_list2[i] == 'Good':
label_list2.append(5)
elif temp_list2[i] == 'Ok':
label_list2.append(4)
elif temp_list2[i] == 'Bad':
label_list2.append(3)
elif temp_list2[i] == 'Very bad':
label_list2.append(2)
elif temp_list2[i] == 'Terrible':
label_list2.append(1)
else:
label_list2.append(0)
for i in range(len(temp_list3)):
if temp_list3[i] == 'Excellent':
label_list3.append(7)
elif temp_list3[i] == 'Very good':
label_list3.append(6)
elif temp_list3[i] == 'Good':
label_list3.append(5)
elif temp_list3[i] == 'Ok':
label_list3.append(4)
elif temp_list3[i] == 'Bad':
label_list3.append(3)
elif temp_list3[i] == 'Very bad':
label_list3.append(2)
elif temp_list3[i] == 'Terrible':
label_list3.append(1)
else:
label_list3.append(0)
for i in range(len(temp_list4)):
if temp_list4[i] == 'Excellent':
label_list4.append(7)
elif temp_list4[i] == 'Very good':
label_list4.append(6)
elif temp_list4[i] == 'Good':
label_list4.append(5)
elif temp_list4[i] == 'Ok':
label_list4.append(4)
elif temp_list4[i] == 'Bad':
label_list4.append(3)
elif temp_list4[i] == 'Very bad':
label_list4.append(2)
elif temp_list4[i] == 'Terrible':
label_list4.append(1)
else:
label_list4.append(0)
for i in range(len(temp_list5)):
if temp_list5[i] == 'Excellent':
label_list5.append(7)
elif temp_list5[i] == 'Very good':
label_list5.append(6)
elif temp_list5[i] == 'Good':
label_list5.append(5)
elif temp_list5[i] == 'Ok':
label_list5.append(4)
elif temp_list5[i] == 'Bad':
label_list5.append(3)
elif temp_list5[i] == 'Very bad':
label_list5.append(2)
elif temp_list5[i] == 'Terrible':
label_list5.append(1)
else:
label_list5.append(0)
for i in range(len(temp_list6)):
if temp_list6[i] == 'Excellent':
label_list6.append(7)
elif temp_list6[i] == 'Very good':
label_list6.append(6)
elif temp_list6[i] == 'Good':
label_list6.append(5)
elif temp_list6[i] == 'Ok':
label_list6.append(4)
elif temp_list6[i] == 'Bad':
label_list6.append(3)
elif temp_list6[i] == 'Very bad':
label_list6.append(2)
elif temp_list6[i] == 'Terrible':
label_list6.append(1)
else:
label_list6.append(0)
for i in range(len(temp_list7)):
if temp_list7[i] == 'Excellent':
label_list7.append(7)
elif temp_list7[i] == 'Very good':
label_list7.append(6)
elif temp_list7[i] == 'Good':
label_list7.append(5)
elif temp_list7[i] == 'Ok':
label_list7.append(4)
elif temp_list7[i] == 'Bad':
label_list7.append(3)
elif temp_list7[i] == 'Very bad':
label_list7.append(2)
elif temp_list7[i] == 'Terrible':
label_list7.append(1)
else:
label_list7.append(0)
for i in range(len(temp_list8)):
if temp_list8[i] == 'Excellent':
label_list8.append(7)
elif temp_list8[i] == 'Very good':
label_list8.append(6)
elif temp_list8[i] == 'Good':
label_list8.append(5)
elif temp_list8[i] == 'Ok':
label_list8.append(4)
elif temp_list8[i] == 'Bad':
label_list8.append(3)
elif temp_list8[i] == 'Very bad':
label_list8.append(2)
elif temp_list8[i] == 'Terrible':
label_list8.append(1)
else:
label_list8.append(0)
for i in range(len(temp_list9)):
if temp_list9[i] == 'Excellent':
label_list9.append(7)
elif temp_list9[i] == 'Very good':
label_list9.append(6)
elif temp_list9[i] == 'Good':
label_list9.append(5)
elif temp_list9[i] == 'Ok':
label_list9.append(4)
elif temp_list9[i] == 'Bad':
label_list9.append(3)
elif temp_list9[i] == 'Very bad':
label_list9.append(2)
elif temp_list9[i] == 'Terrible':
label_list9.append(1)
else:
label_list9.append(0)
NDCG_list1 = []
NDCG_list2 = []
NDCG_list3 = []
NDCG_list4 = []
NDCG_list5 = []
NDCG_list6 = []
NDCG_list7 = []
NDCG_list8 = []
NDCG_list9 = []
for i in range(1,41):
k = i
NDCG_list1.append(getNDCG(label_list1,k))
NDCG_list2.append(getNDCG(label_list2,k))
NDCG_list3.append(getNDCG(label_list3,k))
NDCG_list4.append(getNDCG(label_list4,k))
NDCG_list5.append(getNDCG(label_list5,k))
NDCG_list6.append(getNDCG(label_list6,k))
NDCG_list7.append(getNDCG(label_list7,k))
NDCG_list8.append(getNDCG(label_list8,k))
NDCG_list9.append(getNDCG(label_list9,k))
precision_list1 = []
precision_list2 = []
precision_list3 = []
precision_list4 = []
precision_list5 = []
precision_list6 = []
precision_list7 = []
precision_list8 = []
precision_list9 = []
for i in range(1,41):
k = i
precision_list1.append(getPrecision(label_list1,k))
precision_list2.append(getPrecision(label_list2,k))
precision_list3.append(getPrecision(label_list3,k))
precision_list4.append(getPrecision(label_list4,k))
precision_list5.append(getPrecision(label_list5,k))
precision_list6.append(getPrecision(label_list6,k))
precision_list7.append(getPrecision(label_list7,k))
precision_list8.append(getPrecision(label_list8,k))
precision_list9.append(getPrecision(label_list9,k))
total_list_NDCG = []
for i in range(len(NDCG_list1)):
average = (NDCG_list1[i] + NDCG_list2[i]+ NDCG_list3[i] + NDCG_list4[i]+ NDCG_list5[i] + NDCG_list6[i] + NDCG_list7[i] + NDCG_list8[i] + NDCG_list9[i])/9
array = np.array([NDCG_list1[i],NDCG_list2[i], NDCG_list3[i], NDCG_list4[i], NDCG_list5[i], NDCG_list6[i], NDCG_list7[i], NDCG_list8[i], NDCG_list9[i], average])
total_list_NDCG.append(array)
total_list_precision = []
for i in range(len(precision_list1)):
average = (precision_list1[i] + precision_list2[i]+ precision_list3[i] + precision_list4[i]+ precision_list5[i] + precision_list6[i] + precision_list7[i] + precision_list8[i] + precision_list9[i])/9
array = np.array([precision_list1[i],precision_list2[i], precision_list3[i], precision_list4[i], precision_list5[i], precision_list6[i], precision_list7[i], precision_list8[i], precision_list9[i], average])
total_list_precision.append(array)
with open('data/results/rank/' + algorithm + 'NDCG_graph.csv', 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['label'])
writer.writerow(['gravity', 'ocean_pressure', 'ocean_temperature', 'ocean_wind', 'pathfinder','quikscat', 'radar', 'saline_density','sea_ice', algorithm])
for i in total_list_NDCG:
writer.writerow(i)
with open('data/results/rank/' + algorithm + 'precision_graph.csv', 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['label'])
writer.writerow(['gravity', 'ocean_pressure', 'ocean_temperature', 'ocean_wind', 'pathfinder','quikscat', 'radar', 'saline_density','sea_ice', algorithm])
for i in total_list_precision:
writer.writerow(i) | apache-2.0 | -4,196,124,341,994,484,000 | 32.939058 | 214 | 0.548119 | false |
ufo2mstar/PersonalPys | GreyscriptNB15.py | 1 | 2408 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Kyu
#
# Created: 01/11/2012
# Copyright: (c) Kyu 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Combi Try!
import sys
import os
path=os.getcwd()
def makeit(allincd,l):
#combi='\"copy -b\"'+allmp3s+' '+path+'\\comb\\'+files[1][:-9]+'.mp3\"\"';
combi='\"copy /b '+allincd+' \"CD '+str(l)+'.mp3\"';
print combi
os.system(combi);
if __name__ == '__main__':
## cdname=[]
j=0
allincd=''
# Get files to unzip
for root, dirs, files in os.walk(path):
cdname=[]
for i in range(106,121):
## j=j+1
for fil in files:
if 'Track '+str(i).zfill(3) in fil:
print '>>> founddddd = track ',i
if '.mp3' in fil:
cdname.append('\"'+fil+'\"')
print cdname
print 'all in cd = ',allincd,'\n'
if len(cdname)==15:
j=j+1
allincd=' + '.join(cdname)
makeit(allincd,j)
cdname=[]
# New walk to get unziped mp3 files
##mp3files=[];
##for root, dirs, files in os.walk(path):
#### print files
## for fil in files:
## if fil[-4:]=='.mp3':
## mp3files.append('\"'+fil+'\"')
## allmp3s=''
#### for mp3f in mp3files:
## allmp3s=' + '.join(mp3files)
## print 'all mp3 cat = ',allmp3s,'\n'
## #combi='\"copy -b\"'+allmp3s+' '+path+'\\comb\\'+files[1][:-9]+'.mp3\"\"';
## combi='\"copy /b '+allmp3s+' '+'nice.mp3\"';
## print combi
## os.system(combi);
##
### Delete unziped and copied file
## dell='\"del '+allmp3s+'\"'
## os.system(dell)
##
# Garbage
## ofile=open(files,'w')
## ifile=open('a.mp3','r')
#### ofile=open('ans/'+name,'w')
## newlines=[]
## for line in ifile:
#### line0 = str.strip(line)
#### if line0 !='':
#### if line0[0] != '%': #and line0[0] != '%' and line0[-1] != ';':
#### if '%' in line0:
#### newlines.append(line[:line.find("%")]+'\n')
#### else:
## newlines.append(line)
## print line
##
| gpl-2.0 | 4,172,745,439,984,703,000 | 28.012048 | 86 | 0.405316 | false |
f0rki/cb-multios | original-challenges/A_Game_of_Chance/support/dealer.py | 1 | 9666 | #!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from random import choice, randint
import support as sp
from common import DEBUG, CONFIG
from player import Player as p
from pov_deck import POV_DECK_BYTES
TRIGGER_BYTES = ['\x21', '\x5f', '\x3c', '\x42', '\x63', '\x51']
# 'NAME': (IDX, BYTE)
TRIGGER_BYTE_LOOKUP = {
'DECK': (0, TRIGGER_BYTES[0]),
'DEAL': (1, TRIGGER_BYTES[1]),
'DATA': (2, TRIGGER_BYTES[2]),
'BALANCE': (3, TRIGGER_BYTES[3]),
'COUNT': (4, TRIGGER_BYTES[4]),
'QUIT': (5, TRIGGER_BYTES[5]),
}
def get_non_trigger_byte():
b = chr(randint(0, 255))
while b in TRIGGER_BYTES:
b = chr(randint(0,255))
return b
class Dealer(object):
def __init__(self, magic_page):
self.players = []
self.magic_page = magic_page
self.deck = []
self.ok = ''
def add_players(self, p_count):
for i in range(1, p_count + 1):
noob = p(i, self.magic_page)
self.players.append(noob)
self.balances = [0 for _ in range(p_count + 1)]
self.wagers = [0 for _ in range(p_count + 1)]
self.results = [0 for _ in range(p_count + 1)]
self.deal1 = [0 for _ in range(p_count + 1)]
self.deal2 = [0 for _ in range(p_count + 1)]
if DEBUG:
print "added {0} players".format(p_count)
def make_trigger_buf(self, cmd_name):
cmd = ''
for cn in ['DECK', 'DEAL', 'DATA', 'BALANCE', 'COUNT', 'QUIT']:
if cn == cmd_name:
(idx, b) = TRIGGER_BYTE_LOOKUP[cmd_name]
cmd += b
else:
cmd += get_non_trigger_byte()
return cmd
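    # Example (illustrative): make_trigger_buf('DEAL') returns a 6-byte string
    # whose index-1 byte is the DEAL trigger '\x5f' and whose other five
    # positions are random non-trigger bytes, i.e. the trigger always sits at
    # the index recorded for its command in TRIGGER_BYTE_LOOKUP.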
def make_new_random_deck(self, count, bad_cards=False):
deck = [chr(randint(CONFIG['MIN_CARD_VALUE'], CONFIG['MAX_CARD_VALUE'])) for _ in range(count)]
if True == bad_cards:
deck[count/2] = chr(randint(CONFIG['MAX_CARD_VALUE'] + 1, 255))
deck[count-1] = chr(randint(CONFIG['MAX_CARD_VALUE'] + 1, 255))
if DEBUG:
print "making new deck with {0} cards with {1}bad cards".format(count, "" if bad_cards else "no ")
# if DEBUG:
# print "deck:"
# for c in deck:
# print " card 0x{0:02x},".format(ord(c))
return deck
def gen_ok(self):
ok = [0,0,0,0,0,0,0,0]
for idx in range(len(self.magic_page)):
ok[idx % 8] ^= ord(self.magic_page[idx])
self.ok = ''.join([chr(c) for c in ok])
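    # Note (added): gen_ok() folds the magic page into 8 bytes by XOR-ing
    # every 8th byte together. With the 'A'*4096 page used in the tests
    # below, each position XORs 512 copies of 0x41, which cancel out, so
    # self.ok becomes eight NUL bytes.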
def get_packed_balances(self):
p_bal = ''
if DEBUG:
print " balances were: {0}".format(self.balances)
for b in self.balances:
p_bal += sp.pack_single_int32(b)
return p_bal
def get_packed_wagers(self):
p_wag = ''
if DEBUG:
print " wagers were: {0}".format(self.wagers)
for w in self.wagers:
p_wag += sp.pack_single_uint32(w)
return p_wag
def get_packed_results(self):
p_res = ''
if DEBUG:
print " results were: {0}".format(self.results)
for r in self.results:
p_res += sp.pack_single_uint32(r)
return p_res
def enough_cards_for_round(self):
return 0 < len(self.deck) and len(self.deck) >= len(self.players)
def calculate_round_results(self):
max_sum = 0
if DEBUG:
print "calculating round results"
for idx in range(1, len(self.results)):
if max_sum < self.deal1[idx] + self.deal2[idx]:
max_sum = self.deal1[idx] + self.deal2[idx]
if DEBUG:
print " updated round max_sum {0}".format(max_sum)
for idx in range(1, len(self.results)):
if DEBUG:
print " calculating results for player {0}".format(idx)
my_sum = self.deal1[idx] + self.deal2[idx]
if DEBUG:
                print "    player {0} round sum {1}".format(idx, my_sum)
if my_sum == max_sum:
self.results[idx] = CONFIG['WIN']
                if DEBUG:
                    print "    player {0} WIN".format(idx)
else:
self.results[idx] = CONFIG['LOSS']
                if DEBUG:
                    print "    player {0} LOSS".format(idx)
def get_next_card(self):
c = self.deck.pop()
if DEBUG:
print "got next card 0x{0:02x}".format(ord(c))
return c
def play_one_round(self):
if DEBUG:
print "play one round with {0} players".format(len(self.players))
# deal card to each player
for p in self.players:
c = self.get_next_card()
self.deal1[p.id] = ord(c)
p.cards[0] = ord(c)
if DEBUG:
print " player {0} dealt card 0x{1:02x}".format(p.id, ord(c))
if DEBUG:
print " cards from deal 1: {0}".format(self.deal1)
# get wager from each player
for p in self.players:
self.wagers[p.id] = p.get_wager()
if DEBUG:
print " player {0} wagered {1}".format(p.id, self.wagers[p.id])
# deal card to each player
for p in self.players:
c = self.get_next_card()
self.deal2[p.id] = ord(c)
p.cards[1] = ord(c)
if DEBUG:
print " player {0} dealt card 0x{1:02x}".format(p.id, ord(c))
if DEBUG:
print " cards from deal 2: {0}".format(self.deal2)
self.calculate_round_results()
# pay each player
for p in self.players:
if CONFIG['WIN'] == self.results[p.id]:
p.exchange_money(self.wagers[p.id])
p.win()
self.balances[p.id] += self.wagers[p.id]
else:
p.exchange_money(-self.wagers[p.id])
p.loss()
self.balances[p.id] += -self.wagers[p.id]
## For testing ##
def total_magic_page_indices_used(self):
mpiu = set()
for p in self.players:
mpiu.update(p.magic_page_indices_used)
mpiu_l = list(mpiu)
mpiu_l.sort()
return mpiu_l
def check_magic_bytes_usage(self):
mpiu_list = self.total_magic_page_indices_used()
# print " total set of magic_page_indices_used: {0}".format(mpiu_list)
for idx in range(len(mpiu_list) - 3):
i0 = mpiu_list[idx]
i1 = mpiu_list[idx + 1]
i2 = mpiu_list[idx + 2]
i3 = mpiu_list[idx + 3]
if i0 == i1 - 1 and \
i1 == i2 - 1 and \
i2 == i3 - 1:
if DEBUG:
print "#### Warning 4 consecutive magic page indices: {0}, {1}, {2}, {3}.".format(i0, i1, i2, i3)
print "magic total_magic_page_indices_used: {0}".format(mpiu_list)
return [i0, i1, i2, i3]
if DEBUG:
print " Done checking for 4 consecutive magic page indices."
return []
def check_deck(deck_sz=1000):
if DEBUG:
print "find_trigger_deck size {0}".format(deck_sz)
# setup
d = Dealer('A'*4096)
d.add_players(CONFIG['PLAYER_COUNT'])
# make deck
deck = d.make_new_random_deck(deck_sz)
d.deck = list(deck)
mpiu_list = []
while d.enough_cards_for_round():
# play a round
d.play_one_round()
# break if found 4 consecutive fp indices
mpiu_list = d.check_magic_bytes_usage()
if [] != mpiu_list:
if DEBUG:
print "indices {0}".format(mpiu_list)
return deck
return []
def find_trigger_deck():
deck_sz = 2500
deck = check_deck(deck_sz)
while [] == deck:
deck_sz += 10
deck = check_deck(deck_sz)
if DEBUG:
print "trigger deck len {0}, deck {1}".format(len(deck), deck)
return deck
def test_trigger_deck(deck):
if DEBUG:
print "test_trigger_deck"
# setup
d = Dealer('A'*4096)
d.add_players(CONFIG['PLAYER_COUNT'])
d.deck = list(deck)
mpiu_list = []
while d.enough_cards_for_round():
# play a round
d.play_one_round()
# break if found 4 consecutive fp indices
mpiu_list = d.check_magic_bytes_usage()
if [] != mpiu_list:
# if DEBUG:
print "trigger indices {0}".format(mpiu_list)
return True
return False
if __name__ == '__main__':
# deck = find_trigger_deck()
deck = POV_DECK_BYTES
print test_trigger_deck(deck)
| mit | -238,666,694,014,413,470 | 31.22 | 117 | 0.542727 | false |
bhrzslm/uncertainty-reasoning | my_engine/others/GrMPy/lib/GrMPy/Tests/test_BNET_sumproduct.py | 1 | 3633 | # Author: Almero Gouws <[email protected]>
"""
This is a tutorial on how to create a Bayesian network, and perform
exact SUM-PRODUCT inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
from sprinkler_data import sprinkler_evidence, sprinkler_probs
"""Import the GrMPy modules"""
import models
import inference
import cpds
def test_bnet_sumproduct():
"""
Testing: SUM-PRODUCT on BNET
This example is based on the lawn sprinkler example, and the Bayesian
network has the following structure, with all edges directed downwards:
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
    The graph structure is represented as an adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i to node j.
"""
dag = np.zeros((nodes, nodes))
dag[C, [R, S]] = 1
dag[R, W] = 1
dag[S, W] = 1
"""
    Define the size of each node, which is the number of different values a
    node could be observed at. For example, if a node is either True or False,
    it has only 2 possible values it could be, therefore its size is 2. All
    the nodes in this graph have a size of 2.
"""
node_sizes = 2 * np.ones(nodes)
"""
We now need to assign a conditional probability distribution to each
node.
"""
node_cpds = [[], [], [], []]
"""Define the CPD for node 0"""
CPT = np.array([0.5, 0.5])
node_cpds[C] = cpds.TabularCPD(CPT)
"""Define the CPD for node 1"""
CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
node_cpds[R] = cpds.TabularCPD(CPT)
"""Define the CPD for node 2"""
CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
node_cpds[S] = cpds.TabularCPD(CPT)
"""Define the CPD for node 3"""
CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
node_cpds[W] = cpds.TabularCPD(CPT)
"""Create the Bayesian network"""
net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
"""
    Initialize the BNET's inference engine to use EXACT inference
by setting exact=True.
"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
    all_ev = sprinkler_evidence()
    all_prob = sprinkler_probs()
    count = 0
    errors = 0
    for evidence in all_ev:
        """Execute the sum-product algorithm"""
net.sum_product(evidence)
ans = [1, 1, 1, 1]
marginal = net.marginal_nodes([C])
if evidence[C] is None:
ans[C] = marginal.T[1]
marginal = net.marginal_nodes([S])
if evidence[S] is None:
ans[S] = marginal.T[1]
marginal = net.marginal_nodes([R])
if evidence[R] is None:
ans[R] = marginal.T[1]
marginal = net.marginal_nodes([W])
if evidence[W] is None:
ans[W] = marginal.T[1]
errors = errors + \
np.round(np.sum(np.array(ans) - np.array(all_prob[count])), 3)
count = count + 1
assert errors == 0
| mit | -8,610,013,309,501,509,000 | 27.778689 | 79 | 0.517754 | false |
kartikshah1/Test | courseware/serializers.py | 1 | 2712 | """
Serializers for the courseware API
"""
from rest_framework import serializers
from courseware import models
from video.serializers import VideoSerializer
from quiz.serializers import QuizSerializer
from document.serializers import DocumentSerializer
class AddGroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.Group
exclude = ('pages', 'course')
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.Group
exclude = ('pages',)
class ConceptSerializer(serializers.ModelSerializer):
"""
Serializer for Concept
"""
videos = VideoSerializer(many=True)
quizzes = QuizSerializer(many=True)
pages = DocumentSerializer(many=True)
class Meta:
"""
Defining model
"""
model = models.Concept
fields = ('id', 'title', 'description', 'image', 'playlist', 'is_published')
#fields = ('id', 'group', 'title', 'image', 'playlist')
class ConceptDataPlaylistSerializer(serializers.Serializer):
"""
Serializer to create the playlist to send to the concept page
"""
id = serializers.IntegerField()
title = serializers.CharField(default='title not specified')
seen_status = serializers.BooleanField(default=False)
toc = serializers.CharField()
url = serializers.CharField()
class GroupPlaylistSerializer(serializers.Serializer):
"""
Serializer for the playlist of a group_playlist
"""
id = serializers.IntegerField()
title = serializers.CharField()
class ConceptDataSerializer(serializers.Serializer):
"""
    Serializer to send the data required for the
concept page
"""
id = serializers.IntegerField()
title = serializers.CharField(default='title_not_specified')
description = serializers.CharField(default='description_not_provided')
group = serializers.IntegerField(default=0)
group_title = serializers.CharField(default='group_not_spefified')
course = serializers.IntegerField(default=0)
course_title = serializers.CharField(default='course_not_specified')
playlist = ConceptDataPlaylistSerializer(many=True)
current_video = serializers.IntegerField(default=-1)
group_playlist = GroupPlaylistSerializer(many=True)
course_playlist = GroupPlaylistSerializer(many=True)
title_document = DocumentSerializer()
class ConceptHistorySerializer(serializers.ModelSerializer):
"""
Serializer for ConceptHistory
"""
class Meta:
"""
Defining model
"""
model = models.ConceptHistory
class AddQuizSerializer(serializers.Serializer):
title = serializers.CharField(max_length=models.SHORT_TEXT)
| mit | -2,709,606,381,594,734,600 | 28.16129 | 84 | 0.707965 | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201811/reconciliation_line_item_report_service/get_reconciliation_line_item_reports_for_reconciliation_report.py | 1 | 3086 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all line item reports for a given reconciliation report.
To determine how many reconciliation reports exist,
run get_all_reconciliation_reports.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the reconciliation report to query.
RECONCILIATION_REPORT_ID = 'INSERT_RECONCILIATION_REPORT_ID_HERE'
def main(client):
# Initialize appropriate service.
reconciliation_line_item_report_service = (client.GetService(
'ReconciliationLineItemReportService', version='v201811'))
# Create a statement to select reconciliation line item reports.
statement = (ad_manager.StatementBuilder(version='v201811')
.Where(('reconciliationReportId = :reconciliationReportId AND '
'lineItemId != :lineItemId'))
.OrderBy('lineItemId', ascending=True)
.WithBindVariable('reconciliationReportId',
RECONCILIATION_REPORT_ID)
.WithBindVariable('lineItemId', 0))
  # Retrieve a small number of reconciliation line item reports at a time,
# paging through until all reconciliation line item reports have been
# retrieved.
result_set_size = 0
should_continue = True
while should_continue:
page = (reconciliation_line_item_report_service
.getReconciliationLineItemReportsByStatement(
statement.ToStatement()))
if 'results' in page and len(page['results']):
result_set_size += page['totalResultSetSize']
# Iterate over individual results in the page.
for line_item_report in page['results']:
print ('Reconciliation line item report with ID %d, line item ID %d, '
'reconciliation source "%s", and reconciled volume %d was '
'found.' % (line_item_report['id'],
line_item_report['lineItemId'],
line_item_report['reconciliationSource'],
(line_item_report['reconciledVolume']
if 'reconciledVolume' in line_item_report else 0)))
# Update statement for next page.
statement.offset += statement.limit
should_continue = statement.offset < result_set_size
print 'Number of results found: %d' % result_set_size
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 | 3,541,868,690,910,024,000 | 40.146667 | 79 | 0.680817 | false |
elewis33/doorstop | doorstop/core/test/test_builder.py | 1 | 2041 | """Unit tests for the doorstop.core.builder module."""
import unittest
from unittest.mock import patch, Mock
from doorstop.core.tree import Tree
from doorstop.core.builder import build, find_document, find_item, _clear_tree
from doorstop.core.test import FILES, EMPTY
from doorstop.core.test import MockDocumentSkip, MockDocumentNoSkip
class TestModule(unittest.TestCase):
"""Unit tests for the doorstop.core.builder module."""
@patch('doorstop.core.vcs.find_root', Mock(return_value=EMPTY))
def test_run_empty(self):
"""Verify an empty directory is an empty hierarchy."""
tree = build(EMPTY)
self.assertEqual(0, len(tree))
@patch('doorstop.core.document.Document', MockDocumentNoSkip)
@patch('doorstop.core.vcs.find_root', Mock(return_value=FILES))
def test_build(self):
"""Verify a tree can be built."""
tree = build(FILES)
self.assertEqual(4, len(tree))
@patch('doorstop.core.document.Document', MockDocumentSkip)
@patch('doorstop.core.vcs.find_root', Mock(return_value=FILES))
def test_build_with_skips(self):
"""Verify documents can be skipped while building a tree."""
tree = build(FILES)
self.assertEqual(0, len(tree))
@patch('doorstop.core.builder.build', Mock(return_value=Tree(Mock())))
@patch('doorstop.core.tree.Tree.find_document')
def test_find_document(self, mock_find_document): # pylint: disable=R0201
"""Verify documents can be found using a convenience function."""
_clear_tree()
prefix = 'req'
find_document(prefix)
mock_find_document.assert_called_once_with(prefix)
@patch('doorstop.core.builder.build', Mock(return_value=Tree(Mock())))
@patch('doorstop.core.tree.Tree.find_item')
def test_find_item(self, mock_find_item): # pylint: disable=R0201
"""Verify items can be found using a convenience function."""
_clear_tree()
uid = 'req1'
find_item(uid)
mock_find_item.assert_called_once_with(uid)
| lgpl-3.0 | -8,596,280,262,397,212,000 | 37.509434 | 78 | 0.677119 | false |
juliakreutzer/bandit-neuralmonkey | neuralmonkey/model/stateful.py | 1 | 3523 | """Module that provides classes that encapsulate model parts with states.
There are three classes: `Stateful`, `TemporalStateful`, and `SpatialStateful`.
Model parts that do not keep states in time but have a single tensor on the
output should be instances of `Stateful`. Model parts that keep their hidden
states in a time-oriented list (e.g. recurrent encoder) should be instances
of `TemporalStateful`. Model parts that keep the states in a 2D matrix (e.g.
image encoders) should be instances of `SpatialStateful`.
There are also classes that inherit from both stateful and temporal or spatial
stateful (e.g. `TemporalStatefulWithOutput`) that can be used for model parts
that satisfy more requirements (e.g. recurrent encoder).
"""
from abc import ABCMeta, abstractproperty
import tensorflow as tf
# pylint: disable=too-few-public-methods
# pydocstyle: disable=
class Stateful(metaclass=ABCMeta):
@abstractproperty
def output(self) -> tf.Tensor:
"""Return the object output.
A 2D `Tensor` of shape (batch, state_size) which contains the
resulting state of the object.
"""
raise NotImplementedError("Abstract property")
# pylint: enable=too-few-public-methods
class TemporalStateful(metaclass=ABCMeta):
@abstractproperty
def temporal_states(self) -> tf.Tensor:
"""Return object states in time.
A 3D `Tensor` of shape (batch, time, state_size) which contains the
states of the object in time (e.g. hidden states of a recurrent
        encoder).
"""
raise NotImplementedError("Abstract property")
@abstractproperty
def temporal_mask(self) -> tf.Tensor:
"""Return mask for the temporal_states.
A 2D `Tensor` of shape (batch, time) of type float32 which masks the
temporal states so each sequence can have a different length. It should
only contain ones or zeros.
"""
raise NotImplementedError("Abstract property")
@property
def lengths(self) -> tf.Tensor:
"""Return the sequence lengths.
A 1D `Tensor` of type `int32` that stores the lengths of the
state sequences in the batch.
"""
return tf.to_int32(tf.reduce_sum(self.temporal_mask, 1))
@property
def dimension(self) -> int:
"""Return the dimension of the states."""
return self.temporal_states.get_shape()[-1].value
class SpatialStateful(metaclass=ABCMeta):
@property
def spatial_states(self) -> tf.Tensor:
"""Return object states in space.
A 4D `Tensor` of shape (batch, width, height, state_size) which
contains the states of the object in space (e.g. final layer of a
        convolution network processing an image).
"""
raise NotImplementedError("Abstract property")
@abstractproperty
def spatial_mask(self) -> tf.Tensor:
"""Return mask for the spatial_states.
A 3D `Tensor` of shape (batch, width, height) of type float32
        which masks the spatial states so that they can be of different shapes.
The mask should only contain ones or zeros.
"""
raise NotImplementedError("Abstract property")
@property
def dimension(self) -> int:
"""Return the dimension of the states."""
return self.spatial_states.get_shape()[-1].value
# pylint: disable=abstract-method
class TemporalStatefulWithOutput(Stateful, TemporalStateful):
pass
class SpatialStatefulWithOutput(Stateful, SpatialStateful):
pass
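# A minimal implementation sketch (added for illustration; the class name and
# constructor are assumptions, not part of the package):
#
#     class MeanPoolingEncoder(TemporalStatefulWithOutput):
#         def __init__(self, states: tf.Tensor, mask: tf.Tensor) -> None:
#             self._states = states
#             self._mask = mask
#
#         @property
#         def temporal_states(self) -> tf.Tensor:
#             return self._states
#
#         @property
#         def temporal_mask(self) -> tf.Tensor:
#             return self._mask
#
#         @property
#         def output(self) -> tf.Tensor:
#             # masked mean over the time axis
#             summed = tf.reduce_sum(
#                 self._states * tf.expand_dims(self._mask, -1), axis=1)
#             return summed / tf.expand_dims(tf.to_float(self.lengths), -1)
#
# A concrete model part only has to provide the abstract properties;
# `lengths` and `dimension` then come for free from TemporalStateful.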
| bsd-3-clause | -9,093,537,448,114,825,000 | 33.539216 | 79 | 0.68805 | false |
crevetor/vcrwrapper | vcrwrapper/vcrutils.py | 1 | 2982 | from contextlib import contextmanager
import inspect
import json
import vcr
def json_query_matcher(r1, r2):
"""
Match two queries by decoding json-encoded query args and comparing them
"""
if len(r1.query) != len(r2.query):
return False
for i,q in enumerate(r1.query):
if q[0] != r2.query[i][0]:
return False
try:
j1 = json.loads(q[1])
j2 = json.loads(r2.query[i][1])
if j1 != j2:
return False
except ValueError:
# If we were unable to decode json just compare the values normally
if q[1] != r2.query[i][1]:
return False
return True
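# Illustration (added): two requests whose query strings differ only in JSON
# key order, e.g. ?filter={"a": 1, "b": 2} vs ?filter={"b": 2, "a": 1}, still
# match because both values decode to the same dict; values that are not
# valid JSON fall back to a plain string comparison.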
def get_vcr(*args, **kwargs):
"""Return a VCR, with our custom matchers registered.
Params are passed to VCR init."""
v = vcr.VCR(*args, **kwargs)
# register custom matchers here
v.register_matcher('json_query', json_query_matcher)
return v
def get_filename_from_method(func, receiver):
    """Return an unambiguous filename built from a test method invocation.
The method is assumed to be declared inside venmo_tests.
:attr func: the method's function object.
:attr receiver: the first argument to the method, i.e. self or cls.
"""
mod_name = func.__module__
if inspect.isclass(receiver):
class_name = receiver.__name__
else:
class_name = receiver.__class__.__name__
return "%s.%s.%s.yaml" % (mod_name, class_name, func.__name__)
def _get_subcassette_filename(name, parent_filename):
"""Return a cassette namespaced by a parent cassette filename.
For example::
>>> _get_subcassette_filename('foo', 'mytests.test_bar.yaml')
'mytests.test_bar.foo.yaml'
"""
parent_components = parent_filename.split('.')
parent_components.insert(len(parent_components) - 1, name)
return '.'.join(parent_components)
def get_namespace_cm(my_vcr, parent_filename, make_external_requests):
"""Return a context manager that uses a cassette namespaced under the parent.
The context manager takes two arguments:
* name: a string that names the cassette.
* match_on: (optional), passed to use_cassette to override the default.
"""
@contextmanager
def namespace_cm(name, match_on=None,
                     my_vcr=my_vcr, parent_filename=parent_filename,
make_external_requests=make_external_requests):
if make_external_requests:
yield
else:
kwargs = {
'path': _get_subcassette_filename(name, parent_filename),
'match_on': match_on
}
if match_on is None:
# vcr doesn't use a sentinel for match_on;
# it just shouldn't be present to default it.
del kwargs['match_on']
with my_vcr.use_cassette(**kwargs):
yield
return namespace_cm
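# Typical usage (illustrative sketch; the cassette names are assumptions):
#   namespace_cm = get_namespace_cm(my_vcr, 'mytests.test_bar.yaml', False)
#   with namespace_cm('login'):
#       ...  # HTTP calls here are recorded to mytests.test_bar.login.yaml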
| gpl-2.0 | 6,299,087,452,579,828,000 | 28.524752 | 81 | 0.597586 | false |
Micronaet/micronaet-campaign | product_export_excel/wizard/__init__.py | 1 | 1164 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<https://micronaet.com>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import excel_export_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,882,593,326,034,636,000 | 43.769231 | 79 | 0.646907 | false |
flavour/eden | modules/s3/codecs/xls.py | 1 | 49164 | # -*- coding: utf-8 -*-
"""
S3 Microsoft Excel codec
@copyright: 2011-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3XLS",
)
from gluon import HTTP, current
from gluon.contenttype import contenttype
from gluon.storage import Storage
from s3compat import INTEGER_TYPES, BytesIO, xrange
from ..s3codec import S3Codec
from ..s3utils import s3_str, s3_strip_markup, s3_unicode, s3_get_foreign_key
# =============================================================================
class S3XLS(S3Codec):
"""
Simple Microsoft Excel format codec
"""
# The xlwt library supports a maximum of 182 characters in a single cell
MAX_CELL_SIZE = 182
# Customizable styles
COL_WIDTH_MULTIPLIER = 310
# Python xlwt Colours
# https://docs.google.com/spreadsheets/d/1ihNaZcUh7961yU7db1-Db0lbws4NT24B7koY8v8GHNQ/pubhtml?gid=1072579560&single=true
LARGE_HEADER_COLOUR = 0x2C # pale_blue
HEADER_COLOUR = 0x2C # pale_blue
SUB_HEADER_COLOUR = 0x18 # periwinkle
SUB_TOTALS_COLOUR = 0x96
TOTALS_COLOUR = 0x00
ROW_ALTERNATING_COLOURS = [0x2A, # light_green
0x2B, # light_yellow
]
ERROR = Storage(
XLRD_ERROR = "XLS export requires python-xlrd module to be installed on server",
XLWT_ERROR = "XLS export requires python-xlwt module to be installed on server",
)
# -------------------------------------------------------------------------
def extract(self, resource, list_fields):
"""
Extract the rows from the resource
@param resource: the resource
@param list_fields: fields to include in list views
"""
title = self.crud_string(resource.tablename, "title_list")
get_vars = dict(current.request.vars)
get_vars["iColumns"] = len(list_fields)
query, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
resource.add_filter(query)
if orderby is None:
orderby = resource.get_config("orderby")
# Hierarchical FK Expansion:
# setting = {field_selector: [LevelLabel, LevelLabel, ...]}
expand_hierarchy = resource.get_config("xls_expand_hierarchy")
data = resource.select(list_fields,
left = left,
limit = None,
count = True,
getids = True,
orderby = orderby,
represent = True,
show_links = False,
raw_data = True if expand_hierarchy else False,
)
rfields = data.rfields
rows = data.rows
types = []
lfields = []
heading = {}
for rfield in rfields:
if rfield.show:
if expand_hierarchy:
levels = expand_hierarchy.get(rfield.selector)
else:
levels = None
if levels:
num_levels = len(levels)
colnames = self.expand_hierarchy(rfield, num_levels, rows)
lfields.extend(colnames)
types.extend(["string"] * num_levels)
T = current.T
for i, colname in enumerate(colnames):
heading[colname] = T(levels[i])
else:
lfields.append(rfield.colname)
heading[rfield.colname] = rfield.label or \
rfield.field.name.capitalize().replace("_", " ")
if rfield.ftype == "virtual":
types.append("string")
else:
types.append(rfield.ftype)
return (title, types, lfields, heading, rows)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a Microsoft Excel spreadsheet
@param resource: the source of the data that is to be encoded
as a spreadsheet, can be either of:
1) an S3Resource
2) an array of value dicts (dict of
column labels as first item, list of
field types as second item)
3) a dict like:
{columns: [key, ...],
headers: {key: label},
types: {key: type},
rows: [{key:value}],
}
@param attr: keyword arguments (see below)
@keyword as_stream: return the buffer (BytesIO) rather than
its contents (str), useful when the output
is supposed to be stored locally
@keyword title: the main title of the report
@keyword list_fields: fields to include in list views
@keyword report_groupby: used to create a grouping of the result:
either a Field object of the resource
or a string which matches a value in
the heading
@keyword use_colour: True to add colour to the cells, default False
@keyword evenodd: render different background colours
for even/odd rows ("stripes")
"""
# Do not redirect from here!
# ...but raise proper status code, which can be caught by caller
try:
import xlwt
except ImportError:
error = self.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
try:
from xlrd.xldate import xldate_from_date_tuple, \
xldate_from_time_tuple, \
xldate_from_datetime_tuple
except ImportError:
error = self.ERROR.XLRD_ERROR
current.log.error(error)
raise HTTP(503, body=error)
import datetime
MAX_CELL_SIZE = self.MAX_CELL_SIZE
COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER
# Get the attributes
title = attr.get("title")
if title is None:
title = current.T("Report")
list_fields = attr.get("list_fields")
group = attr.get("dt_group")
use_colour = attr.get("use_colour", False)
evenodd = attr.get("evenodd", True)
# Extract the data from the resource
if isinstance(resource, dict):
headers = resource.get("headers", {})
lfields = resource.get("columns", list_fields)
column_types = resource.get("types")
types = [column_types[col] for col in lfields]
rows = resource.get("rows")
elif isinstance(resource, (list, tuple)):
headers = resource[0]
types = resource[1]
rows = resource[2:]
lfields = list_fields
else:
if not list_fields:
list_fields = resource.list_fields()
(title, types, lfields, headers, rows) = self.extract(resource,
list_fields,
)
# Verify columns in items
request = current.request
if len(rows) > 0 and len(lfields) > len(rows[0]):
msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
current.log.error(msg)
# Grouping
report_groupby = lfields[group] if group else None
groupby_label = headers[report_groupby] if report_groupby else None
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
date_format = settings.get_L10n_date_format()
date_format_str = str(date_format)
dt_format_translate = self.dt_format_translate
date_format = dt_format_translate(date_format)
time_format = dt_format_translate(settings.get_L10n_time_format())
datetime_format = dt_format_translate(settings.get_L10n_datetime_format())
title_row = settings.get_xls_title_row()
# Get styles
styles = self._styles(use_colour = use_colour,
evenodd = evenodd,
datetime_format = datetime_format,
)
# Create the workbook
book = xlwt.Workbook(encoding="utf-8")
# Add sheets
sheets = []
# XLS exports are limited to 65536 rows per sheet, we bypass
# this by creating multiple sheets
row_limit = 65536
sheetnum = len(rows) / row_limit
# Can't have a / in the sheet_name, so replace any with a space
sheet_name = str(title.replace("/", " "))
if len(sheet_name) > 31:
# Sheet name cannot be over 31 chars
# (take sheet number suffix into account)
sheet_name = sheet_name[:31] if sheetnum == 1 else sheet_name[:28]
count = 1
while len(sheets) <= sheetnum:
sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
count += 1
if callable(title_row):
# Calling with sheet None to get the number of title rows
title_row_length = title_row(None)
else:
title_row_length = 2
# Add header row to all sheets, determine columns widths
header_style = styles["header"]
for sheet in sheets:
# Move this down if a title row will be added
if title_row:
header_row = sheet.row(title_row_length)
else:
header_row = sheet.row(0)
column_widths = []
has_id = False
col_index = 0
for selector in lfields:
if selector == report_groupby:
continue
label = headers[selector]
if label == "Id":
# Indicate to adjust col_index when writing out
has_id = True
column_widths.append(0)
col_index += 1
continue
if label == "Sort":
continue
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
header_row.write(write_col_index, str(label), header_style)
width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
width = min(width, 65535) # USHRT_MAX
column_widths.append(width)
sheet.col(write_col_index).width = width
col_index += 1
title = s3_str(title)
# Title row (optional, deployment setting)
if title_row:
T = current.T
large_header_style = styles["large_header"]
notes_style = styles["notes"]
for sheet in sheets:
if callable(title_row):
# Custom title rows
title_row(sheet)
else:
# First row => Title (standard = "title_list" CRUD string)
current_row = sheet.row(0)
if col_index > 0:
sheet.write_merge(0, 0, 0, col_index,
title,
large_header_style,
)
current_row.height = 500
# Second row => Export date/time
current_row = sheet.row(1)
current_row.write(0, "%s:" % T("Date Exported"), notes_style)
current_row.write(1, request.now, notes_style)
# Fix the size of the last column to display the date
if 16 * COL_WIDTH_MULTIPLIER > width:
sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER
# Initialize counters
total_cols = col_index
# Move the rows down if a title row is included
if title_row:
row_index = title_row_length
else:
row_index = 0
# Helper function to get the current row
def get_current_row(row_count, row_limit):
sheet_count = int(row_count / row_limit)
row_number = row_count - (sheet_count * row_limit)
if sheet_count > 0:
row_number += 1
return sheets[sheet_count], sheets[sheet_count].row(row_number)
# Write the table contents
subheading = None
odd_style = styles["odd"]
even_style = styles["even"]
subheader_style = styles["subheader"]
for row in rows:
# Current row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
# Group headers
if report_groupby:
represent = s3_strip_markup(s3_unicode(row[report_groupby]))
if subheading != represent:
# Start of new group - write group header
subheading = represent
current_sheet.write_merge(row_index, row_index, 0, total_cols,
subheading,
subheader_style,
)
# Move on to next row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
col_index = 0
remaining_fields = lfields
# Custom row style?
row_style = None
if "_style" in row:
stylename = row["_style"]
if stylename in styles:
row_style = styles[stylename]
# Group header/footer row?
if "_group" in row:
group_info = row["_group"]
label = group_info.get("label")
totals = group_info.get("totals")
if label:
label = s3_strip_markup(s3_unicode(label))
style = row_style or subheader_style
span = group_info.get("span")
if span == 0:
current_sheet.write_merge(row_index,
row_index,
0,
total_cols - 1,
label,
style,
)
if totals:
# Write totals into the next row
row_index += 1
current_sheet, current_row = \
get_current_row(row_index, row_limit)
else:
current_sheet.write_merge(row_index,
row_index,
0,
span - 1,
label,
style,
)
col_index = span
remaining_fields = lfields[span:]
if not totals:
continue
for field in remaining_fields:
label = headers[field]
if label == groupby_label:
continue
if label == "Id":
# Skip the ID column from XLS exports
col_index += 1
continue
if field not in row:
represent = ""
else:
represent = s3_strip_markup(s3_unicode(row[field]))
coltype = types[col_index]
if coltype == "sort":
continue
if len(represent) > MAX_CELL_SIZE:
represent = represent[:MAX_CELL_SIZE]
value = represent
if coltype == "date":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day)
value = xldate_from_date_tuple(date_tuple, 0)
style.num_format_str = date_format
except:
pass
elif coltype == "datetime":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day,
cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_datetime_tuple(date_tuple, 0)
style.num_format_str = datetime_format
except:
pass
elif coltype == "time":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_time_tuple(date_tuple)
style.num_format_str = time_format
except:
pass
elif coltype == "integer":
try:
value = int(value)
style.num_format_str = "0"
except:
pass
elif coltype == "double":
try:
value = float(value)
style.num_format_str = "0.00"
except:
pass
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
current_row.write(write_col_index, value, style)
width = len(represent) * COL_WIDTH_MULTIPLIER
if width > column_widths[col_index]:
column_widths[col_index] = width
current_sheet.col(write_col_index).width = width
col_index += 1
# Additional sheet settings
for sheet in sheets:
sheet.panes_frozen = True
sheet.horz_split_pos = 1
# Write output
output = BytesIO()
book.save(output)
output.seek(0)
if attr.get("as_stream", False):
return output
# Response headers
filename = "%s_%s.xls" % (request.env.server_name, title)
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".xls")
response.headers["Content-disposition"] = disposition
return output.read()
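        # Minimal usage sketch for the dict form of `resource` described in
        # the docstring (field names below are assumptions, not part of the
        # framework):
        #
        #   codec = S3XLS()
        #   xls = codec.encode({"columns": ["name", "quantity"],
        #                       "headers": {"name": "Name",
        #                                   "quantity": "Quantity"},
        #                       "types": {"name": "string",
        #                                 "quantity": "integer"},
        #                       "rows": [{"name": "Tent", "quantity": 10}],
        #                       },
        #                      title = "Stock Report",
        #                      as_stream = True,
        #                      )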
# -------------------------------------------------------------------------
@staticmethod
def expand_hierarchy(rfield, num_levels, rows):
"""
Expand a hierarchical foreign key column into one column
per hierarchy level
@param rfield: the column (S3ResourceField)
@param num_levels: the number of levels (from root)
@param rows: the Rows from S3ResourceData
@returns: list of keys (column names) for the inserted columns
"""
field = rfield.field
if not field or rfield.ftype[:9] != "reference":
return []
# Get the look-up table
ktablename = s3_get_foreign_key(field, m2m=False)[0]
if not ktablename:
return []
colname = rfield.colname
represent = field.represent
# Get the hierarchy
from ..s3hierarchy import S3Hierarchy
h = S3Hierarchy(ktablename)
if not h.config:
return []
# Collect the values from rows
values = set()
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
values.add(value)
# Generate the expanded values
expanded = h.repr_expand(values,
levels = num_levels,
represent = represent,
)
# ...and add them into the rows
colnames = ["%s__%s" % (colname, l) for l in range(num_levels)]
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
hcols = expanded.get(value)
for level in range(num_levels):
row[colnames[level]] = hcols[level] if hcols else None
return colnames
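        # Example (illustrative, assuming level 0 is the root as noted above):
        # for colname "test.parent_id" and num_levels=2 this adds the keys
        # "test.parent_id__0" and "test.parent_id__1" to every row, holding
        # the represented ancestor values (or None where no ancestor exists).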
# -------------------------------------------------------------------------
@staticmethod
def encode_pt(pt, title):
"""
Encode a S3PivotTable as XLS sheet
@param pt: the S3PivotTable
@param title: the title for the report
@returns: the XLS file as stream
"""
output = BytesIO()
book = S3PivotTableXLS(pt).encode(title)
book.save(output)
output.seek(0)
return output
# -------------------------------------------------------------------------
@staticmethod
def dt_format_translate(pyfmt):
"""
Translate a Python datetime format string into an
Excel datetime format string
@param pyfmt: the Python format string
"""
translate = {"%a": "ddd",
"%A": "dddd",
"%b": "mmm",
"%B": "mmmm",
"%c": "",
"%d": "dd",
"%f": "",
"%H": "hh",
"%I": "hh",
"%j": "",
"%m": "mm",
"%M": "mm",
"%p": "AM/PM",
"%S": "ss",
"%U": "",
"%w": "",
"%W": "",
"%x": "",
"%X": "",
"%y": "yy",
"%Y": "yyyy",
"%z": "",
"%Z": "",
}
PERCENT = "__percent__"
xlfmt = str(pyfmt).replace("%%", PERCENT)
for tag, translation in translate.items():
xlfmt = xlfmt.replace(tag, translation)
return xlfmt.replace(PERCENT, "%")
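        # Example (illustrative): dt_format_translate("%Y-%m-%d %H:%M") returns
        # "yyyy-mm-dd hh:mm"; a literal "%%" survives as "%" thanks to the
        # placeholder substitution above.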
# -------------------------------------------------------------------------
@classmethod
def _styles(cls,
use_colour=False,
evenodd=True,
datetime_format=None,
):
"""
XLS encoder standard cell styles
@param use_colour: use background colour in cells
@param evenodd: render different background colours
for even/odd rows ("stripes")
@param datetime_format: the date/time format
"""
import xlwt
if datetime_format is None:
# Support easier usage from external functions
datetime_format = cls.dt_format_translate(current.deployment_settings.get_L10n_datetime_format())
# Styles
large_header = xlwt.XFStyle()
large_header.font.bold = True
large_header.font.height = 400
if use_colour:
SOLID_PATTERN = large_header.pattern.SOLID_PATTERN
large_header.alignment.horz = large_header.alignment.HORZ_CENTER
large_header.pattern.pattern = SOLID_PATTERN
large_header.pattern.pattern_fore_colour = cls.LARGE_HEADER_COLOUR
notes = xlwt.XFStyle()
notes.font.italic = True
notes.font.height = 160 # 160 Twips = 8 point
notes.num_format_str = datetime_format
header = xlwt.XFStyle()
header.font.bold = True
header.num_format_str = datetime_format
if use_colour:
header.pattern.pattern = SOLID_PATTERN
header.pattern.pattern_fore_colour = cls.HEADER_COLOUR
subheader = xlwt.XFStyle()
subheader.font.bold = True
if use_colour:
subheader.pattern.pattern = SOLID_PATTERN
subheader.pattern.pattern_fore_colour = cls.SUB_HEADER_COLOUR
subtotals = xlwt.XFStyle()
subtotals.font.bold = True
if use_colour:
subtotals.pattern.pattern = SOLID_PATTERN
subtotals.pattern.pattern_fore_colour = cls.SUB_TOTALS_COLOUR
totals = xlwt.XFStyle()
totals.font.bold = True
if use_colour:
totals.pattern.pattern = SOLID_PATTERN
totals.pattern.pattern_fore_colour = cls.TOTALS_COLOUR
odd = xlwt.XFStyle()
if use_colour and evenodd:
odd.pattern.pattern = SOLID_PATTERN
odd.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[0]
even = xlwt.XFStyle()
if use_colour and evenodd:
even.pattern.pattern = SOLID_PATTERN
even.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[1]
return {"large_header": large_header,
"notes": notes,
"header": header,
"subheader": subheader,
"subtotals": subtotals,
"totals": totals,
"odd": odd,
"even": even,
}
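        # Usage sketch (assumed caller pattern, not prescribed by this module):
        #     styles = S3XLS._styles(use_colour=True)
        #     sheet.row(0).write(0, title, styles["large_header"])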
# =============================================================================
class S3PivotTableXLS(object):
"""
XLS encoder for S3PivotTables
@todo: merge+DRY with S3XLS?
@todo: support multiple layers (=write multiple sheets)
@todo: handle huge pivot tables (=exceeding XLS rows/cols limits)
"""
def __init__(self, pt):
"""
Constructor
@param pt: the S3PivotTable to encode
"""
self.pt = pt
# Initialize properties
self._styles = None
self._formats = None
self.lookup = {}
self.valuemap = {}
# -------------------------------------------------------------------------
def encode(self, title):
"""
Convert this pivot table into an XLS file
@param title: the title of the report
@returns: the XLS workbook
"""
try:
import xlwt
except ImportError:
error = S3XLS.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
T = current.T
TOTAL = s3_str(s3_unicode(T("Total")).upper())
pt = self.pt
# Get report options
report_options = pt.resource.get_config("report_options", {})
# Report dimensions
fact = pt.facts[0]
layer = fact.layer
rows_dim = pt.rows
cols_dim = pt.cols
numrows = pt.numrows
numcols = pt.numcols
# Resource fields for dimensions
rfields = pt.rfields
fact_rfield = rfields[fact.selector]
rows_rfield = rfields[rows_dim] if rows_dim else None
cols_rfield = rfields[cols_dim] if cols_dim else None
# Dimension labels
get_label = fact._get_field_label
if rows_dim:
# Get row axis label
rows_label = s3_str(get_label(rows_rfield,
report_options.get("rows"),
))
else:
rows_label = ""
if cols_dim:
cols_label = s3_str(get_label(cols_rfield,
report_options.get("cols"),
))
else:
cols_label = ""
fact_label = s3_str(fact.get_label(fact_rfield,
report_options.get("fact"),
))
# Index of the column for row totals
total_column = (numcols + 1) if cols_dim else 1
# Sort+represent rows and columns
rows, cols = self.sortrepr()
# Create workbook and sheet
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet(s3_str(title))
write = self.write
# Write header
title_row = current.deployment_settings.get_xls_title_row()
if callable(title_row):
# Custom header (returns number of header rows)
title_length = title_row(sheet)
elif title_row:
# Default header
title_length = 2
# Report title
write(sheet, 0, 0, s3_str(title),
colspan = numcols + 2,
style = "title",
)
# Current date/time (in local timezone)
from ..s3datetime import S3DateTime
dt = S3DateTime.to_local(current.request.utcnow)
write(sheet, 1, 0, dt, style = "subheader", numfmt = "datetime")
else:
# No header
title_length = -1
rowindex = title_length + 1
# Fact label
if rows_dim and cols_dim:
write(sheet, rowindex, 0, fact_label, style="fact_label")
# Columns axis title
if cols_dim:
write(sheet, rowindex, 1, cols_label,
colspan = numcols,
style = "axis_title",
)
rowindex += 1
# Row axis title
write(sheet, rowindex, 0, rows_label, style="axis_title")
# Column labels
if cols_dim:
for idx, col in enumerate(cols):
write(sheet, rowindex, idx + 1, col[2]["text"],
style = "col_label",
)
total_label = TOTAL
else:
# Use fact title as row total label if there is no column axis
total_label = fact_label
# Row totals label
write(sheet, rowindex, total_column, total_label, style="total_right")
# Determine the number format for cell values
numfmt = self.number_format()
totfmt = "integer" if fact.method in ("count", "list") else numfmt
# Choose cell value style according to number format
fact_style = "numeric" if numfmt else None
# Get fact representation method
if fact.method == "list":
listrepr = self.listrepr
fk, fact_repr = pt._represents([layer])[fact.selector]
else:
listrepr = fk = fact_repr = None
# Write data rows (if any)
rowindex += 1
if rows_dim:
icell = pt.cell
for i in xrange(numrows):
row = rows[i]
# Row-label
write(sheet, rowindex + i, 0, row[2]["text"],
style = "row_label",
)
# Cell column values (if any)
if cols_dim:
for j in xrange(numcols):
cell = icell[row[0]][cols[j][0]]
if listrepr:
value = listrepr(cell, fact_rfield, fact_repr, fk=fk)
else:
value = cell[layer]
write(sheet, rowindex + i, j + 1, value,
numfmt = numfmt,
style = fact_style,
)
# Row-total
write(sheet, rowindex + i, total_column, row[1],
style = "total",
numfmt = totfmt,
)
rowindex += numrows
total_label = TOTAL
else:
# Use fact label as column totals label if
# there is no row dimension
total_label = fact_label
# Column totals label
write(sheet, rowindex, 0, total_label, style="total_left")
# Column totals
if cols_dim:
for i in xrange(numcols):
write(sheet, rowindex, i + 1, cols[i][1],
style = "total",
numfmt = totfmt,
)
# Grand total
total = pt.totals[layer]
write(sheet, rowindex, total_column, total,
style = "grand_total",
numfmt = totfmt,
)
return book
# -------------------------------------------------------------------------
def write(self,
sheet,
rowindex,
colindex,
value,
style=None,
numfmt=None,
rowspan=None,
colspan=None,
adjust=True
):
"""
Write a value to a spreadsheet cell
@param sheet: the work sheet
@param rowindex: the row index of the cell
@param colindex: the column index of the cell
@param value: the value to write
@param style: a style name (see styles property)
@param numfmt: a number format name (see formats property)
@param rowspan: number of rows to merge
@param colspan: number of columns to merge
@param adjust: True to adjust column width and row height,
False to suppress automatic adjustment
"""
styles = self.styles
if style:
style = styles.get(style)
if style is None:
style = styles["default"]
# Apply number format
if numfmt:
style.num_format_str = self.formats.get(numfmt, "")
# Get the row
row = sheet.row(rowindex)
if type(value) is list:
labels = [s3_str(v) for v in value]
contents = "\n".join(labels)
else:
labels = [s3_str(value)]
contents = value
# Apply rowspan and colspan
rowspan = 0 if not rowspan or rowspan < 1 else rowspan - 1
colspan = 0 if not colspan or colspan < 1 else colspan - 1
if rowspan > 1 or colspan > 1:
# Write-merge
sheet.write_merge(rowindex, rowindex + rowspan,
colindex, colindex + colspan,
contents,
style,
)
else:
# Just write
row.write(colindex, contents, style)
# Reset number format
style.num_format_str = ""
# Adjust column width and row height
# NB approximations, no exact science (not possible except by
# enforcing a particular fixed-width font, which we don't
# want), so manual adjustments after export may still be
# necessary. Better solutions welcome!
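        # Rough illustration (assuming a 10pt font, i.e. style.font.height of
        # 200 twips): a 20-character label in a single column gives a width of
        # int(20 * 200 * 5.0 / 3.0) = 6666 units (xlwt column widths are
        # expressed in 1/256ths of a character width).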
if adjust:
fontsize = float(style.font.height)
# Adjust column width
col = sheet.col(colindex)
if not colspan:
if labels:
width = int(min(max(len(l) for l in labels), 28) *
fontsize * 5.0 / 3.0)
else:
width = 0
if width > col.width:
col.width = width
# Adjust row height
if not rowspan:
lineheight = 1.2 if style.font.bold else 1.0
import math
numlines = 0
width = (col.width * 0.8 * (colspan + 1))
for label in labels:
numlines += math.ceil(len(label) * fontsize / width)
if numlines > 1:
lines = min(numlines, 10)
height = int((lines + 0.8 / lineheight) *
fontsize * lineheight)
else:
height = int(fontsize * lineheight)
if height > row.height:
row.height = height
row.height_mismatch = 1
# -------------------------------------------------------------------------
@property
def styles(self):
"""
Style definitions for pivot tables (lazy property)
@returns: dict of named XFStyle instances
"""
styles = self._styles
if styles is None:
from xlwt import Alignment, XFStyle
# Alignments
center = Alignment()
center.horz = Alignment.HORZ_CENTER
center.vert = Alignment.VERT_CENTER
center.wrap = 1
centerleft = Alignment()
centerleft.horz = Alignment.HORZ_LEFT
centerleft.vert = Alignment.VERT_CENTER
centerleft.wrap = 1
bottomcentered = Alignment()
bottomcentered.horz = Alignment.HORZ_CENTER
bottomcentered.vert = Alignment.VERT_BOTTOM
bottomcentered.wrap = 1
bottomleft = Alignment()
bottomleft.horz = Alignment.HORZ_LEFT
bottomleft.vert = Alignment.VERT_BOTTOM
bottomleft.wrap = 1
bottomright = Alignment()
bottomright.horz = Alignment.HORZ_RIGHT
bottomright.vert = Alignment.VERT_BOTTOM
bottomright.wrap = 1
topleft = Alignment()
topleft.horz = Alignment.HORZ_LEFT
topleft.vert = Alignment.VERT_TOP
topleft.wrap = 1
topright = Alignment()
topright.horz = Alignment.HORZ_RIGHT
topright.vert = Alignment.VERT_TOP
topright.wrap = 1
# Styles
twips = lambda pt: 20 * pt # Points to Twips
def style(fontsize=10, bold=False, italic=False, align=None):
""" XFStyle builder helper """
style = XFStyle()
style.font.height = twips(fontsize)
style.font.bold = bold
style.font.italic = italic
if align is not None:
style.alignment = align
return style
self._styles = styles = {
"default": style(align=topleft),
"numeric": style(align=bottomright),
"title": style(fontsize=14, bold=True, align=bottomleft),
"subheader": style(fontsize=8, italic=True, align=bottomleft),
"row_label": style(bold=True, align=topleft),
"col_label": style(bold=True, align=bottomcentered),
"fact_label": style(fontsize=13, bold=True, align=centerleft),
"axis_title": style(fontsize=11, bold=True, align=center),
"total": style(fontsize=11, bold=True, italic=True, align=topright),
"total_left": style(fontsize=11, bold=True, italic=True, align=topleft),
"total_right": style(fontsize=11, bold=True, italic=True, align=center),
"grand_total": style(fontsize=12, bold=True, italic=True, align=topright),
}
return styles
# -------------------------------------------------------------------------
@property
def formats(self):
"""
Number formats for pivot tables (lazy property)
@returns: dict of format strings
"""
formats = self._formats
if formats is None:
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
translate = S3XLS.dt_format_translate
date_format = translate(settings.get_L10n_date_format())
datetime_format = translate(settings.get_L10n_datetime_format())
time_format = translate(settings.get_L10n_time_format())
formats = {
"date": date_format,
"datetime": datetime_format,
"time": time_format,
"integer": "0",
"double": "0.00"
}
self._formats = formats
return formats
# -------------------------------------------------------------------------
def number_format(self):
"""
Determine the number format for this pivot table
@returns: the number format key (see formats property)
"""
numfmt = None
pt = self.pt
fact = pt.facts[0]
rfield = pt.rfields[fact.selector]
ftype = rfield.ftype
if fact.method == "count":
numfmt = "integer"
elif ftype == "integer":
if fact.method == "avg":
# Average value of ints is a float
numfmt = "double"
else:
numfmt = "integer"
elif ftype in ("date", "datetime", "time", "double"):
numfmt = ftype
elif ftype == "virtual":
# Probe the first value
value = pt.cell[0][0][fact.layer]
if isinstance(value, INTEGER_TYPES):
numfmt = "integer"
elif isinstance(value, float):
numfmt = "double"
else:
import datetime
if isinstance(value, datetime.datetime):
numfmt = "datetime"
elif isinstance(value, datetime.date):
numfmt = "date"
elif isinstance(value, datetime.time):
numfmt = "time"
return numfmt
# -------------------------------------------------------------------------
def sortrepr(self):
"""
Sort and represent pivot table axes
@returns: tuple (rows, cols), each a list of tuples:
(index, ...the index of the row/column in
the original cell array
total, ...total value of the row/column
{value: axis_value, ...group value of the row/column
text: axis_repr, ...representation of the group value
},
)
"""
pt = self.pt
rfields = pt.rfields
layer = pt.facts[0].layer
# Sort rows
rows_dim = pt.rows
rows_rfield = rfields[rows_dim] if rows_dim else None
row_repr = pt._represent_method(rows_dim)
irows = pt.row
rows = []
for i in xrange(pt.numrows):
irow = irows[i]
header = {"value": irow.value,
"text": irow.text if "text" in irow
else row_repr(irow.value),
}
rows.append((i, irow[layer], header))
pt._sortdim(rows, rows_rfield, index=2)
# Sort columns
cols_dim = pt.cols
cols_rfield = rfields[cols_dim] if cols_dim else None
col_repr = pt._represent_method(cols_dim)
icols = pt.col
cols = []
for i in xrange(pt.numcols):
icol = icols[i]
header = {"value": icol.value,
"text": icol.text if "text" in icol
else col_repr(icol.value),
}
cols.append((i, icol[layer], header))
pt._sortdim(cols, cols_rfield, index=2)
return rows, cols
# -------------------------------------------------------------------------
def listrepr(self, cell, rfield, represent, fk=True):
"""
Represent and sort a list of cell values (for "list" aggregation
method)
@param cell - the cell data
@param rfield - the fact S3ResourceField
@param represent - representation method for the fact field
@param fk - fact field is a foreign key
@returns: sorted list of represented cell values
"""
pt = self.pt
records = pt.records
colname = rfield.colname
lookup = self.lookup
valuemap = self.valuemap
keys = []
for record_id in cell["records"]:
record = records[record_id]
try:
fvalue = record[colname]
except AttributeError:
continue
if fvalue is None:
continue
if type(fvalue) is not list:
fvalue = [fvalue]
for v in fvalue:
if v is None:
continue
if fk:
if v not in keys:
keys.append(v)
if v not in lookup:
lookup[v] = represent(v)
else:
if v not in valuemap:
next_id = len(valuemap)
valuemap[v] = next_id
keys.append(next_id)
lookup[next_id] = represent(v)
else:
prev_id = valuemap[v]
if prev_id not in keys:
keys.append(prev_id)
keys.sort(key=lambda i: lookup[i])
items = [s3_str(lookup[key]) for key in keys if key in lookup]
return items
# =============================================================================
#class S3HTML2XLS(object):
# """
# Class that takes HTML in the form of web2py helper objects
# and converts it to XLS
#
# @ToDo: Complete this (e.g. start with a copy of S3html2pdf)
# See https://gist.github.com/JustOnce/2be3e4d951a66c22c5e0
# & http://pydoc.net/Python/Kiowa/0.2w.rc9/kiowa.utils.xls.html2xls/
#
# Places to use this:
# org_CapacityReport()
# """
#
# def __init__(self):
#
# pass
#
# # -------------------------------------------------------------------------
# def parse(self, html):
# """
# Entry point for class
# """
#
# return None
#
# END =========================================================================
| mit | -3,162,987,197,995,865,000 | 34.964887 | 124 | 0.467252 | false |
denisenkom/django | tests/serializers_regress/tests.py | 1 | 21745 | """
A test spanning all the capabilities of all the serializers.
This class defines sample data and a dynamically generated
test case that is capable of testing the capabilities of
the serializers. This includes all valid data values, plus
forward, backwards and self references.
"""
from __future__ import unicode_literals
import datetime
import decimal
from unittest import expectedFailure, skipUnless
try:
import yaml
except ImportError:
yaml = None
from django.core import serializers
from django.core.serializers import SerializerDoesNotExist
from django.core.serializers.base import DeserializationError
from django.core.serializers.xml_serializer import DTDForbidden
from django.db import connection, models
from django.http import HttpResponse
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import curry
from .models import (BinaryData, BooleanData, CharData, DateData, DateTimeData, EmailData,
FileData, FilePathData, DecimalData, FloatData, IntegerData, IPAddressData,
GenericIPAddressData, NullBooleanData, PositiveIntegerData,
PositiveSmallIntegerData, SlugData, SmallData, TextData, TimeData,
GenericData, Anchor, UniqueAnchor, FKData, M2MData, O2OData,
FKSelfData, M2MSelfData, FKDataToField, FKDataToO2O, M2MIntermediateData,
Intermediate, BooleanPKData, CharPKData, EmailPKData, FilePathPKData,
DecimalPKData, FloatPKData, IntegerPKData, IPAddressPKData,
GenericIPAddressPKData, PositiveIntegerPKData,
PositiveSmallIntegerPKData, SlugPKData, SmallPKData,
AutoNowDateTimeData, ModifyingSaveData, InheritAbstractModel, BaseModel,
ExplicitInheritBaseModel, InheritBaseModel, ProxyBaseModel,
ProxyProxyBaseModel, BigIntegerData, LengthModel, Tag, ComplexModel,
NaturalKeyAnchor, FKDataNaturalKey)
# A set of functions that can be used to recreate
# test data objects of various kinds.
# The save method is a raw base model save, to make
# sure that the data in the database matches the
# exact test case.
def data_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data
models.Model.save_base(instance, raw=True)
return [instance]
def generic_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data[0]
models.Model.save_base(instance, raw=True)
for tag in data[1:]:
instance.tags.create(data=tag)
return [instance]
def fk_create(pk, klass, data):
instance = klass(id=pk)
setattr(instance, 'data_id', data)
models.Model.save_base(instance, raw=True)
return [instance]
def m2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save_base(instance, raw=True)
instance.data = data
return [instance]
def im2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save_base(instance, raw=True)
return [instance]
def im_create(pk, klass, data):
instance = klass(id=pk)
instance.right_id = data['right']
instance.left_id = data['left']
if 'extra' in data:
instance.extra = data['extra']
models.Model.save_base(instance, raw=True)
return [instance]
def o2o_create(pk, klass, data):
instance = klass()
instance.data_id = data
models.Model.save_base(instance, raw=True)
return [instance]
def pk_create(pk, klass, data):
instance = klass()
instance.data = data
models.Model.save_base(instance, raw=True)
return [instance]
def inherited_create(pk, klass, data):
instance = klass(id=pk,**data)
# This isn't a raw save because:
# 1) we're testing inheritance, not field behavior, so none
# of the field values need to be protected.
# 2) saving the child class and having the parent created
# automatically is easier than manually creating both.
models.Model.save(instance)
created = [instance]
for klass,field in instance._meta.parents.items():
created.append(klass.objects.get(id=pk))
return created
# A set of functions that can be used to compare
# test data objects of various kinds
def data_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
if klass == BinaryData and data is not None:
testcase.assertEqual(bytes(data), bytes(instance.data),
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
pk, repr(bytes(data)), type(data), repr(bytes(instance.data)),
type(instance.data))
)
else:
testcase.assertEqual(data, instance.data,
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
pk, data, type(data), instance, type(instance.data))
)
def generic_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data[0], instance.data)
testcase.assertEqual(data[1:], [t.data for t in instance.tags.order_by('id')])
def fk_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data, instance.data_id)
def m2m_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data, [obj.id for obj in instance.data.order_by('id')])
def im2m_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
    # Nothing else to check - the instance should simply exist
def im_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data['left'], instance.left_id)
testcase.assertEqual(data['right'], instance.right_id)
if 'extra' in data:
testcase.assertEqual(data['extra'], instance.extra)
else:
testcase.assertEqual("doesn't matter", instance.extra)
def o2o_compare(testcase, pk, klass, data):
instance = klass.objects.get(data=data)
testcase.assertEqual(data, instance.data_id)
def pk_compare(testcase, pk, klass, data):
instance = klass.objects.get(data=data)
testcase.assertEqual(data, instance.data)
def inherited_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
for key,value in data.items():
testcase.assertEqual(value, getattr(instance,key))
# Define some data types. Each data type is
# actually a pair of functions; one to create
# and one to compare objects of that type
data_obj = (data_create, data_compare)
generic_obj = (generic_create, generic_compare)
fk_obj = (fk_create, fk_compare)
m2m_obj = (m2m_create, m2m_compare)
im2m_obj = (im2m_create, im2m_compare)
im_obj = (im_create, im_compare)
o2o_obj = (o2o_create, o2o_compare)
pk_obj = (pk_create, pk_compare)
inherited_obj = (inherited_create, inherited_compare)
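# Each pair is used later as pair[0](pk, klass, datum) to create a test object
# and pair[1](testcase, pk, klass, datum) to verify it after the
# serialize/deserialize round trip (see serializerTest below).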
test_data = [
# Format: (data type, PK value, Model Class, data)
(data_obj, 1, BinaryData, six.memoryview(b"\x05\xFD\x00")),
(data_obj, 2, BinaryData, None),
(data_obj, 5, BooleanData, True),
(data_obj, 6, BooleanData, False),
(data_obj, 10, CharData, "Test Char Data"),
(data_obj, 11, CharData, ""),
(data_obj, 12, CharData, "None"),
(data_obj, 13, CharData, "null"),
(data_obj, 14, CharData, "NULL"),
(data_obj, 15, CharData, None),
# (We use something that will fit into a latin1 database encoding here,
# because that is still the default used on many system setups.)
(data_obj, 16, CharData, '\xa5'),
(data_obj, 20, DateData, datetime.date(2006,6,16)),
(data_obj, 21, DateData, None),
(data_obj, 30, DateTimeData, datetime.datetime(2006,6,16,10,42,37)),
(data_obj, 31, DateTimeData, None),
(data_obj, 40, EmailData, "[email protected]"),
(data_obj, 41, EmailData, None),
(data_obj, 42, EmailData, ""),
(data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'),
# (data_obj, 51, FileData, None),
(data_obj, 52, FileData, ""),
(data_obj, 60, FilePathData, "/foo/bar/whiz.txt"),
(data_obj, 61, FilePathData, None),
(data_obj, 62, FilePathData, ""),
(data_obj, 70, DecimalData, decimal.Decimal('12.345')),
(data_obj, 71, DecimalData, decimal.Decimal('-12.345')),
(data_obj, 72, DecimalData, decimal.Decimal('0.0')),
(data_obj, 73, DecimalData, None),
(data_obj, 74, FloatData, 12.345),
(data_obj, 75, FloatData, -12.345),
(data_obj, 76, FloatData, 0.0),
(data_obj, 77, FloatData, None),
(data_obj, 80, IntegerData, 123456789),
(data_obj, 81, IntegerData, -123456789),
(data_obj, 82, IntegerData, 0),
(data_obj, 83, IntegerData, None),
#(XX, ImageData
(data_obj, 90, IPAddressData, "127.0.0.1"),
(data_obj, 91, IPAddressData, None),
(data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
(data_obj, 96, GenericIPAddressData, None),
(data_obj, 100, NullBooleanData, True),
(data_obj, 101, NullBooleanData, False),
(data_obj, 102, NullBooleanData, None),
(data_obj, 120, PositiveIntegerData, 123456789),
(data_obj, 121, PositiveIntegerData, None),
(data_obj, 130, PositiveSmallIntegerData, 12),
(data_obj, 131, PositiveSmallIntegerData, None),
(data_obj, 140, SlugData, "this-is-a-slug"),
(data_obj, 141, SlugData, None),
(data_obj, 142, SlugData, ""),
(data_obj, 150, SmallData, 12),
(data_obj, 151, SmallData, -12),
(data_obj, 152, SmallData, 0),
(data_obj, 153, SmallData, None),
(data_obj, 160, TextData, """This is a long piece of text.
It contains line breaks.
Several of them.
The end."""),
(data_obj, 161, TextData, ""),
(data_obj, 162, TextData, None),
(data_obj, 170, TimeData, datetime.time(10,42,37)),
(data_obj, 171, TimeData, None),
(generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']),
(generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']),
(data_obj, 300, Anchor, "Anchor 1"),
(data_obj, 301, Anchor, "Anchor 2"),
(data_obj, 302, UniqueAnchor, "UAnchor 1"),
(fk_obj, 400, FKData, 300), # Post reference
(fk_obj, 401, FKData, 500), # Pre reference
(fk_obj, 402, FKData, None), # Empty reference
(m2m_obj, 410, M2MData, []), # Empty set
(m2m_obj, 411, M2MData, [300,301]), # Post reference
(m2m_obj, 412, M2MData, [500,501]), # Pre reference
(m2m_obj, 413, M2MData, [300,301,500,501]), # Pre and Post reference
(o2o_obj, None, O2OData, 300), # Post reference
(o2o_obj, None, O2OData, 500), # Pre reference
(fk_obj, 430, FKSelfData, 431), # Pre reference
(fk_obj, 431, FKSelfData, 430), # Post reference
(fk_obj, 432, FKSelfData, None), # Empty reference
(m2m_obj, 440, M2MSelfData, []),
(m2m_obj, 441, M2MSelfData, []),
(m2m_obj, 442, M2MSelfData, [440, 441]),
(m2m_obj, 443, M2MSelfData, [445, 446]),
(m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]),
(m2m_obj, 445, M2MSelfData, []),
(m2m_obj, 446, M2MSelfData, []),
(fk_obj, 450, FKDataToField, "UAnchor 1"),
(fk_obj, 451, FKDataToField, "UAnchor 2"),
(fk_obj, 452, FKDataToField, None),
(fk_obj, 460, FKDataToO2O, 300),
(im2m_obj, 470, M2MIntermediateData, None),
    # Testing post- and pre-references and extra fields
(im_obj, 480, Intermediate, {'right': 300, 'left': 470}),
(im_obj, 481, Intermediate, {'right': 300, 'left': 490}),
(im_obj, 482, Intermediate, {'right': 500, 'left': 470}),
(im_obj, 483, Intermediate, {'right': 500, 'left': 490}),
(im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}),
(im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}),
(im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}),
(im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}),
(im2m_obj, 490, M2MIntermediateData, []),
(data_obj, 500, Anchor, "Anchor 3"),
(data_obj, 501, Anchor, "Anchor 4"),
(data_obj, 502, UniqueAnchor, "UAnchor 2"),
(pk_obj, 601, BooleanPKData, True),
(pk_obj, 602, BooleanPKData, False),
(pk_obj, 610, CharPKData, "Test Char PKData"),
# (pk_obj, 620, DatePKData, datetime.date(2006,6,16)),
# (pk_obj, 630, DateTimePKData, datetime.datetime(2006,6,16,10,42,37)),
(pk_obj, 640, EmailPKData, "[email protected]"),
# (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'),
(pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"),
(pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')),
(pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')),
(pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')),
(pk_obj, 673, FloatPKData, 12.345),
(pk_obj, 674, FloatPKData, -12.345),
(pk_obj, 675, FloatPKData, 0.0),
(pk_obj, 680, IntegerPKData, 123456789),
(pk_obj, 681, IntegerPKData, -123456789),
(pk_obj, 682, IntegerPKData, 0),
# (XX, ImagePKData
(pk_obj, 690, IPAddressPKData, "127.0.0.1"),
(pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
# (pk_obj, 700, NullBooleanPKData, True),
# (pk_obj, 701, NullBooleanPKData, False),
(pk_obj, 720, PositiveIntegerPKData, 123456789),
(pk_obj, 730, PositiveSmallIntegerPKData, 12),
(pk_obj, 740, SlugPKData, "this-is-a-slug"),
(pk_obj, 750, SmallPKData, 12),
(pk_obj, 751, SmallPKData, -12),
(pk_obj, 752, SmallPKData, 0),
# (pk_obj, 760, TextPKData, """This is a long piece of text.
# It contains line breaks.
# Several of them.
# The end."""),
# (pk_obj, 770, TimePKData, datetime.time(10,42,37)),
# (pk_obj, 790, XMLPKData, "<foo></foo>"),
(data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006,6,16,10,42,37)),
(data_obj, 810, ModifyingSaveData, 42),
(inherited_obj, 900, InheritAbstractModel, {'child_data':37,'parent_data':42}),
(inherited_obj, 910, ExplicitInheritBaseModel, {'child_data':37,'parent_data':42}),
(inherited_obj, 920, InheritBaseModel, {'child_data':37,'parent_data':42}),
(data_obj, 1000, BigIntegerData, 9223372036854775807),
(data_obj, 1001, BigIntegerData, -9223372036854775808),
(data_obj, 1002, BigIntegerData, 0),
(data_obj, 1003, BigIntegerData, None),
(data_obj, 1004, LengthModel, 0),
(data_obj, 1005, LengthModel, 1),
]
natural_key_test_data = [
    (data_obj, 1100, NaturalKeyAnchor, "Natural Key Anchor"),
(fk_obj, 1101, FKDataNaturalKey, 1100),
(fk_obj, 1102, FKDataNaturalKey, None),
]
# Because Oracle treats the empty string as NULL, Oracle is expected to fail
# when field.empty_strings_allowed is True and the value is None; skip these
# tests.
if connection.features.interprets_empty_strings_as_nulls:
test_data = [data for data in test_data
if not (data[0] == data_obj and
data[2]._meta.get_field('data').empty_strings_allowed and
data[3] is None)]
# Regression test for #8651 -- a FK to an object with PK of 0.
# This won't work on MySQL since it won't let you create an object
# with a primary key of 0.
if connection.features.allows_primary_key_0:
test_data.extend([
(data_obj, 0, Anchor, "Anchor 0"),
(fk_obj, 465, FKData, 0),
])
# Dynamically create serializer tests to ensure that all
# registered serializers are automatically tested.
class SerializerTests(TestCase):
def test_get_unknown_serializer(self):
"""
#15889: get_serializer('nonsense') raises a SerializerDoesNotExist
"""
with self.assertRaises(SerializerDoesNotExist):
serializers.get_serializer("nonsense")
with self.assertRaises(KeyError):
serializers.get_serializer("nonsense")
# SerializerDoesNotExist is instantiated with the nonexistent format
with self.assertRaises(SerializerDoesNotExist) as cm:
serializers.get_serializer("nonsense")
self.assertEqual(cm.exception.args, ("nonsense",))
    def test_unregister_unknown_serializer(self):
with self.assertRaises(SerializerDoesNotExist):
serializers.unregister_serializer("nonsense")
    def test_get_unknown_deserializer(self):
with self.assertRaises(SerializerDoesNotExist):
serializers.get_deserializer("nonsense")
def test_json_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("json", """[{"pk":1}"""):
pass
@skipUnless(yaml, "PyYAML not installed")
def test_yaml_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("yaml", "{"):
pass
def test_serialize_proxy_model(self):
BaseModel.objects.create(parent_data=1)
base_objects = BaseModel.objects.all()
proxy_objects = ProxyBaseModel.objects.all()
proxy_proxy_objects = ProxyProxyBaseModel.objects.all()
base_data = serializers.serialize("json", base_objects)
proxy_data = serializers.serialize("json", proxy_objects)
proxy_proxy_data = serializers.serialize("json", proxy_proxy_objects)
self.assertEqual(base_data, proxy_data.replace('proxy', ''))
self.assertEqual(base_data, proxy_proxy_data.replace('proxy', ''))
def serializerTest(format, self):
# Create all the objects defined in the test data
objects = []
instance_count = {}
for (func, pk, klass, datum) in test_data:
with connection.constraint_checks_disabled():
objects.extend(func[0](pk, klass, datum))
# Get a count of the number of objects created for each class
    for (func, pk, klass, datum) in test_data:
        instance_count[klass] = klass.objects.count()
# Add the generic tagged objects to the object list
objects.extend(Tag.objects.all())
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for (func, pk, klass, datum) in test_data:
func[1](self, pk, klass, datum)
# Assert that the number of objects deserialized is the
# same as the number that was serialized.
for klass, count in instance_count.items():
self.assertEqual(count, klass.objects.count())
if connection.vendor == 'mysql' and six.PY3:
# Existing MySQL DB-API drivers fail on binary data.
serializerTest = expectedFailure(serializerTest)
def naturalKeySerializerTest(format, self):
# Create all the objects defined in the test data
objects = []
instance_count = {}
for (func, pk, klass, datum) in natural_key_test_data:
with connection.constraint_checks_disabled():
objects.extend(func[0](pk, klass, datum))
# Get a count of the number of objects created for each class
    for (func, pk, klass, datum) in natural_key_test_data:
        instance_count[klass] = klass.objects.count()
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2,
use_natural_keys=True)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for (func, pk, klass, datum) in natural_key_test_data:
func[1](self, pk, klass, datum)
# Assert that the number of objects deserialized is the
# same as the number that was serialized.
for klass, count in instance_count.items():
self.assertEqual(count, klass.objects.count())
def fieldsTest(format, self):
obj = ComplexModel(field1='first', field2='second', field3='third')
obj.save_base(raw=True)
# Serialize then deserialize the test database
serialized_data = serializers.serialize(format, [obj], indent=2, fields=('field1','field3'))
result = next(serializers.deserialize(format, serialized_data))
# Check that the deserialized object contains data in only the serialized fields.
self.assertEqual(result.object.field1, 'first')
self.assertEqual(result.object.field2, '')
self.assertEqual(result.object.field3, 'third')
def streamTest(format, self):
obj = ComplexModel(field1='first',field2='second',field3='third')
obj.save_base(raw=True)
# Serialize the test database to a stream
for stream in (six.StringIO(), HttpResponse()):
serializers.serialize(format, [obj], indent=2, stream=stream)
# Serialize normally for a comparison
string_data = serializers.serialize(format, [obj], indent=2)
# Check that the two are the same
if isinstance(stream, six.StringIO):
self.assertEqual(string_data, stream.getvalue())
else:
self.assertEqual(string_data, stream.content.decode('utf-8'))
for format in serializers.get_serializer_formats():
setattr(SerializerTests, 'test_' + format + '_serializer', curry(serializerTest, format))
setattr(SerializerTests, 'test_' + format + '_natural_key_serializer', curry(naturalKeySerializerTest, format))
setattr(SerializerTests, 'test_' + format + '_serializer_fields', curry(fieldsTest, format))
if format != 'python':
setattr(SerializerTests, 'test_' + format + '_serializer_stream', curry(streamTest, format))
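# With the built-in formats, the loop above registers e.g. test_json_serializer,
# test_xml_serializer and (when PyYAML is installed) test_yaml_serializer, plus
# the matching natural-key, fields and stream variants.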
class XmlDeserializerSecurityTests(TestCase):
def test_no_dtd(self):
"""
The XML deserializer shouldn't allow a DTD.
This is the most straightforward way to prevent all entity definitions
and avoid both external entities and entity-expansion attacks.
"""
xml = '<?xml version="1.0" standalone="no"?><!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
with self.assertRaises(DTDForbidden):
next(serializers.deserialize('xml', xml))
| bsd-3-clause | -7,877,821,534,166,548,000 | 38.826007 | 115 | 0.668935 | false |
ErickMurillo/geodjango-example | world/migrations/0001_initial.py | 1 | 1422 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='WorldBorder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('area', models.IntegerField()),
('pop2005', models.IntegerField(verbose_name=b'Population 2005')),
('fips', models.CharField(max_length=2, verbose_name=b'FIPS Code')),
('iso2', models.CharField(max_length=2, verbose_name=b'2 Digit ISO')),
('iso3', models.CharField(max_length=3, verbose_name=b'3 Digit ISO')),
('un', models.IntegerField(verbose_name=b'United Nations Code')),
('region', models.IntegerField(verbose_name=b'Region Code')),
('subregion', models.IntegerField(verbose_name=b'Sub-Region Code')),
('lon', models.FloatField()),
('lat', models.FloatField()),
('mpoly', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
]
| mit | -1,354,117,714,438,932,500 | 39.628571 | 114 | 0.563291 | false |
shiquanwang/numba | numba/support/cffi_support.py | 1 | 2857 | # -*- coding: utf-8 -*-
"""
Support for CFFI. Allows checking whether objects are CFFI functions and
obtaining the pointer and numba signature.
"""
from __future__ import print_function, division, absolute_import
from numba import *
from numba.minivect.minitypes import *
from numba.minivect import minitypes, minierror
try:
import cffi
ffi = cffi.FFI()
except ImportError:
ffi = None
def is_cffi_func(obj):
"Check whether the obj is a CFFI function"
try:
return type(obj) is cffi_func_type
# This is dangerous:
# >>> ffi.typeof("void (*)()")
# <ctype 'void(*)()'>
# return ffi.typeof(obj).kind == 'function'
except TypeError:
return False
def get_pointer(cffi_func):
"""
Get a pointer to the underlying function for a CFFI function as an
integer.
"""
return int(ffi.cast("uintptr_t", cffi_func))
def map_type(cffi_type):
"Map CFFI type to numba type"
if cffi_type.kind in ('struct', 'union'):
if cffi_type.kind == 'union':
result = None
else:
result = struct([(name, map_type(field_type))
for name, field_type in cffi_type.fields])
elif cffi_type.kind == 'function':
restype = map_type(cffi_type.result)
argtypes = [map_type(arg) for arg in cffi_type.args]
result = minitypes.FunctionType(restype, argtypes,
is_vararg=cffi_type.ellipsis).pointer()
else:
result = type_map.get(cffi_type)
if result is None:
raise minierror.UnmappableTypeError(cffi_type)
return result
def get_signature(cffi_func):
"Get the numba signature for a CFFI function"
return map_type(ffi.typeof(cffi_func)).base_type
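# Illustrative sketch: for the printf handle cdef'd at the bottom of this
# module, get_signature(lib.printf) returns a numba function type equivalent
# to int_(c_string_type) with is_vararg=True.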
if ffi is None:
# Disable cffi support
is_cffi_func = lambda x: False
type_map = None
else:
type_map = {
ffi.typeof('char') : char,
ffi.typeof('short') : short,
ffi.typeof('int') : int_,
ffi.typeof('long') : long_,
ffi.typeof('long long') : longlong,
ffi.typeof('unsigned char') : uchar,
ffi.typeof('unsigned short') : ushort,
ffi.typeof('unsigned int') : uint,
ffi.typeof('unsigned long') : ulong,
ffi.typeof('unsigned long long') : ulonglong,
ffi.typeof('float') : float_,
ffi.typeof('double') : double,
ffi.typeof('long double') : longdouble,
ffi.typeof('char *') : c_string_type,
ffi.typeof('ssize_t') : Py_ssize_t,
ffi.typeof('size_t') : size_t,
}
ffi.cdef("int printf(char *, ...);")
lib = ffi.dlopen(None)
cffi_func_type = type(lib.printf)
| bsd-2-clause | 2,206,667,430,541,992,200 | 31.83908 | 79 | 0.559328 | false |
CosmosID/cosmosid-cli | cosmosid/api/analysis.py | 1 | 3803 | """Representation of Analysis."""
import logging
import requests
from cosmosid.api.files import Runs
from cosmosid.helpers.exceptions import (AuthenticationFailed,
CosmosidException,
NotFoundException)
LOGGER = logging.getLogger(__name__)
class Analysis(object):
"""Runs analysis interface."""
__resource_path = '/api/metagenid/v1/runs/{run_id}/analysis'
def __init__(self, base_url=None, api_key=None):
self.base_url = base_url
self.logger = LOGGER
self.header = {'X-Api-Key': api_key}
self.request_url = "{}{}".format(self.base_url, self.__resource_path)
self.runs = Runs(base_url=self.base_url,
api_key=self.header['X-Api-Key'])
def __is_runid_in_file(self, run_id, file_id):
"""Get given run meta and check is the run in sample."""
single_run = self.runs.get_single_run(run_id)
if single_run:
if single_run['status']:
if single_run['file']['id'] == file_id:
return True
return False
def __get_analysis_by_file_id(self, file_id):
last_run = self.runs.get_last_run_for_file(file_id)
result_data = None
if last_run:
result_data = self.__get_analysis_by_run_id(last_run['id'])
return result_data
def __get_analysis_by_run_id(self, run_id):
request_url = self.request_url.format(run_id=run_id)
try:
single_run_meta = self.runs.get_single_run(run_id)
if not single_run_meta:
raise CosmosidException('Response from service is empty for '
'run id %s' % run_id)
if not single_run_meta['status']:
raise NotFoundException(single_run_meta['message'])
results = requests.get(request_url, headers=self.header)
if results.status_code == 403:
raise AuthenticationFailed('Authentication Failed. '
'Wrong API Key.')
if results.status_code == 404:
result_data = results.json()
result_data.update({'status': 0})
result_data.update({'run_meta': single_run_meta})
return result_data
            if results.status_code == requests.codes.ok:
result_data = results.json()
result_data.update({'status': 1})
result_data.update({'run_meta': single_run_meta})
return result_data
results.raise_for_status()
except AuthenticationFailed:
self.logger.error('Authentication Failed')
except NotFoundException:
self.logger.error('Not Found')
except CosmosidException:
self.logger.error('Got Analysis data exception.')
except requests.exceptions.RequestException:
self.logger.debug('Debug', exc_info=True)
            self.logger.error('Error occurred during request')
self.logger.error('Response Status Code: %s', results.status_code)
def get_list(self, file_id=None, run_id=None):
"""Get analysis data.
cli analysis --id ID
"""
if file_id and run_id:
if self.__is_runid_in_file(run_id, file_id):
return self.__get_analysis_by_run_id(run_id)
            msg = 'File %s does not contain Run %s' % (file_id, run_id)
return {'status': 0,
'message': msg}
elif run_id and not file_id:
return self.__get_analysis_by_run_id(run_id)
elif file_id and not run_id:
return self.__get_analysis_by_file_id(file_id)
| mit | 7,192,464,085,135,019,000 | 40.336957 | 78 | 0.546148 | false |
dongweiming/flask_reveal | social/strategies/base.py | 1 | 6213 | import time
import random
import hashlib
from social.utils import setting_name
from social.store import OpenIdStore
class BaseTemplateStrategy(object):
def __init__(self, strategy):
self.strategy = strategy
def render(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
if tpl:
return self.render_template(tpl, context)
else:
return self.render_string(html, context)
def render_template(self, tpl, context):
raise NotImplementedError('Implement in subclass')
def render_string(self, html, context):
raise NotImplementedError('Implement in subclass')
class BaseStrategy(object):
ALLOWED_CHARS = 'abcdefghijklmnopqrstuvwxyz' \
'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
'0123456789'
def __init__(self, backend=None, storage=None, request=None, tpl=None,
backends=None, *args, **kwargs):
tpl = tpl or BaseTemplateStrategy
if not isinstance(tpl, BaseTemplateStrategy):
tpl = tpl(self)
self.tpl = tpl
self.request = request
self.storage = storage
self.backends = backends
if backend:
self.backend_name = backend.name
self.backend = backend(strategy=self, *args, **kwargs)
else:
self.backend_name = None
self.backend = backend
def setting(self, name, default=None):
names = (setting_name(self.backend_name, name),
setting_name(name),
name)
for name in names:
try:
return self.get_setting(name)
except (AttributeError, KeyError):
pass
return default
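        # Lookup order sketch (assuming the usual SOCIAL_AUTH_ prefixing done
        # by social.utils.setting_name): for backend "github" and name "KEY"
        # this tries SOCIAL_AUTH_GITHUB_KEY, then SOCIAL_AUTH_KEY, then KEY.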
def start(self):
# Clean any partial pipeline info before starting the process
self.clean_partial_pipeline()
if self.backend.uses_redirect():
return self.redirect(self.backend.auth_url())
else:
return self.html(self.backend.auth_html())
def complete(self, *args, **kwargs):
return self.backend.auth_complete(*args, **kwargs)
def continue_pipeline(self, *args, **kwargs):
return self.backend.continue_pipeline(*args, **kwargs)
def disconnect(self, user, association_id=None):
self.storage.user.disconnect(name=self.backend.name, user=user,
association_id=association_id)
def authenticate(self, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = self.backend
return self.backend.authenticate(*args, **kwargs)
def create_user(self, *args, **kwargs):
return self.storage.user.create_user(*args, **kwargs)
def get_user(self, *args, **kwargs):
return self.storage.user.get_user(*args, **kwargs)
def session_setdefault(self, name, value):
self.session_set(name, value)
return self.session_get(name)
def to_session(self, next, backend, *args, **kwargs):
return {
'next': next,
'backend': backend.name,
'args': args,
'kwargs': kwargs
}
def from_session(self, session):
return session['next'], session['backend'], \
session['args'], session['kwargs']
def clean_partial_pipeline(self):
self.session_pop('partial_pipeline')
def openid_store(self):
return OpenIdStore(self)
def get_pipeline(self):
return self.setting('PIPELINE', (
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
))
def random_string(self, length=12, chars=ALLOWED_CHARS):
# Implementation borrowed from django 1.4
try:
random.SystemRandom()
except NotImplementedError:
key = self.setting('SECRET_KEY', '')
seed = '%s%s%s' % (random.getstate(), time.time(), key)
random.seed(hashlib.sha256(seed.encode()).digest())
return ''.join([random.choice(chars) for i in range(length)])
def is_integrity_error(self, exception):
return self.storage.is_integrity_error(exception)
# Implement the following methods on strategies sub-classes
def redirect(self, url):
"""Return a response redirect to the given URL"""
raise NotImplementedError('Implement in subclass')
def get_setting(self, name):
"""Return value for given setting name"""
raise NotImplementedError('Implement in subclass')
def html(self, content):
"""Return HTTP response with given content"""
raise NotImplementedError('Implement in subclass')
def render_html(self, tpl=None, html=None, context=None):
"""Render given template or raw html with given context"""
return self.tpl.render(tpl, html, context)
def request_data(self, merge=True):
"""Return current request data (POST or GET)"""
raise NotImplementedError('Implement in subclass')
def request_host(self):
"""Return current host value"""
raise NotImplementedError('Implement in subclass')
def session_get(self, name, default=None):
"""Return session value for given key"""
raise NotImplementedError('Implement in subclass')
def session_set(self, name, value):
"""Set session value for given key"""
raise NotImplementedError('Implement in subclass')
def session_pop(self, name):
"""Pop session value for given key"""
raise NotImplementedError('Implement in subclass')
def build_absolute_uri(self, path=None):
"""Build absolute URI with given (optional) path"""
raise NotImplementedError('Implement in subclass')
def is_response(self, value):
raise NotImplementedError('Implement in subclass')
| bsd-3-clause | -2,883,652,518,667,524,000 | 33.709497 | 74 | 0.617576 | false |
Diex/vamosxpartes | libs/v2-production/tests/find-untested.py | 2 | 1127 | #!/usr/bin/env python
"""
Find classes and functions in ../src which have no test case.
usage: find-untested.py [-l]
Arguments:
-l Long list
(Run in 'tests' directory)
"""
import os
import sys
import re
import glob
def findclasses():
classes = []
dirs = {}
for dir, subdirs, files in os.walk('../src'):
if '.svn' in dir:
continue
for f in files:
if not f.endswith('.as'):
continue
klass = f[:-3]
classes.append(klass)
dirs[klass] = dir[len('../src/com/qb9/flashlib/'):]
return classes, dirs
def readtestcode():
testcode = ''
for f in glob.glob('*.as'):
testcode += file(f).read()
return testcode
def found(klass, testcode):
    return re.search(r'\b' + klass + r'\b', testcode) is not None
def find_untested(classes, testcode, dirs):
untested = []
for klass in classes:
if not found(klass, testcode):
untested.append((dirs[klass] + '/' + klass).replace('/', '.'))
return untested
classes, dirs = findclasses()
testcode = readtestcode()
delim = ' '
if len(sys.argv) > 1 and sys.argv[1] == '-l':
delim = '\n'
print delim.join(find_untested(classes, testcode, dirs))
| gpl-2.0 | -7,678,306,479,719,869,000 | 18.431034 | 65 | 0.642413 | false |
FEniCS/ufl | ufl/finiteelement/hdivcurl.py | 1 | 4145 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2016 Andrew T. T. McRae
#
# This file is part of UFL (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by Massimiliano Leoni, 2016
from ufl.finiteelement.finiteelementbase import FiniteElementBase
from ufl.sobolevspace import HDiv, HCurl
class HDivElement(FiniteElementBase):
"""A div-conforming version of an outer product element, assuming
this makes mathematical sense."""
__slots__ = ("_element",)
def __init__(self, element):
self._element = element
self._repr = "HDivElement(%s)" % repr(element)
family = "TensorProductElement"
cell = element.cell()
degree = element.degree()
quad_scheme = element.quadrature_scheme()
value_shape = (element.cell().geometric_dimension(),)
reference_value_shape = (element.cell().topological_dimension(),)
# Skipping TensorProductElement constructor! Bad code smell, refactor to avoid this non-inheritance somehow.
FiniteElementBase.__init__(self, family, cell, degree,
quad_scheme, value_shape, reference_value_shape)
def mapping(self):
return "contravariant Piola"
def sobolev_space(self):
"Return the underlying Sobolev space."
return HDiv
def reconstruct(self, **kwargs):
return HDivElement(self._element.reconstruct(**kwargs))
def __str__(self):
return "HDivElement(%s)" % str(self._element)
def shortstr(self):
"Format as string for pretty printing."
return "HDivElement(%s)" % str(self._element.shortstr())
class HCurlElement(FiniteElementBase):
"""A curl-conforming version of an outer product element, assuming
this makes mathematical sense."""
__slots__ = ("_element",)
def __init__(self, element):
self._element = element
self._repr = "HCurlElement(%s)" % repr(element)
family = "TensorProductElement"
cell = element.cell()
degree = element.degree()
quad_scheme = element.quadrature_scheme()
cell = element.cell()
value_shape = (cell.geometric_dimension(),)
reference_value_shape = (cell.topological_dimension(),) # TODO: Is this right?
# Skipping TensorProductElement constructor! Bad code smell,
# refactor to avoid this non-inheritance somehow.
FiniteElementBase.__init__(self, family, cell, degree, quad_scheme,
value_shape, reference_value_shape)
def mapping(self):
return "covariant Piola"
def sobolev_space(self):
"Return the underlying Sobolev space."
return HCurl
def reconstruct(self, **kwargs):
return HCurlElement(self._element.reconstruct(**kwargs))
def __str__(self):
return "HCurlElement(%s)" % str(self._element)
def shortstr(self):
"Format as string for pretty printing."
return "HCurlElement(%s)" % str(self._element.shortstr())
class WithMapping(FiniteElementBase):
"""Specify an alternative mapping for the wrappee. For example,
to use identity mapping instead of Piola map with an element E,
write
remapped = WithMapping(E, "identity")
"""
def __init__(self, wrapee, mapping):
self._repr = "WithMapping(%s, %s)" % (repr(wrapee), mapping)
self._mapping = mapping
self.wrapee = wrapee
def __getattr__(self, attr):
try:
return getattr(self.wrapee, attr)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, attr))
def mapping(self):
return self._mapping
def reconstruct(self, **kwargs):
mapping = kwargs.pop("mapping", self._mapping)
wrapee = self.wrapee.reconstruct(**kwargs)
return type(self)(wrapee, mapping)
def __str__(self):
return "WithMapping(%s, mapping=%s)" % (self.wrapee, self._mapping)
def shortstr(self):
return "WithMapping(%s, %s)" % (self.wrapee.shortstr(), self._mapping)
| lgpl-3.0 | 472,424,525,507,834,050 | 33.256198 | 116 | 0.626055 | false |
python-control/python-control | control/tests/modelsimp_test.py | 1 | 9062 | """modelsimp_array_test.py - test model reduction functions
RMM, 30 Mar 2011 (based on TestModelSimp from v0.4a)
"""
import numpy as np
import pytest
from control import StateSpace, forced_response, tf, rss, c2d
from control.exception import ControlMIMONotImplemented
from control.tests.conftest import slycotonly, matarrayin
from control.modelsimp import balred, hsvd, markov, modred
class TestModelsimp:
"""Test model reduction functions"""
@slycotonly
def testHSVD(self, matarrayout, matarrayin):
A = matarrayin([[1., -2.], [3., -4.]])
B = matarrayin([[5.], [7.]])
C = matarrayin([[6., 8.]])
D = matarrayin([[9.]])
sys = StateSpace(A, B, C, D)
hsv = hsvd(sys)
hsvtrue = np.array([24.42686, 0.5731395]) # from MATLAB
np.testing.assert_array_almost_equal(hsv, hsvtrue)
# test for correct return type: ALWAYS return ndarray, even when
# use_numpy_matrix(True) was used
assert isinstance(hsv, np.ndarray)
assert not isinstance(hsv, np.matrix)
def testMarkovSignature(self, matarrayout, matarrayin):
U = matarrayin([[1., 1., 1., 1., 1.]])
Y = U
m = 3
H = markov(Y, U, m, transpose=False)
Htrue = np.array([[1., 0., 0.]])
np.testing.assert_array_almost_equal(H, Htrue)
# Make sure that transposed data also works
H = markov(np.transpose(Y), np.transpose(U), m, transpose=True)
np.testing.assert_array_almost_equal(H, np.transpose(Htrue))
# Generate Markov parameters without any arguments
H = markov(Y, U, m)
np.testing.assert_array_almost_equal(H, Htrue)
# Test example from docstring
T = np.linspace(0, 10, 100)
U = np.ones((1, 100))
T, Y = forced_response(tf([1], [1, 0.5], True), T, U)
H = markov(Y, U, 3, transpose=False)
# Test example from issue #395
inp = np.array([1, 2])
outp = np.array([2, 4])
mrk = markov(outp, inp, 1, transpose=False)
# Make sure MIMO generates an error
U = np.ones((2, 100)) # 2 inputs (Y unchanged, with 1 output)
with pytest.raises(ControlMIMONotImplemented):
markov(Y, U, m)
# Make sure markov() returns the right answer
@pytest.mark.parametrize("k, m, n",
[(2, 2, 2),
(2, 5, 5),
(5, 2, 2),
(5, 5, 5),
(5, 10, 10)])
def testMarkovResults(self, k, m, n):
#
# Test over a range of parameters
#
# k = order of the system
# m = number of Markov parameters
# n = size of the data vector
#
        # Values *should* match exactly for n = m, otherwise you get a
# close match but errors due to the assumption that C A^k B =
# 0 for k > m-2 (see modelsimp.py).
#
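        # For the sampled system the Markov parameters are
        #     M = [D, C B, C A B, ..., C A^(m-2) B]
        # which is exactly what Mtrue assembles below.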
# Generate stable continuous time system
Hc = rss(k, 1, 1)
# Choose sampling time based on fastest time constant / 10
w, _ = np.linalg.eig(Hc.A)
Ts = np.min(-np.real(w)) / 10.
# Convert to a discrete time system via sampling
Hd = c2d(Hc, Ts, 'zoh')
# Compute the Markov parameters from state space
Mtrue = np.hstack([Hd.D] + [np.dot(
Hd.C, np.dot(np.linalg.matrix_power(Hd.A, i),
Hd.B)) for i in range(m-1)])
# Generate input/output data
T = np.array(range(n)) * Ts
U = np.cos(T) + np.sin(T/np.pi)
_, Y = forced_response(Hd, T, U, squeeze=True)
Mcomp = markov(Y, U, m)
# Compare to results from markov()
# experimentally determined probability to get non matching results
# with rtot=1e-6 and atol=1e-8 due to numerical errors
# for k=5, m=n=10: 0.015 %
np.testing.assert_allclose(Mtrue, Mcomp, rtol=1e-6, atol=1e-8)
def testModredMatchDC(self, matarrayin):
#balanced realization computed in matlab for the transfer function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-1.958, -1.194, 1.824, -1.464],
[-1.194, -0.8344, 2.563, -1.351],
[-1.824, -2.563, -1.124, 2.704],
[-1.464, -1.351, -2.704, -11.08]])
B = matarrayin([[-0.9057], [-0.4068], [-0.3263], [-0.3474]])
C = matarrayin([[-0.9057, -0.4068, 0.3263, -0.3474]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
rsys = modred(sys,[2, 3],'matchdc')
Artrue = np.array([[-4.431, -4.552], [-4.552, -5.361]])
Brtrue = np.array([[-1.362], [-1.031]])
Crtrue = np.array([[-1.362, -1.031]])
Drtrue = np.array([[-0.08384]])
np.testing.assert_array_almost_equal(rsys.A, Artrue, decimal=3)
np.testing.assert_array_almost_equal(rsys.B, Brtrue, decimal=3)
np.testing.assert_array_almost_equal(rsys.C, Crtrue, decimal=3)
np.testing.assert_array_almost_equal(rsys.D, Drtrue, decimal=2)
def testModredUnstable(self, matarrayin):
"""Check if an error is thrown when an unstable system is given"""
A = matarrayin(
[[4.5418, 3.3999, 5.0342, 4.3808],
[0.3890, 0.3599, 0.4195, 0.1760],
[-4.2117, -3.2395, -4.6760, -4.2180],
[0.0052, 0.0429, 0.0155, 0.2743]])
B = matarrayin([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
C = matarrayin([[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
D = matarrayin([[0.0, 0.0], [0.0, 0.0]])
sys = StateSpace(A, B, C, D)
np.testing.assert_raises(ValueError, modred, sys, [2, 3])
def testModredTruncate(self, matarrayin):
#balanced realization computed in matlab for the transfer function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-1.958, -1.194, 1.824, -1.464],
[-1.194, -0.8344, 2.563, -1.351],
[-1.824, -2.563, -1.124, 2.704],
[-1.464, -1.351, -2.704, -11.08]])
B = matarrayin([[-0.9057], [-0.4068], [-0.3263], [-0.3474]])
C = matarrayin([[-0.9057, -0.4068, 0.3263, -0.3474]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
rsys = modred(sys,[2, 3],'truncate')
Artrue = np.array([[-1.958, -1.194], [-1.194, -0.8344]])
Brtrue = np.array([[-0.9057], [-0.4068]])
Crtrue = np.array([[-0.9057, -0.4068]])
Drtrue = np.array([[0.]])
np.testing.assert_array_almost_equal(rsys.A, Artrue)
np.testing.assert_array_almost_equal(rsys.B, Brtrue)
np.testing.assert_array_almost_equal(rsys.C, Crtrue)
np.testing.assert_array_almost_equal(rsys.D, Drtrue)
@slycotonly
def testBalredTruncate(self, matarrayin):
        # controllable canonical realization computed in matlab for the transfer
# function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-15., -7.5, -6.25, -1.875],
[8., 0., 0., 0.],
[0., 4., 0., 0.],
[0., 0., 1., 0.]])
B = matarrayin([[2.], [0.], [0.], [0.]])
C = matarrayin([[0.5, 0.6875, 0.7031, 0.5]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
orders = 2
rsys = balred(sys, orders, method='truncate')
Artrue = np.array([[-1.958, -1.194], [-1.194, -0.8344]])
Brtrue = np.array([[0.9057], [0.4068]])
Crtrue = np.array([[0.9057, 0.4068]])
Drtrue = np.array([[0.]])
np.testing.assert_array_almost_equal(rsys.A, Artrue, decimal=2)
np.testing.assert_array_almost_equal(rsys.B, Brtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.C, Crtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.D, Drtrue, decimal=4)
@slycotonly
def testBalredMatchDC(self, matarrayin):
        # controllable canonical realization computed in matlab for the transfer
# function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-15., -7.5, -6.25, -1.875],
[8., 0., 0., 0.],
[0., 4., 0., 0.],
[0., 0., 1., 0.]])
B = matarrayin([[2.], [0.], [0.], [0.]])
C = matarrayin([[0.5, 0.6875, 0.7031, 0.5]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
orders = 2
rsys = balred(sys,orders,method='matchdc')
Artrue = np.array(
[[-4.43094773, -4.55232904],
[-4.55232904, -5.36195206]])
Brtrue = np.array([[1.36235673], [1.03114388]])
Crtrue = np.array([[1.36235673, 1.03114388]])
Drtrue = np.array([[-0.08383902]])
np.testing.assert_array_almost_equal(rsys.A, Artrue, decimal=2)
np.testing.assert_array_almost_equal(rsys.B, Brtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.C, Crtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.D, Drtrue, decimal=4)
| bsd-3-clause | -5,015,453,693,631,061,000 | 39.81982 | 79 | 0.537188 | false |
lincolnloop/salt-stats | salt/_returners/influxdb_return.py | 1 | 2217 | # -*- coding: utf-8 -*-
'''
Salt returner that reports stats to InfluxDB. The returner will
inspect the payload, coercing values into floats where possible.
Pillar needs something like::
influxdb_returner:
url: http://localhost:8086
user: root
password: root
database: salt
'''
import base64
import fnmatch
import logging
import json
import urllib2
logger = logging.getLogger(__name__)
__virtualname__ = 'influxdb'
def __virtual__():
return __virtualname__
def _flatten_values(obj, base=None):
"""
Recursive function to flatten dictionaries and
coerce values to floats.
"""
flattened = {}
# convert list to dictionary
if isinstance(obj, list):
obj = dict([(str(pair[0]), pair[1]) for pair in enumerate(obj)])
elif not isinstance(obj, dict):
obj = {'value': obj}
for key, item in obj.items():
key = base and '.'.join([base, key]) or key
if isinstance(item, dict):
flattened.update(_flatten_values(item, base=key))
else:
try:
flattened[key] = float(item)
except ValueError:
flattened[key] = item
return flattened
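# Illustrative example of the flattening above (values are hypothetical, not a
# real Salt return payload):
#
#   _flatten_values({'cpu': {'load': '1.5', 'cores': 2}, 'up': True})
#   -> {'cpu.load': 1.5, 'cpu.cores': 2.0, 'up': 1.0}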
def returner(ret):
config = __pillar__.get('influxdb_returner', {})
user = config.get('user', '')
password = config.get('password', '')
database = config.get('database', '')
host = config.get('url', '')
data = _flatten_values(ret['return'])
series = "{host}-{function}".format(host=ret['id'], function=ret['fun'])
logger.debug("InfluxDB series name: %s", series)
payload = json.dumps([{
'name': series,
'columns': data.keys(),
'points': [data.values()],
}])
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
url = "{host}/db/{db}/series?u={user}&p={pw}".format(
host=host, db=database, user=user, pw=password)
req = urllib2.Request(url, payload, headers)
    try:
        handler = urllib2.urlopen(req)
        logger.debug("InfluxDB responded %s", handler.getcode())
        handler.close()
    except urllib2.HTTPError as exp:
        logger.error("InfluxDB request failed with code %s", exp.code)
| mit | 6,890,723,242,404,881,000 | 27.792208 | 76 | 0.603067 | false |
rahulraj/web_projects | assignment2/src/photogallery/generator/gallerygenerator.py | 1 | 6537 | import os
import getopt
import sys
from ..utils.inject import assign_injectables
from ..utils.immutabledict import ImmutableDict
from manifestparser import ManifestParser
from galleryitemfactory import GalleryItemFactory
import exporter
import templatewriter
import copier
class GalleryGenerator(object):
"""
The top level class for the application. This is the only object
that the main function interacts with.
"""
def __init__(self, gallery_item_factory, input_directory, output_directory,
static_files_directory, exporter, template_writer):
"""
Constructor for GalleryGenerator. All needed service objects are injected.
Args:
gallery_item_factory the GalleryItemFactory that creates the items.
input_directory the path of the directory to start in.
output_directory the directory to which files should be written.
static_files_directory the directory containing static files to copy over.
exporter the Exporter to populate the templates.
template_writer the object that writes the templates to disk.
"""
assign_injectables(self, locals())
def run(self):
top_jpeg_directory = \
self.gallery_item_factory.create_directory(self.input_directory)
populated_templates = self.exporter.export(top_jpeg_directory)
self.template_writer.write_templates(populated_templates)
# We need to copy the JPEGs over too, and the CSS
copier.copy_jpegs(self.input_directory, self.output_directory)
copier.copy_css(self.static_files_directory, self.output_directory)
# Also, if there are scripts that enhance the experience,
# copy them over too.
copier.copy_javascript(self.static_files_directory, self.output_directory)
# Also grab a copy of directory_image.jpg
copier.copy_jpegs(self.static_files_directory, self.output_directory)
# And make a symlink for browsing convenience.
self.symlink_index(self.output_directory,
top_jpeg_directory.get_output_file_name())
def symlink_index(self, output_directory, file_name):
"""
Symlink "index.html" to file_name. Presumably, file_name is the top-level
page. This way, the page will appear when someone navigates to the directory
in a web browser.
Args:
file_name the name of the file to symlink to.
"""
full_link_name = os.path.join(output_directory, 'index.html')
try:
os.symlink(file_name, full_link_name)
except OSError:
print 'You already have a file named index.html in the output ' + \
'directory, so the symlink failed.'
print "I'll assume that there was a specific page that you wanted to" + \
          ' display when the user points a browser at the output directory.'
print 'Skipping the symlink...'
def create_gallery_generator(command_line_arguments, css_directory):
"""
Given command line arguments, wire up the application and return
it to the main function. This requires creating most of the objects
described in the other files from this directory.
Args:
command_line_arguments the command line arguments with the program
name removed.
css_directory the directory containing the CSS files.
"""
input_data = parse_command_line_arguments(command_line_arguments)
# First parse the manifest file
with open(input_data['manifest_file'], 'r') as manifest_file:
parser = ManifestParser(manifest_file)
lookup_table = parser.get_json_data()
factory = GalleryItemFactory(lookup_table, input_data['should_prompt'])
template_exporter = exporter.create_photo_directory_exporter()
template_writer = \
templatewriter.create_template_writer(input_data['output_directory'])
return GalleryGenerator(gallery_item_factory=factory,
input_directory=input_data['input_directory'],
output_directory=input_data['output_directory'],
static_files_directory=css_directory,
exporter=template_exporter,
template_writer=template_writer)
def parse_command_line_arguments(command_line_arguments):
"""
Acceptable command line arguments are:
-h, --help -> Prints a help message
-i, --input-directory -> The root directory for the gallery (required)
-o, --output-directory -> the output directory for the HTML (required)
-n, --no-prompt -> Automatically use inferred names for directories,
instead of prompting the user.
Args:
command_line_arguments the command line arguments with the program
name removed.
"""
try:
options, arguments = getopt.getopt(command_line_arguments,
"hi:o:m:n", ['help', 'input-directory=', 'output-directory=',
'manifest-file=', 'no-prompt'])
except getopt.GetoptError:
print_usage()
sys.exit(2)
input_data = {'should_prompt': True}
for option, argument in options:
if option in ('-h', '--help'):
print_usage()
sys.exit(0)
elif option in ('-i', '--input-directory'):
if os.path.isdir(argument):
input_data['input_directory'] = argument
else:
print argument, "doesn't appear to be a directory."
print_usage()
sys.exit(1)
elif option in ('-o', '--output-directory'):
input_data['output_directory'] = argument
elif option in ('-m', '--manifest-file'):
if os.path.isfile(argument):
input_data['manifest_file'] = argument
else:
print argument, "file couldn't be read for some reason."
print_usage()
sys.exit(1)
elif option in ('-n', '--no-prompt'):
input_data['should_prompt'] = False
if 'input_directory' not in input_data \
or 'output_directory' not in input_data \
or 'manifest_file' not in input_data:
print_usage()
sys.exit(1)
return ImmutableDict(input_data)
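# A hypothetical invocation accepted by the parser above (the script and file
# names are illustrative, not part of this module):
#
#   python rungallery.py -i my_pictures/ -o my_site/ -m manifest.json -n
#
# which yields ImmutableDict({'input_directory': 'my_pictures/',
#                             'output_directory': 'my_site/',
#                             'manifest_file': 'manifest.json',
#                             'should_prompt': False})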
def print_usage():
print "Please call this script with the following arguments:"
print "-i my_pictures/ where my_pictures is the directory containing " + \
"the JPEGs to render (long form: --input-directory=)"
print "-o my_site/ where my_site is the directory in which to " + \
"write the output files (long form: --output-directory=)"
print "-m manifest.json where manifest.json is a manifest file " + \
"describing the JPEGs' metadata as a JSON string (long form:" + \
"--manifest_file=)"
print "-n Automatically infer directory titles instead of asking, " + \
"will ask by default. (long form: --no-prompt)"
print "Calling this script with -h or --help prints this message " + \
"and exits."
| mit | 2,190,217,508,828,373,800 | 39.602484 | 80 | 0.690684 | false |
marcopaz/is-service-up | isserviceup/celeryapp.py | 1 | 3743 | import logging
import time
import raven
from celery import Celery
from celery.utils.log import get_task_logger
from raven.conf import setup_logging
from raven.contrib.celery import register_signal, register_logger_signal
from raven.handlers.logging import SentryHandler
from isserviceup import managers
from isserviceup.config import celery as celeryconfig
from isserviceup.config import config
from isserviceup.models.favorite import Favorite
from isserviceup.notifiers.slack import Slack
from isserviceup.services import SERVICES
from isserviceup.services.models.service import Status
from isserviceup.storage.services import set_service_status, set_last_update
MAX_RETRIES = 3
DELAY_RETRY = 2
_notified_on_startup = {}
app = Celery('app')
app.config_from_object(celeryconfig)
logger = get_task_logger(__name__)
if config.SENTRY_DSN:
client = raven.Client(config.SENTRY_DSN)
register_logger_signal(client, loglevel=logging.ERROR)
register_signal(client)
# report logging errors
handler = SentryHandler(client)
setup_logging(handler)
# show sentry errors in the console
logger = logging.getLogger('sentry.errors')
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
@app.task(name='update-services-status')
def update_services_status():
set_last_update(managers.rclient, time.time())
for service_id in SERVICES:
update_service_status.delay(service_id)
@app.task(bind=True, max_retries=MAX_RETRIES)
def update_service_status(self, service_id):
service = SERVICES[service_id]
logger.info('Updating status for service {}'.format(service.name))
try:
status = service.get_status()
except Exception as exc:
if self.request.retries == MAX_RETRIES-1: # last retry
set_service_status(managers.rclient, service, Status.unavailable)
raise
else:
return self.retry(exc=exc, countdown=DELAY_RETRY)
logger.info('Service={} has status={}'.format(service.name, status.name))
old_status = set_service_status(managers.rclient, service, status)
if ((config.NOTIFY_ON_STARTUP and service.id not in _notified_on_startup)
or (old_status is not None and old_status != status)):
broadcast_status_change.delay(
service.id, old_status.name if old_status else "", status.name)
_notified_on_startup[service.id] = True
@app.task()
def broadcast_status_change(service_id, old_status, new_status):
send_all_slack_notifications.delay(service_id, old_status, new_status)
for i in range(len(config.NOTIFIERS)):
notify_status_change.delay(i, service_id, old_status, new_status)
@app.task()
def notify_status_change(idx, service_id, old_status, new_status):
service = SERVICES[service_id]
notifier = config.NOTIFIERS[idx]
notifier.notify(service, old_status, new_status)
@app.task()
def send_all_slack_notifications(service_id, old_status, new_status):
favs = Favorite.objects(service_id=service_id,
slack_webhook__ne=None,
monitored_status__all=[old_status, new_status])
if not favs:
return
for fav in favs:
send_slack_notification.delay(fav.slack_webhook, service_id, old_status, new_status)
@app.task()
def send_slack_notification(webhook_url, service_id, old_status, new_status):
service = SERVICES[service_id]
desc = config.get_status_description()
old_status_desc = desc[Status[old_status]]
new_status_desc = desc[Status[new_status]]
Slack(webhook_url).notify(service, old_status_desc, new_status_desc)
# TODO: remove webhook if the request fails X times in a row
if __name__ == '__main__':
app.start()
| apache-2.0 | -4,758,288,003,821,531,000 | 33.33945 | 92 | 0.709591 | false |
mscuthbert/abjad | abjad/tools/schemetools/Scheme.py | 1 | 10697 | # -*- encoding: utf-8 -*-
from abjad.tools import stringtools
from abjad.tools.abctools import AbjadValueObject
class Scheme(AbjadValueObject):
r'''Abjad model of Scheme code.
.. container:: example
**Example 1.** A Scheme boolean value:
::
>>> scheme = schemetools.Scheme(True)
>>> print(format(scheme))
##t
.. container:: example
    **Example 2.** A nested Scheme expression:
::
>>> scheme = schemetools.Scheme(
... ('left', (1, 2, False)),
... ('right', (1, 2, 3.3))
... )
>>> print(format(scheme))
#((left (1 2 #f)) (right (1 2 3.3)))
.. container:: example
**Example 3.** A variable-length argument:
::
>>> scheme_1 = schemetools.Scheme(1, 2, 3)
>>> scheme_2 = schemetools.Scheme((1, 2, 3))
>>> format(scheme_1) == format(scheme_2)
True
Scheme wraps nested variable-length arguments in a tuple.
.. container:: example
**Example 4.** A quoted Scheme expression:
::
>>> scheme = schemetools.Scheme((1, 2, 3), quoting="'#")
>>> print(format(scheme))
#'#(1 2 3)
Use the `quoting` keyword to prepend Scheme's various quote, unquote,
unquote-splicing characters to formatted output.
.. container:: example
**Example 5.** A Scheme expression with forced quotes:
::
>>> scheme = schemetools.Scheme('nospaces', force_quotes=True)
>>> print(format(scheme))
#"nospaces"
Use this in certain \override situations when LilyPond's Scheme
interpreter treats unquoted strings as symbols instead of strings.
The string must contain no whitespace for this to work.
.. container:: example
**Example 6.** A Scheme expression of LilyPond functions:
::
>>> function_1 = 'tuplet-number::append-note-wrapper'
>>> function_2 = 'tuplet-number::calc-denominator-text'
>>> string = schemetools.Scheme('4', force_quotes=True)
>>> scheme = schemetools.Scheme(
... function_1,
... function_2,
... string,
... )
>>> scheme
Scheme('tuplet-number::append-note-wrapper', 'tuplet-number::calc-denominator-text', Scheme('4', force_quotes=True))
>>> print(format(scheme))
#(tuplet-number::append-note-wrapper tuplet-number::calc-denominator-text "4")
.. container:: example
**Example 7.** A Scheme lambda expression of LilyPond function that
takes a markup with a quoted string argument. Setting verbatim to true
causes the expression to format exactly as-is without modifying quotes
or whitespace:
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> scheme = schemetools.Scheme(string, verbatim=True)
>>> scheme
Scheme('(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))')
>>> print(format(scheme))
#(lambda (grob) (grob-interpret-markup grob #{ \markup \musicglyph #"noteheads.s0harmonic" #}))
Scheme objects are immutable.
'''
### CLASS VARIABLES ###
__slots__ = (
'_force_quotes',
'_quoting',
'_value',
'_verbatim',
)
### INITIALIZER ###
def __init__(self, *args, **kwargs):
if 1 == len(args):
if isinstance(args[0], type(self)):
args = args[0]._value
else:
args = args[0]
quoting = kwargs.get('quoting')
force_quotes = bool(kwargs.get('force_quotes'))
verbatim = kwargs.get('verbatim')
assert isinstance(quoting, (str, type(None)))
if quoting is not None:
assert all(x in ("'", ',', '@', '`', '#') for x in quoting)
self._force_quotes = force_quotes
self._quoting = quoting
self._value = args
self._verbatim = bool(verbatim)
### SPECIAL METHODS ###
def __format__(self, format_specification=''):
r'''Formats scheme.
        Set `format_specification` to `''`, `'lilypond'` or ``'storage'``.
Interprets `''` equal to `'lilypond'`.
.. container:: example
**Example 1.** Scheme LilyPond format:
::
>>> scheme = schemetools.Scheme('foo')
>>> format(scheme)
'#foo'
.. container:: example
**Example 2.** Scheme storage format:
::
>>> print(format(scheme, 'storage'))
schemetools.Scheme(
'foo'
)
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'lilypond'):
return self._lilypond_format
elif format_specification == 'storage':
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getnewargs__(self):
r'''Gets new arguments.
Returns tuple.
'''
return (self._value,)
def __str__(self):
r'''String representation of scheme object.
Returns string.
'''
if self._quoting is not None:
return self._quoting + self._formatted_value
return self._formatted_value
### PRIVATE PROPERTIES ###
@property
def _formatted_value(self):
from abjad.tools import schemetools
return schemetools.Scheme.format_scheme_value(
self._value,
force_quotes=self.force_quotes,
verbatim=self.verbatim,
)
@property
def _lilypond_format(self):
if self._quoting is not None:
return '#' + self._quoting + self._formatted_value
return '#%s' % self._formatted_value
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
if stringtools.is_string(self._value):
positional_argument_values = (self._value,)
else:
positional_argument_values = self._value
keyword_argument_names = []
if self.force_quotes:
keyword_argument_names.append('force_quotes')
if self.quoting:
keyword_argument_names.append('quoting')
return systemtools.StorageFormatSpecification(
self,
keyword_argument_names=keyword_argument_names,
positional_argument_values=positional_argument_values,
)
### PUBLIC METHODS ###
@staticmethod
def format_embedded_scheme_value(value, force_quotes=False):
r'''Formats `value` as an embedded Scheme value.
'''
from abjad.tools import datastructuretools
from abjad.tools import schemetools
result = Scheme.format_scheme_value(value, force_quotes=force_quotes)
if isinstance(value, bool):
result = '#{}'.format(result)
elif isinstance(value, datastructuretools.OrdinalConstant):
result = '#{}'.format(repr(value).lower())
elif isinstance(value, str) and not force_quotes:
result = '#{}'.format(result)
elif isinstance(value, schemetools.Scheme):
result = '#{}'.format(result)
return result
@staticmethod
def format_scheme_value(value, force_quotes=False, verbatim=False):
r'''Formats `value` as Scheme would.
.. container:: example
**Example 1.** Some basic values:
::
>>> schemetools.Scheme.format_scheme_value(1)
'1'
::
>>> schemetools.Scheme.format_scheme_value('foo')
'foo'
::
>>> schemetools.Scheme.format_scheme_value('bar baz')
'"bar baz"'
::
>>> schemetools.Scheme.format_scheme_value([1.5, True, False])
'(1.5 #t #f)'
.. container:: example
**Example 2.** Strings without whitespace can be forcibly quoted
via the `force_quotes` keyword:
::
>>> schemetools.Scheme.format_scheme_value(
... 'foo',
... force_quotes=True,
... )
'"foo"'
.. container:: example
**Example 3.** Set verbatim to true to format value exactly (with
only hash preprended):
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> schemetools.Scheme.format_scheme_value(string, verbatim=True)
'(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))'
Returns string.
'''
from abjad.tools import schemetools
if isinstance(value, str) and not verbatim:
value = value.replace('"', r'\"')
if -1 == value.find(' ') and not force_quotes:
return value
return '"{}"'.format(value)
elif isinstance(value, str) and verbatim:
return value
elif isinstance(value, bool):
if value:
return '#t'
return '#f'
elif isinstance(value, (list, tuple)):
return '({})'.format(
' '.join(schemetools.Scheme.format_scheme_value(x)
for x in value))
elif isinstance(value, schemetools.Scheme):
return str(value)
elif isinstance(value, type(None)):
return '#f'
return str(value)
### PUBLIC PROPERTIES ###
@property
def force_quotes(self):
r'''Is true when quotes should be forced in output. Otherwise false.
Returns boolean.
'''
return self._force_quotes
@property
def quoting(self):
r'''Gets Scheme quoting string.
Return string.
'''
return self._quoting
@property
def verbatim(self):
r'''Is true when formatting should format value absolutely verbatim.
        Whitespace, quotes and all other parts of value are left intact.
Defaults to false.
Set to true or false.
Returns true or false.
'''
return self._verbatim
| gpl-3.0 | 9,166,033,055,927,871,000 | 29.303116 | 128 | 0.536973 | false |
calancha/DIRAC | DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py | 1 | 19627 | ########################################################################
# $HeadURL $
# File: ReplicateAndRegister.py
# Author: [email protected]
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: [email protected]
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id $"
# #
# @file ReplicateAndRegister.py
# @author [email protected]
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gMonitor, gLogger
from DIRAC.Core.Utilities.Adler import compareAdler
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
def filterReplicas( opFile, logger = None, dataManager = None, seCache = None ):
""" filter out banned/invalid source SEs """
if not logger:
logger = gLogger
if not dataManager:
dataManager = DataManager()
if not seCache:
seCache = {}
log = logger.getSubLogger( "filterReplicas" )
ret = { "Valid" : [], "NoMetadata" : [], "Bad" : [], 'NoReplicas':[], 'NoPFN':[] }
replicas = dataManager.getActiveReplicas( opFile.LFN )
if not replicas["OK"]:
log.error( replicas["Message"] )
return replicas
reNotExists = re.compile( r".*such file.*" )
replicas = replicas["Value"]
failed = replicas["Failed"].get( opFile.LFN , "" )
if reNotExists.match( failed.lower() ):
opFile.Status = "Failed"
opFile.Error = failed
return S_ERROR( failed )
replicas = replicas["Successful"].get( opFile.LFN, {} )
for repSEName in replicas:
repSE = seCache[repSEName] if repSEName in seCache else \
seCache.setdefault( repSEName, StorageElement( repSEName ) )
pfn = repSE.getPfnForLfn( opFile.LFN )
if not pfn["OK"] or opFile.LFN not in pfn['Value']['Successful']:
log.warn( "unable to create pfn for %s lfn at %s: %s" % ( opFile.LFN,
repSEName,
pfn.get( 'Message', pfn.get( 'Value', {} ).get( 'Failed', {} ).get( opFile.LFN ) ) ) )
ret["NoPFN"].append( repSEName )
else:
pfn = pfn["Value"]['Successful'][ opFile.LFN ]
repSEMetadata = repSE.getFileMetadata( pfn )
error = repSEMetadata.get( 'Message', repSEMetadata.get( 'Value', {} ).get( 'Failed', {} ).get( pfn ) )
if error:
log.warn( 'unable to get metadata at %s for %s' % ( repSEName, opFile.LFN ), error.replace( '\n', '' ) )
if 'File does not exist' in error:
ret['NoReplicas'].append( repSEName )
else:
ret["NoMetadata"].append( repSEName )
else:
repSEMetadata = repSEMetadata['Value']['Successful'][pfn]
seChecksum = repSEMetadata.get( "Checksum" )
if opFile.Checksum and seChecksum and not compareAdler( seChecksum, opFile.Checksum ) :
# The checksum in the request may be wrong, check with FC
fcMetadata = FileCatalog().getFileMetadata( opFile.LFN )
fcChecksum = fcMetadata.get( 'Value', {} ).get( 'Successful', {} ).get( opFile.LFN, {} ).get( 'Checksum' )
if fcChecksum and fcChecksum != opFile.Checksum and compareAdler( fcChecksum , seChecksum ):
opFile.Checksum = fcChecksum
ret['Valid'].append( repSEName )
else:
log.warn( " %s checksum mismatch, request: %s @%s: %s" % ( opFile.LFN,
opFile.Checksum,
repSEName,
seChecksum ) )
ret["Bad"].append( repSEName )
else:
# # if we're here repSE is OK
ret["Valid"].append( repSEName )
return S_OK( ret )
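# Illustrative shape of the dictionary wrapped by S_OK above (the storage
# element names are hypothetical):
#
#   { "Valid"      : [ "CERN-DST" ],
#     "NoMetadata" : [],
#     "Bad"        : [ "PIC-DST" ],
#     "NoReplicas" : [],
#     "NoPFN"      : [] }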
########################################################################
class ReplicateAndRegister( DMSRequestOperationsBase ):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
super( ReplicateAndRegister, self ).__init__( operation, csPath )
# # own gMonitor stuff for files
gMonitor.registerActivity( "ReplicateAndRegisterAtt", "Replicate and register attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateOK", "Replications successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateFail", "Replications failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterOK", "Registrations successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterFail", "Registrations failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # for FTS
gMonitor.registerActivity( "FTSScheduleAtt", "Files schedule attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleOK", "File schedule successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleFail", "File schedule failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # SE cache
self.seCache = {}
# Clients
self.fc = FileCatalog()
if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ):
self.ftsClient = FTSClient()
def __call__( self ):
""" call me maybe """
# # check replicas first
checkReplicas = self.__checkReplicas()
if not checkReplicas["OK"]:
self.log.error( checkReplicas["Message"] )
if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ):
bannedGroups = getattr( self, "FTSBannedGroups" ) if hasattr( self, "FTSBannedGroups" ) else ()
if self.request.OwnerGroup in bannedGroups:
self.log.verbose( "usage of FTS system is banned for request's owner" )
return self.dmTransfer()
return self.ftsTransfer()
return self.dmTransfer()
def __checkReplicas( self ):
""" check done replicas and update file states """
waitingFiles = dict( [ ( opFile.LFN, opFile ) for opFile in self.operation
if opFile.Status in ( "Waiting", "Scheduled" ) ] )
targetSESet = set( self.operation.targetSEList )
replicas = self.fc.getReplicas( waitingFiles.keys() )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
reMissing = re.compile( r".*such file.*" )
for failedLFN, errStr in replicas["Value"]["Failed"].items():
waitingFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
self.log.error( "file %s does not exists" % failedLFN )
gMonitor.addMark( "ReplicateFail", len( targetSESet ) )
waitingFiles[failedLFN].Status = "Failed"
for successfulLFN, reps in replicas["Value"]["Successful"].items():
if targetSESet.issubset( set( reps ) ):
self.log.info( "file %s has been replicated to all targets" % successfulLFN )
waitingFiles[successfulLFN].Status = "Done"
return S_OK()
def _addMetadataToFiles( self, toSchedule ):
""" Add metadata to those files that need to be scheduled through FTS
toSchedule is a dictionary:
{'lfn1': [opFile, validReplicas, validTargets], 'lfn2': [opFile, validReplicas, validTargets]}
"""
if toSchedule:
self.log.info( "found %s files to schedule, getting metadata from FC" % len( toSchedule ) )
lfns = toSchedule.keys()
else:
self.log.info( "No files to schedule" )
return S_OK()
res = self.fc.getFileMetadata( lfns )
if not res['OK']:
return res
else:
if res['Value']['Failed']:
self.log.warn( "Can't schedule %d files: problems getting the metadata: %s" % ( len( res['Value']['Failed'] ),
', '.join( res['Value']['Failed'] ) ) )
metadata = res['Value']['Successful']
filesToScheduleList = []
for lfnsToSchedule, lfnMetadata in metadata.items():
opFileToSchedule = toSchedule[lfnsToSchedule][0]
opFileToSchedule.GUID = lfnMetadata['GUID']
opFileToSchedule.Checksum = metadata[lfnsToSchedule]['Checksum']
opFileToSchedule.ChecksumType = metadata[lfnsToSchedule]['ChecksumType']
opFileToSchedule.Size = metadata[lfnsToSchedule]['Size']
filesToScheduleList.append( ( opFileToSchedule.toJSON()['Value'],
toSchedule[lfnsToSchedule][1],
toSchedule[lfnsToSchedule][2] ) )
return S_OK( filesToScheduleList )
def _filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
return filterReplicas( opFile, logger = self.log, dataManager = self.dm, seCache = self.seCache )
def ftsTransfer( self ):
""" replicate and register using FTS """
self.log.info( "scheduling files in FTS..." )
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark( "FTSScheduleAtt" )
gMonitor.addMark( "FTSScheduleFail" )
return bannedTargets
if bannedTargets['Value']:
return S_OK( "%s targets are banned for writing" % ",".join( bannedTargets['Value'] ) )
# Can continue now
self.log.verbose( "No targets banned for writing" )
toSchedule = {}
for opFile in self.getWaitingFilesList():
opFile.Error = ''
gMonitor.addMark( "FTSScheduleAtt" )
# # check replicas
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas['NoReplicas']
badReplicas = replicas['Bad']
noPFN = replicas['NoPFN']
if validReplicas:
validTargets = list( set( self.operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
self.log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
else:
toSchedule[opFile.LFN] = [ opFile, validReplicas, validTargets ]
else:
gMonitor.addMark( "FTSScheduleFail" )
if noMetaReplicas:
self.log.warn( "unable to schedule '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
opFile.Error = "Couldn't get metadata"
elif noReplicas:
self.log.error( "unable to schedule %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Error = 'No replicas found'
opFile.Status = 'Failed'
elif badReplicas:
self.log.error( "unable to schedule %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Error = 'All replicas have a bad checksum'
opFile.Status = 'Failed'
elif noPFN:
self.log.warn( "unable to schedule %s, could not get a PFN at %s" % ( opFile.LFN, ','.join( noPFN ) ) )
res = self._addMetadataToFiles( toSchedule )
if not res['OK']:
return res
else:
filesToScheduleList = res['Value']
if filesToScheduleList:
ftsSchedule = self.ftsClient.ftsSchedule( self.request.RequestID,
self.operation.OperationID,
filesToScheduleList )
if not ftsSchedule["OK"]:
self.log.error( "Completely failed to schedule to FTS:", ftsSchedule["Message"] )
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
if not ftsSchedule:
return S_OK()
self.log.info( "%d files have been scheduled to FTS" % len( ftsSchedule['Successful'] ) )
for opFile in self.operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
gMonitor.addMark( "FTSScheduleOK", 1 )
opFile.Status = "Scheduled"
self.log.debug( "%s has been scheduled for FTS" % opFile.LFN )
elif fileID in ftsSchedule["Failed"]:
gMonitor.addMark( "FTSScheduleFail", 1 )
opFile.Error = ftsSchedule["Failed"][fileID]
if 'sourceSURL equals to targetSURL' in opFile.Error:
# In this case there is no need to continue
opFile.Status = 'Failed'
self.log.warn( "unable to schedule %s for FTS: %s" % ( opFile.LFN, opFile.Error ) )
else:
self.log.info( "No files to schedule after metadata checks" )
# Just in case some transfers could not be scheduled, try them with RM
return self.dmTransfer( fromFTS = True )
def dmTransfer( self, fromFTS = False ):
""" replicate and register using dataManager """
# # get waiting files. If none just return
# # source SE
sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
if sourceSE:
# # check source se for read
bannedSource = self.checkSEsRSS( sourceSE, 'ReadAccess' )
if not bannedSource["OK"]:
gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
gMonitor.addMark( "ReplicateFail", len( self.operation ) )
return bannedSource
if bannedSource["Value"]:
self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
self.log.info( self.operation.Error )
return S_OK( self.operation.Error )
# # check targetSEs for write
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
gMonitor.addMark( "ReplicateFail", len( self.operation ) )
return bannedTargets
if bannedTargets['Value']:
self.operation.Error = "%s targets are banned for writing" % ",".join( bannedTargets['Value'] )
return S_OK( self.operation.Error )
# Can continue now
self.log.verbose( "No targets banned for writing" )
waitingFiles = self.getWaitingFilesList()
if not waitingFiles:
return S_OK()
# # loop over files
if fromFTS:
self.log.info( "Trying transfer using replica manager as FTS failed" )
else:
self.log.info( "Transferring files using Data manager..." )
for opFile in waitingFiles:
gMonitor.addMark( "ReplicateAndRegisterAtt", 1 )
opFile.Error = ''
lfn = opFile.LFN
# Check if replica is at the specified source
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas['NoReplicas']
badReplicas = replicas['Bad']
noPFN = replicas['NoPFN']
if not validReplicas:
gMonitor.addMark( "ReplicateFail" )
if noMetaReplicas:
self.log.warn( "unable to replicate '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
opFile.Error = "Couldn't get metadata"
elif noReplicas:
self.log.error( "unable to replicate %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Error = 'No replicas found'
opFile.Status = 'Failed'
elif badReplicas:
self.log.error( "unable to replicate %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Error = 'All replicas have a bad checksum'
opFile.Status = 'Failed'
elif noPFN:
self.log.warn( "unable to replicate %s, could not get a PFN" % opFile.LFN )
continue
# # get the first one in the list
if sourceSE not in validReplicas:
if sourceSE:
self.log.warn( "%s is not at specified sourceSE %s, changed to %s" % ( lfn, sourceSE, validReplicas[0] ) )
sourceSE = validReplicas[0]
# # loop over targetSE
catalogs = self.operation.Catalog
if catalogs:
catalogs = [ cat.strip() for cat in catalogs.split( ',' ) ]
for targetSE in self.operation.targetSEList:
# # call DataManager
if targetSE in validReplicas:
self.log.warn( "Request to replicate %s to an existing location: %s" % ( lfn, targetSE ) )
opFile.Status = 'Done'
continue
res = self.dm.replicateAndRegister( lfn, targetSE, sourceSE = sourceSE, catalog = catalogs )
if res["OK"]:
if lfn in res["Value"]["Successful"]:
if "replicate" in res["Value"]["Successful"][lfn]:
repTime = res["Value"]["Successful"][lfn]["replicate"]
prString = "file %s replicated at %s in %s s." % ( lfn, targetSE, repTime )
gMonitor.addMark( "ReplicateOK", 1 )
if "register" in res["Value"]["Successful"][lfn]:
gMonitor.addMark( "RegisterOK", 1 )
regTime = res["Value"]["Successful"][lfn]["register"]
prString += ' and registered in %s s.' % regTime
self.log.info( prString )
else:
gMonitor.addMark( "RegisterFail", 1 )
prString += " but failed to register"
self.log.warn( prString )
opFile.Error = "Failed to register"
# # add register replica operation
registerOperation = self.getRegisterOperation( opFile, targetSE, type = 'RegisterReplica' )
self.request.insertAfter( registerOperation, self.operation )
else:
self.log.error( "failed to replicate %s to %s." % ( lfn, targetSE ) )
gMonitor.addMark( "ReplicateFail", 1 )
opFile.Error = "Failed to replicate"
else:
gMonitor.addMark( "ReplicateFail", 1 )
reason = res["Value"]["Failed"][lfn]
self.log.error( "failed to replicate and register file %s at %s:" % ( lfn, targetSE ), reason )
opFile.Error = reason
else:
gMonitor.addMark( "ReplicateFail", 1 )
opFile.Error = "DataManager error: %s" % res["Message"]
self.log.error( opFile.Error )
if not opFile.Error:
if len( self.operation.targetSEList ) > 1:
self.log.info( "file %s has been replicated to all targetSEs" % lfn )
opFile.Status = "Done"
return S_OK()
| gpl-3.0 | -1,011,045,608,343,982,200 | 40.060669 | 150 | 0.597493 | false |
JorrandeWit/ithenticate-api-python | iThenticate/API/Object/data.py | 1 | 1868 | class Data(dict):
def __init__(self, xml, status=None, messages=None):
"""Process the xml instance into a friendly dictionary."""
content = {
'data': None,
'status': status or 200,
'messages': messages or []
}
struct_nodes = xml.findall('./')
data = self._breakdown_tree(struct_nodes)
content['data'] = data
dict.__init__(self, content)
def _breakdown_tree(self, nodes):
# All properties in a single item
_data = {}
for node in nodes:
if node.tag == 'member':
# Dictionary item
key = node.find('name').text
value_node = node.find('value')[0]
if value_node.tag == 'int':
value = int(value_node.text.strip())
elif value_node.tag in ['array', 'struct', 'data', 'param']:
value = self._breakdown_tree(value_node.findall('./'))
elif value_node.tag == 'string':
try:
value = value_node.text.strip()
except AttributeError:
# Maliciously constructed data is detected in the responses for the string nodes
value = value_node.text
else:
# dateTime.iso8601 or something exotic
value = value_node.text
_data[key] = value
elif node.tag == 'value':
# Nodes are list items
if not isinstance(_data, list):
_data = []
_data.append(self._breakdown_tree(node.findall('./')))
else:
# Recursively find data as this is not a data node
return self._breakdown_tree(node.findall('./'))
return _data
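# Rough sketch of the conversion performed by this class (the XML below is
# abbreviated and hypothetical; it mirrors the XML-RPC style
# <struct>/<member>/<value> layout the parser expects):
#
#   <struct><member><name>id</name><value><int>42</int></value></member></struct>
#   -> Data(xml) == {'status': 200, 'messages': [], 'data': {'id': 42}}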
| bsd-2-clause | 3,485,251,996,502,977,000 | 37.122449 | 104 | 0.478051 | false |
avaitla/Haskell-to-C---Bridge | pygccxml-1.0.0/pygccxml/parser/config.py | 1 | 7092 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""This module contains the implementation of the L{config_t} class.
"""
import os
import sys
import copy
class parser_configuration_t(object):
"""Configuration object to collect parameters for invoking C++ parser
This class serves as a base class for the parameters that can be used
    to customize the call to the C++ parser. This class also allows users to work with
    relative file paths. In this case files are searched in the following order:
1. current directory
2. working directory
3. additional include paths specified by the user
"""
def __init__( self
, working_directory='.'
, include_paths=None
, define_symbols=None
, undefine_symbols=None
, cflags=""
, compiler=None):
"""Constructor.
"""
object.__init__( self )
self.__working_directory = working_directory
if not include_paths:
include_paths = []
self.__include_paths = include_paths
if not define_symbols:
define_symbols = []
self.__define_symbols = define_symbols
if not undefine_symbols:
undefine_symbols = []
self.__undefine_symbols = undefine_symbols
self.__cflags = cflags
self.__compiler = compiler
def clone(self):
raise NotImplementedError( self.__class__.__name__ )
def __get_working_directory(self):
return self.__working_directory
def __set_working_directory(self, working_dir):
self.__working_directory=working_dir
working_directory = property( __get_working_directory, __set_working_directory )
@property
def include_paths(self):
"""list of include paths to look for header files"""
return self.__include_paths
@property
def define_symbols(self):
"""list of "define" directives """
return self.__define_symbols
@property
def undefine_symbols(self):
"""list of "undefine" directives """
return self.__undefine_symbols
@property
def compiler(self):
"""compiler name to simulate"""
return self.__compiler
def __get_cflags(self):
return self.__cflags
def __set_cflags(self, val):
self.__cflags = val
cflags = property( __get_cflags, __set_cflags
, doc="additional flags to pass to compiler" )
def __ensure_dir_exists( self, dir_path, meaning ):
if os.path.isdir( dir_path ):
return
        if os.path.exists( dir_path ):
            raise RuntimeError( '%s("%s") should be "directory", not a file.' % ( meaning, dir_path ) )
        else:
            raise RuntimeError( '%s("%s") does not exist!' % ( meaning, dir_path ) )
def raise_on_wrong_settings( self ):
"""validates the configuration settings and raises RuntimeError on error"""
self.__ensure_dir_exists( self.working_directory, 'working directory' )
map( lambda idir: self.__ensure_dir_exists( idir, 'include directory' )
, self.include_paths )
class gccxml_configuration_t(parser_configuration_t):
"""Configuration object to collect parameters for invoking gccxml.
This class serves as a container for the parameters that can be used
to customize the call to gccxml.
"""
def __init__( self
, gccxml_path=''
, working_directory='.'
, include_paths=None
, define_symbols=None
, undefine_symbols=None
, start_with_declarations=None
, ignore_gccxml_output=False
, cflags=""
, compiler=None):
"""Constructor.
"""
parser_configuration_t.__init__( self
, working_directory=working_directory
, include_paths=include_paths
, define_symbols=define_symbols
, undefine_symbols=undefine_symbols
, cflags=cflags
, compiler=compiler)
self.__gccxml_path = gccxml_path
if not start_with_declarations:
start_with_declarations = []
self.__start_with_declarations = start_with_declarations
self.__ignore_gccxml_output = ignore_gccxml_output
def clone(self):
return copy.deepcopy( self )
def __get_gccxml_path(self):
return self.__gccxml_path
def __set_gccxml_path(self, new_path ):
self.__gccxml_path = new_path
gccxml_path = property( __get_gccxml_path, __set_gccxml_path
, doc="gccxml binary location" )
@property
def start_with_declarations(self):
"""list of declarations gccxml should start with, when it dumps declaration tree"""
return self.__start_with_declarations
def __get_ignore_gccxml_output(self):
return self.__ignore_gccxml_output
def __set_ignore_gccxml_output(self, val=True):
self.__ignore_gccxml_output = val
ignore_gccxml_output = property( __get_ignore_gccxml_output, __set_ignore_gccxml_output
, doc="set this property to True, if you want pygccxml to ignore any error\\warning that comes from gccxml" )
def raise_on_wrong_settings( self ):
super( gccxml_configuration_t, self ).raise_on_wrong_settings()
if os.path.isfile( self.gccxml_path ):
return
if sys.platform == 'win32':
gccxml_name = 'gccxml' + '.exe'
environment_var_delimiter = ';'
elif sys.platform == 'linux2' or sys.platform == 'darwin':
gccxml_name = 'gccxml'
environment_var_delimiter = ':'
else:
raise RuntimeError( 'unable to find out location of gccxml' )
may_be_gccxml = os.path.join( self.gccxml_path, gccxml_name )
if os.path.isfile( may_be_gccxml ):
self.gccxml_path = may_be_gccxml
else:
for path in os.environ['PATH'].split( environment_var_delimiter ):
gccxml_path = os.path.join( path, gccxml_name )
if os.path.isfile( gccxml_path ):
self.gccxml_path = gccxml_path
break
else:
msg = 'gccxml_path("%s") should exists or to be a valid file name.' \
% self.gccxml_path
raise RuntimeError( msg )
config_t = gccxml_configuration_t # backward compatibility
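# A minimal usage sketch (the paths and symbols below are illustrative, not
# defaults shipped with pygccxml):
#
#   cfg = config_t( gccxml_path='/usr/bin/gccxml'
#                   , include_paths=['/usr/include/mylib']
#                   , define_symbols=['NDEBUG'] )
#   cfg.raise_on_wrong_settings()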
| bsd-3-clause | 3,308,842,870,037,335,000 | 35.13089 | 145 | 0.558376 | false |
tyarkoni/pliers | pliers/extractors/__init__.py | 1 | 6080 | ''' The `Extractor` hierarchy contains Transformer classes that take a `Stim`
of any type as input and return extracted feature information (rather than
another `Stim` instance).
'''
from .base import Extractor, ExtractorResult, merge_results
from .api import (ClarifaiAPIImageExtractor,
ClarifaiAPIVideoExtractor,
GoogleVisionAPIFaceExtractor,
GoogleVisionAPILabelExtractor,
GoogleVisionAPIPropertyExtractor,
GoogleVisionAPISafeSearchExtractor,
GoogleVisionAPIWebEntitiesExtractor,
GoogleVideoIntelligenceAPIExtractor,
GoogleVideoAPILabelDetectionExtractor,
GoogleVideoAPIShotDetectionExtractor,
GoogleVideoAPIExplicitDetectionExtractor,
GoogleLanguageAPIExtractor,
GoogleLanguageAPIEntityExtractor,
GoogleLanguageAPISentimentExtractor,
GoogleLanguageAPISyntaxExtractor,
GoogleLanguageAPITextCategoryExtractor,
GoogleLanguageAPIEntitySentimentExtractor,
MicrosoftAPIFaceExtractor,
MicrosoftAPIFaceEmotionExtractor,
MicrosoftVisionAPIExtractor,
MicrosoftVisionAPITagExtractor,
MicrosoftVisionAPICategoryExtractor,
MicrosoftVisionAPIImageTypeExtractor,
MicrosoftVisionAPIColorExtractor,
MicrosoftVisionAPIAdultExtractor)
from .audio import (LibrosaFeatureExtractor,
STFTAudioExtractor,
MeanAmplitudeExtractor,
SpectralCentroidExtractor,
SpectralBandwidthExtractor,
SpectralContrastExtractor,
SpectralRolloffExtractor,
PolyFeaturesExtractor,
ZeroCrossingRateExtractor,
ChromaSTFTExtractor,
ChromaCQTExtractor,
ChromaCENSExtractor,
MelspectrogramExtractor,
MFCCExtractor,
TonnetzExtractor,
TempogramExtractor,
RMSExtractor,
SpectralFlatnessExtractor,
OnsetDetectExtractor,
OnsetStrengthMultiExtractor,
TempoExtractor,
BeatTrackExtractor,
HarmonicExtractor,
PercussiveExtractor,
AudiosetLabelExtractor)
from .image import (BrightnessExtractor, SaliencyExtractor, SharpnessExtractor,
VibranceExtractor, FaceRecognitionFaceEncodingsExtractor,
FaceRecognitionFaceLandmarksExtractor,
FaceRecognitionFaceLocationsExtractor)
from .misc import MetricExtractor
from .models import TensorFlowKerasApplicationExtractor
from .text import (ComplexTextExtractor, DictionaryExtractor,
PredefinedDictionaryExtractor, LengthExtractor,
NumUniqueWordsExtractor, PartOfSpeechExtractor,
WordEmbeddingExtractor, TextVectorizerExtractor,
VADERSentimentExtractor, SpaCyExtractor,
WordCounterExtractor, BertExtractor,
BertSequenceEncodingExtractor, BertLMExtractor,
BertSentimentExtractor)
from .video import (FarnebackOpticalFlowExtractor)
__all__ = [
'Extractor',
'ExtractorResult',
'ClarifaiAPIImageExtractor',
'ClarifaiAPIVideoExtractor',
'STFTAudioExtractor',
'MeanAmplitudeExtractor',
'LibrosaFeatureExtractor',
'SpectralCentroidExtractor',
'SpectralBandwidthExtractor',
'SpectralContrastExtractor',
'SpectralRolloffExtractor',
'PolyFeaturesExtractor',
'ZeroCrossingRateExtractor',
'ChromaSTFTExtractor',
'ChromaCQTExtractor',
'ChromaCENSExtractor',
'MelspectrogramExtractor',
'MFCCExtractor',
'TonnetzExtractor',
'TempogramExtractor',
'GoogleVisionAPIFaceExtractor',
'GoogleVisionAPILabelExtractor',
'GoogleVisionAPIPropertyExtractor',
'GoogleVisionAPISafeSearchExtractor',
'GoogleVisionAPIWebEntitiesExtractor',
'GoogleVideoIntelligenceAPIExtractor',
'GoogleVideoAPILabelDetectionExtractor',
'GoogleVideoAPIShotDetectionExtractor',
'GoogleVideoAPIExplicitDetectionExtractor',
'GoogleLanguageAPIExtractor',
'GoogleLanguageAPIEntityExtractor',
'GoogleLanguageAPISentimentExtractor',
'GoogleLanguageAPISyntaxExtractor',
'GoogleLanguageAPITextCategoryExtractor',
'GoogleLanguageAPIEntitySentimentExtractor',
'BrightnessExtractor',
'SaliencyExtractor',
'SharpnessExtractor',
'VibranceExtractor',
'FaceRecognitionFaceEncodingsExtractor',
'FaceRecognitionFaceLandmarksExtractor',
'FaceRecognitionFaceLocationsExtractor',
'MicrosoftAPIFaceExtractor',
'MicrosoftAPIFaceEmotionExtractor',
'MicrosoftVisionAPIExtractor',
'MicrosoftVisionAPITagExtractor',
'MicrosoftVisionAPICategoryExtractor',
'MicrosoftVisionAPIImageTypeExtractor',
'MicrosoftVisionAPIColorExtractor',
'MicrosoftVisionAPIAdultExtractor',
'TensorFlowKerasApplicationExtractor',
'ComplexTextExtractor',
'DictionaryExtractor',
'PredefinedDictionaryExtractor',
'LengthExtractor',
'NumUniqueWordsExtractor',
'PartOfSpeechExtractor',
'FarnebackOpticalFlowExtractor',
'WordEmbeddingExtractor',
'TextVectorizerExtractor',
'VADERSentimentExtractor',
'merge_results',
'SpaCyExtractor',
'RMSExtractor',
    'SpectralFlatnessExtractor',
'OnsetDetectExtractor',
'OnsetStrengthMultiExtractor',
'TempoExtractor',
'BeatTrackExtractor',
'HarmonicExtractor',
'PercussiveExtractor',
'BertExtractor',
'BertSequenceEncodingExtractor',
'BertLMExtractor',
'BertSentimentExtractor',
'AudiosetLabelExtractor',
'WordCounterExtractor',
'MetricExtractor'
]
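# A minimal usage sketch (the file name and extractor choice are illustrative):
#
#   from pliers.stimuli import ImageStim
#   result = BrightnessExtractor().transform(ImageStim('face.jpg'))
#   df = result.to_df()              # tabular feature values for this stimulus
#   merged = merge_results([result])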
| bsd-3-clause | 4,057,292,330,107,834,000 | 39 | 79 | 0.675329 | false |
LookThisCode/DeveloperBus | Season 2013/Mexico/Projects/Equipo4_MES/website/MES/settings.py | 1 | 2949 | """
Django settings for MES project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
    'django.contrib.staticfiles',
'social.apps.django_app.default',
'south',
'crispy_forms',
'accounts',
'routes'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'MES.urls'
WSGI_APPLICATION = 'MES.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
AUTHENTICATION_BACKENDS = (
'social.backends.google.GooglePlusAuth',
'social.apps.django_app.utils.BackendWrapper',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "assets"),
)
SOCIAL_AUTH_GOOGLE_PLUS_KEY = ''
SOCIAL_AUTH_GOOGLE_PLUS_SECRET = ''
LOGIN_REDIRECT_URL = '/' | apache-2.0 | 1,220,421,934,049,439,200 | 22.6 | 71 | 0.71414 | false |
open-mmlab/mmdetection | configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py | 1 | 2049 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| apache-2.0 | -5,928,356,347,963,705,000 | 32.048387 | 74 | 0.495852 | false |
AlericInglewood/3p-google-breakpad | src/tools/gyp/pylib/gyp/__init__.py | 1 | 20572 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message):
if 'all' in gyp.debug.keys() or mode in gyp.debug.keys():
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False, circular_check=False):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
flavor = None
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'generator_wants_absolute_build_file_paths':
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
'generator_handles_variants':
getattr(generator, 'generator_handles_variants', False),
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, False)
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating, for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
home_dot_gyp = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
# TODO(thomasvl): add support for ~/.gyp/defaults
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
else:
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
    raise GypError('Could not automatically locate src directory. This is '
                   'a temporary Chromium feature that will be removed. Use '
                   '--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s" % cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check,
options.circular_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
# Done
return 0
def main(args):
try:
return gyp_main(args)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -573,254,012,917,891,500 | 38.791103 | 81 | 0.651031 | false |
COSMOGRAIL/COSMOULINE | pipe/modules/asciidata/asciiheader.py | 1 | 10614 | """
Various header classes to be part of the asciidata class
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: [email protected]
@since: 2005/09/13
$LastChangedBy: mkuemmel $
$LastChangedDate: 2008-01-08 18:13:38 +0100 (Tue, 08 Jan 2008) $
$HeadURL: http://astropy.scipy.org/svn/astrolib/trunk/asciidata/Lib/asciiheader.py $
"""
__version__ = "Version 1.1 $LastChangedRevision: 329 $"
import string
import re
from asciiutils import *
class Header(object):
"""
The header object
This object offers the possibility to store additional
information such as change comments or column information.
This additional information may just be present at the
beginning of the data file or later be added.
"""
def __init__(self, filename=None, comment_char=None):
"""
Constructor for the Header class
@param filename: the data file
@type filename: string
@param comment_char: the comment_char string
@type comment_char: string
"""
# store the comment_char
self._comment_char = comment_char
        # Fullhdata contains the full unparsed header - probably
        # superfluous now
self.Fullhdata = []
# CollInfo is a list of column names extracted from the header
        # please note that it is only current at read-in time and is currently
        # not updated when columns are changed
self.CollInfo = []
# SexVectorColls are the known sextractor output parameters which
# come as vectors
self.SexVectorColls = ('MAG_APER','MAGERR_APER','FLUX_RADIUS','FLUX_APER','FLUXERR_APER','VECTOR_SOMFIT','VECTOR_ASSOC','FLUX_GROWTH','VIGNET','VIGNET_SHIFT')
        # SExtractorFlag marks whether SExtractor-like header information
        # was parsed
self.SExtractorFlag = False
# retrieve the comment from the data file
# hdata is the header minus the column info lines
# in case the header column info is invalid at loading hdata defaults to Fullhdata
if filename == None:
self.hdata = []
else:
self.hdata = self._load_header(filename, comment_char)
# set the number of elements
self._nentry = len(self.hdata)
def __getitem__(self, index):
"""
Defines the list operator for indexing
The method returns the indexed header entry,
if it exists. An error is raised otherwise
@param index: the index of the header entry to be returned
@type index: integer
@return: a header line
@rtype: string
"""
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# return the desired header entry
return self.hdata[index]
def __setitem__(self, index, hentry):
"""
        Defines the list operator for indexed assignment
        @param index: the index of the header entry to be set
        @type index: integer
        @param hentry: the header line to store at that index
        @type hentry: string
"""
# check whether the target index exists;
# raise error if not
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# split the string to lines
hitems = string.split(string.strip(hentry),'\n')
# check whether more than one line
# wants to be added
if len(hitems) > 1:
raise Exception('Only one line can be set!')
# replace the header entry,
# add a newline if necessary
if hentry[-1] != '\n':
self.hdata[index] = hentry + '\n'
else:
self.hdata[index] = hentry
def __delitem__(self, index):
"""
Deletes an index.
@param index: the index of the header item to be deleted
@type index: integer
"""
# check whether the target index exists;
# raise error if not
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# delete the column
del self.hdata[index]
# adjust the number of entries
self._nentry -= 1
def __str__(self):
"""
Defines a string method for the object
@return: the string representation
@rtype: string
"""
# start the string
hstring = ''
# add the different items
for line in self.hdata:
if len(line) > 0:
hstring += self._comment_char + line
else:
hstring += self._comment_char + '\n'
# return the string
return hstring
def __iter__(self):
"""
Provide an iterator object.
The function provides and returns an interator object
for the AstroAsciiData class. Due to this iterator object
sequences like:
for column in ascii_data_object:
<do something with column>
are possible.
"""
return AsciiLenGetIter(self)
def __len__(self):
"""
The length operator
        @return: the number of entries in the header
        @rtype: integer
"""
# thats rather trivial
length = self._nentry
# return the length
return length
def append(self, hlist):
"""
Append something to the header data
@param hlist: the string to append
@type hlist: string
"""
# split the string to lines
hitems = string.split(hlist,'\n')
# for each line
for item in hitems:
# append the new content
# to the header content
self.hdata.append(item+'\n')
self._nentry += 1
def _load_header(self, filename, comment_char):
"""
Loads the header from the data file
@param filename: the data file
@type filename: string
@param comment_char: the comment_char string
@type comment_char: string
"""
# start the item list
data = []
lastcoll,currcoll =0,0
lastname =''
# Define patterns for some common header formats
commentpattern = re.compile(comment_char)
sextractor_header = re.compile('^#\s*(\d+)\s+([+*-/()\w]+)([^\[]*)(\[\w+\])?(.*)\n')
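        # The SExtractor pattern above matches catalogue header lines such as
        # (hypothetical example):
        #   "#  16 MAG_APER  Fixed aperture magnitude vector  [mag]"
        # capturing the column number, name, comment text and optional unit.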
# open the data file and go over its rows
for line in file(filename, 'r'):
if commentpattern.match(line):
#append everything after the comment_char separator to Fullhdata
line_with_comment_char_stripped_off = commentpattern.sub('',line,count=1)
self.Fullhdata.append(line_with_comment_char_stripped_off)
SEmatch = sextractor_header.match(line)
if SEmatch: #sextractor_header.match(line):
# seems we have a SExtractorheader
if not self.SExtractorFlag:
self.SExtractorFlag = True
groups = SEmatch.groups()
currcoll = int(groups[0])
name = groups[1]
if currcoll <= lastcoll:
                    #ignore duplicate definitions and those that arrive out of order
continue
if currcoll > (lastcoll +1):
# print currcoll,lastcoll
# we jumped some lines, pad CollInfo
vcounter = 1
while (lastcoll +1) < currcoll:
if lastname in self.SexVectorColls:
self.CollInfo.append({'NAME':lastname+str(vcounter)})
vcounter +=1
else:
self.CollInfo.append(None)
lastcoll +=1
self.CollInfo.append({'NAME':name})
lastcoll = currcoll
lastname = name
if groups[3]:
# a unit was extracted
self.CollInfo[-1]['UNIT'] = str(groups[3].strip('[]'))
if groups[2] or groups[4]:
self.CollInfo[-1]['COMMENT'] =''
self.CollInfo[-1]['COMMENT'] += groups[2].strip()
if groups[2] and groups[4]:
self.CollInfo[-1]['COMMENT'] += ' '
self.CollInfo[-1]['COMMENT'] += groups[4].strip()
else:
data.append(line_with_comment_char_stripped_off)
else:
# leave the file at the first
# non-comment line
break
return data
def reset(self):
"""
Reset the header
"""
self.hdata = []
self._nentry = 0
def set_comment_char(self, comment_char):
"""
Set the comment_char string
@param comment_char: the new comment_char string
@type comment_char: string
"""
self._comment_char = comment_char
def getCollInfo(self,index):
"""
Robustly return column info from header
returns (columnname,unit,comment)
@param index: The column index
@type index: int
"""
#default values
name = 'column' + str(index+1)
unit = None
comment = None
if index < len(self.CollInfo):
if self.CollInfo[index]:
if self.CollInfo[index].has_key('NAME'):
name = str(self.CollInfo[index]['NAME'])
if self.CollInfo[index].has_key('UNIT'):
unit = str(self.CollInfo[index]['UNIT'])
if self.CollInfo[index].has_key('COMMENT'):
comment = str(self.CollInfo[index]['COMMENT'])
else:
# is the very last column in the list a known vector?
if self.CollInfo[-1]['NAME'] in self.SexVectorColls:
name = self.CollInfo[-1]['NAME']+str(index-len(self.CollInfo)+1)
# return name, unit, comment of the column
return name, unit, comment
| gpl-3.0 | 2,816,544,044,639,738,000 | 31.658462 | 166 | 0.543904 | false |
ninapavlich/scout-and-rove | scoutandrove/apps/sr/views.py | 1 | 1246 | from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.utils import timezone
from .models import *
class SiteProfileListView(ListView):
model = SiteProfile
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(self.__class__, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return self.model.get_profiles_for_user(self.request.user)
class SiteProfileDetailView(DetailView):
model = SiteProfile
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(self.__class__, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SiteProfileDetailView, self).get_context_data(**kwargs)
context['settings'] = self.object.get_settings()
context['test_result_sets'] = self.object.get_test_result_sets()
context['can_edit'] = self.object.can_user_edit(self.request.user)
return context
class TestResultSetDetailView(DetailView):
model = TestResultSet
| mit | -6,453,812,661,262,494,000 | 32.675676 | 79 | 0.719101 | false |
AtteqCom/zsl | src/zsl/testing/db.py | 1 | 4656 | """
:mod:`zsl.testing.db`
---------------------
This module allows for database unit testing. For how to use the database
testing in practice, a sample, refer to :ref:`unit-testing-db`.
The module works in the following way (methods setUp, tearDown):
1. Each test runs in a single transaction.
2. This transaction is always rolled back at the end of the test.
All the tests are run in a single parent transaction (setUpClass,
tearDownClass):
1. In the general initialization phase the session/transaction is created
   and kept for the whole test run. The database schema is also created.
2. After all the tests have finished, this transaction is rolled back.
This means that the tests may be conducted in an in-memory database
or in a persistent one which is kept clean.
The module provides class :class:`.TestSessionFactory` - it always returns
the same session. Also one should add :class:`.DbTestModule` to the test
container when creating Zsl instance, see :ref:`unit-testing-zsl-instance`.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import logging
from injector import Module, provides, singleton
from sqlalchemy.engine import Engine
from sqlalchemy.orm.session import Session
from zsl import inject
from zsl.application.modules.alchemy_module import TransactionHolder, TransactionHolderFactory
from zsl.db.model.sql_alchemy import metadata
from zsl.service.service import SessionFactory
class TestSessionFactory(SessionFactory):
"""Factory always returning the single test transaction."""
_test_session = None
@inject(engine=Engine)
def create_test_session(self, engine):
# type: (Engine) -> Session
assert TestSessionFactory._test_session is None
metadata.bind = engine
metadata.create_all(engine)
logging.getLogger(__name__).debug("Create test session - begin test session/setUp")
TestSessionFactory._test_session = self._session_holder()
TestSessionFactory._test_session.autoflush = True
TestSessionFactory._test_session.begin_nested()
assert TestSessionFactory._test_session is not None
return TestSessionFactory._test_session
def create_session(self):
logging.getLogger(__name__).debug("Create test session")
assert TestSessionFactory._test_session is not None
return TestSessionFactory._test_session
def close_test_session(self):
TestSessionFactory._test_session.rollback()
TestSessionFactory._test_session.close()
TestSessionFactory._test_session = None
logging.getLogger(__name__).debug("Close test session - close test test session/tearDown")
class TestTransactionHolder(TransactionHolder):
def begin(self):
self.session.begin_nested()
def close(self):
logging.getLogger(__name__).debug("Close.")
self._orm = None
self._in_transaction = False
class TestTransactionHolderFactory(TransactionHolderFactory):
def create_transaction_holder(self):
return TestTransactionHolder()
class DbTestModule(Module):
"""Module fixing the :class:`zsl.service.service.SessionFactory`
to our :class:`.TestSessionFactory`."""
@provides(SessionFactory, scope=singleton)
def get_session_factory(self):
# type: ()->SessionFactory
return TestSessionFactory()
@provides(TestSessionFactory, scope=singleton)
@inject(session_factory=SessionFactory)
def get_test_session_factory(self, session_factory):
# type: (SessionFactory)->SessionFactory
return session_factory
@provides(TransactionHolderFactory, scope=singleton)
def provide_transaction_holder_factory(self):
return TestTransactionHolderFactory()
class DbTestCase(object):
""":class:`.DbTestCase` is a mixin to be used when testing with
a database."""
_session = None
@inject(session_factory=TestSessionFactory)
def setUp(self, session_factory):
# type: (TestSessionFactory)->None
super(DbTestCase, self).setUp()
logging.getLogger(__name__).debug("DbTestCase.setUp")
session_factory.create_test_session()
@inject(session_factory=TestSessionFactory)
def tearDown(self, session_factory):
# type: (TestSessionFactory)->None
# This will return the same transaction/session
# as the one used in setUp.
logging.getLogger(__name__).debug("DbTestCase.tearDown")
session_factory.close_test_session()
super(DbTestCase, self).tearDown()
IN_MEMORY_DB_SETTINGS = {
'DATABASE_URI': 'sqlite:///:memory:',
'DATABASE_ENGINE_PROPS': {},
'JSON_AS_ASCII': False
}
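# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# A test case mixes DbTestCase into unittest.TestCase and runs against a Zsl
# instance created with DbTestModule and IN_MEMORY_DB_SETTINGS, as described in
# the module docstring. The class and test names below are hypothetical.
#
#   import unittest
#   from zsl.testing.db import DbTestCase
#
#   class ExampleModelTest(DbTestCase, unittest.TestCase):
#       def test_insert_is_rolled_back_after_each_test(self):
#           # work done through the injected session happens inside the nested
#           # transaction opened in setUp and rolled back in tearDown
#           pass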
| mit | -5,466,490,249,602,445,000 | 34.815385 | 98 | 0.716065 | false |
googleapis/googleapis-gen | google/cloud/securitycenter/v1beta1/securitycenter-v1beta1-py/google/cloud/securitycenter_v1beta1/services/security_center/transports/grpc_asyncio.py | 1 | 34248 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.securitycenter_v1beta1.types import finding
from google.cloud.securitycenter_v1beta1.types import finding as gcs_finding
from google.cloud.securitycenter_v1beta1.types import organization_settings
from google.cloud.securitycenter_v1beta1.types import organization_settings as gcs_organization_settings
from google.cloud.securitycenter_v1beta1.types import security_marks as gcs_security_marks
from google.cloud.securitycenter_v1beta1.types import securitycenter_service
from google.cloud.securitycenter_v1beta1.types import source
from google.cloud.securitycenter_v1beta1.types import source as gcs_source
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from .base import SecurityCenterTransport, DEFAULT_CLIENT_INFO
from .grpc import SecurityCenterGrpcTransport
class SecurityCenterGrpcAsyncIOTransport(SecurityCenterTransport):
"""gRPC AsyncIO backend transport for SecurityCenter.
V1 Beta APIs for Security Center service.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'securitycenter.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'securitycenter.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_source(self) -> Callable[
[securitycenter_service.CreateSourceRequest],
Awaitable[gcs_source.Source]]:
r"""Return a callable for the create source method over gRPC.
Creates a source.
Returns:
Callable[[~.CreateSourceRequest],
Awaitable[~.Source]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_source' not in self._stubs:
self._stubs['create_source'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/CreateSource',
request_serializer=securitycenter_service.CreateSourceRequest.serialize,
response_deserializer=gcs_source.Source.deserialize,
)
return self._stubs['create_source']
@property
def create_finding(self) -> Callable[
[securitycenter_service.CreateFindingRequest],
Awaitable[gcs_finding.Finding]]:
r"""Return a callable for the create finding method over gRPC.
Creates a finding. The corresponding source must
exist for finding creation to succeed.
Returns:
Callable[[~.CreateFindingRequest],
Awaitable[~.Finding]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_finding' not in self._stubs:
self._stubs['create_finding'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/CreateFinding',
request_serializer=securitycenter_service.CreateFindingRequest.serialize,
response_deserializer=gcs_finding.Finding.deserialize,
)
return self._stubs['create_finding']
@property
def get_iam_policy(self) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy on the specified
Source.
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_iam_policy' not in self._stubs:
self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/GetIamPolicy',
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['get_iam_policy']
@property
def get_organization_settings(self) -> Callable[
[securitycenter_service.GetOrganizationSettingsRequest],
Awaitable[organization_settings.OrganizationSettings]]:
r"""Return a callable for the get organization settings method over gRPC.
Gets the settings for an organization.
Returns:
Callable[[~.GetOrganizationSettingsRequest],
Awaitable[~.OrganizationSettings]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_organization_settings' not in self._stubs:
self._stubs['get_organization_settings'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/GetOrganizationSettings',
request_serializer=securitycenter_service.GetOrganizationSettingsRequest.serialize,
response_deserializer=organization_settings.OrganizationSettings.deserialize,
)
return self._stubs['get_organization_settings']
@property
def get_source(self) -> Callable[
[securitycenter_service.GetSourceRequest],
Awaitable[source.Source]]:
r"""Return a callable for the get source method over gRPC.
Gets a source.
Returns:
Callable[[~.GetSourceRequest],
Awaitable[~.Source]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_source' not in self._stubs:
self._stubs['get_source'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/GetSource',
request_serializer=securitycenter_service.GetSourceRequest.serialize,
response_deserializer=source.Source.deserialize,
)
return self._stubs['get_source']
@property
def group_assets(self) -> Callable[
[securitycenter_service.GroupAssetsRequest],
Awaitable[securitycenter_service.GroupAssetsResponse]]:
r"""Return a callable for the group assets method over gRPC.
Filters an organization's assets and groups them by
their specified properties.
Returns:
Callable[[~.GroupAssetsRequest],
Awaitable[~.GroupAssetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'group_assets' not in self._stubs:
self._stubs['group_assets'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/GroupAssets',
request_serializer=securitycenter_service.GroupAssetsRequest.serialize,
response_deserializer=securitycenter_service.GroupAssetsResponse.deserialize,
)
return self._stubs['group_assets']
@property
def group_findings(self) -> Callable[
[securitycenter_service.GroupFindingsRequest],
Awaitable[securitycenter_service.GroupFindingsResponse]]:
r"""Return a callable for the group findings method over gRPC.
Filters an organization or source's findings and groups them by
their specified properties.
To group across all sources provide a ``-`` as the source id.
Example:
/v1beta1/organizations/{organization_id}/sources/-/findings
Returns:
Callable[[~.GroupFindingsRequest],
Awaitable[~.GroupFindingsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'group_findings' not in self._stubs:
self._stubs['group_findings'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/GroupFindings',
request_serializer=securitycenter_service.GroupFindingsRequest.serialize,
response_deserializer=securitycenter_service.GroupFindingsResponse.deserialize,
)
return self._stubs['group_findings']
@property
def list_assets(self) -> Callable[
[securitycenter_service.ListAssetsRequest],
Awaitable[securitycenter_service.ListAssetsResponse]]:
r"""Return a callable for the list assets method over gRPC.
Lists an organization's assets.
Returns:
Callable[[~.ListAssetsRequest],
Awaitable[~.ListAssetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_assets' not in self._stubs:
self._stubs['list_assets'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/ListAssets',
request_serializer=securitycenter_service.ListAssetsRequest.serialize,
response_deserializer=securitycenter_service.ListAssetsResponse.deserialize,
)
return self._stubs['list_assets']
@property
def list_findings(self) -> Callable[
[securitycenter_service.ListFindingsRequest],
Awaitable[securitycenter_service.ListFindingsResponse]]:
r"""Return a callable for the list findings method over gRPC.
Lists an organization or source's findings.
To list across all sources provide a ``-`` as the source id.
Example:
/v1beta1/organizations/{organization_id}/sources/-/findings
Returns:
Callable[[~.ListFindingsRequest],
Awaitable[~.ListFindingsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_findings' not in self._stubs:
self._stubs['list_findings'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/ListFindings',
request_serializer=securitycenter_service.ListFindingsRequest.serialize,
response_deserializer=securitycenter_service.ListFindingsResponse.deserialize,
)
return self._stubs['list_findings']
@property
def list_sources(self) -> Callable[
[securitycenter_service.ListSourcesRequest],
Awaitable[securitycenter_service.ListSourcesResponse]]:
r"""Return a callable for the list sources method over gRPC.
Lists all sources belonging to an organization.
Returns:
Callable[[~.ListSourcesRequest],
Awaitable[~.ListSourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_sources' not in self._stubs:
self._stubs['list_sources'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/ListSources',
request_serializer=securitycenter_service.ListSourcesRequest.serialize,
response_deserializer=securitycenter_service.ListSourcesResponse.deserialize,
)
return self._stubs['list_sources']
@property
def run_asset_discovery(self) -> Callable[
[securitycenter_service.RunAssetDiscoveryRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the run asset discovery method over gRPC.
Runs asset discovery. The discovery is tracked with a
long-running operation.
This API can only be called with limited frequency for an
organization. If it is called too frequently the caller will
receive a TOO_MANY_REQUESTS error.
Returns:
Callable[[~.RunAssetDiscoveryRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'run_asset_discovery' not in self._stubs:
self._stubs['run_asset_discovery'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/RunAssetDiscovery',
request_serializer=securitycenter_service.RunAssetDiscoveryRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['run_asset_discovery']
@property
def set_finding_state(self) -> Callable[
[securitycenter_service.SetFindingStateRequest],
Awaitable[finding.Finding]]:
r"""Return a callable for the set finding state method over gRPC.
Updates the state of a finding.
Returns:
Callable[[~.SetFindingStateRequest],
Awaitable[~.Finding]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'set_finding_state' not in self._stubs:
self._stubs['set_finding_state'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/SetFindingState',
request_serializer=securitycenter_service.SetFindingStateRequest.serialize,
response_deserializer=finding.Finding.deserialize,
)
return self._stubs['set_finding_state']
@property
def set_iam_policy(self) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on the specified
Source.
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'set_iam_policy' not in self._stubs:
self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/SetIamPolicy',
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['set_iam_policy']
@property
def test_iam_permissions(self) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns the permissions that a caller has on the
specified source.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'test_iam_permissions' not in self._stubs:
self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/TestIamPermissions',
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs['test_iam_permissions']
@property
def update_finding(self) -> Callable[
[securitycenter_service.UpdateFindingRequest],
Awaitable[gcs_finding.Finding]]:
r"""Return a callable for the update finding method over gRPC.
Creates or updates a finding. The corresponding
source must exist for a finding creation to succeed.
Returns:
Callable[[~.UpdateFindingRequest],
Awaitable[~.Finding]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_finding' not in self._stubs:
self._stubs['update_finding'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/UpdateFinding',
request_serializer=securitycenter_service.UpdateFindingRequest.serialize,
response_deserializer=gcs_finding.Finding.deserialize,
)
return self._stubs['update_finding']
@property
def update_organization_settings(self) -> Callable[
[securitycenter_service.UpdateOrganizationSettingsRequest],
Awaitable[gcs_organization_settings.OrganizationSettings]]:
r"""Return a callable for the update organization settings method over gRPC.
Updates an organization's settings.
Returns:
Callable[[~.UpdateOrganizationSettingsRequest],
Awaitable[~.OrganizationSettings]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_organization_settings' not in self._stubs:
self._stubs['update_organization_settings'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/UpdateOrganizationSettings',
request_serializer=securitycenter_service.UpdateOrganizationSettingsRequest.serialize,
response_deserializer=gcs_organization_settings.OrganizationSettings.deserialize,
)
return self._stubs['update_organization_settings']
@property
def update_source(self) -> Callable[
[securitycenter_service.UpdateSourceRequest],
Awaitable[gcs_source.Source]]:
r"""Return a callable for the update source method over gRPC.
Updates a source.
Returns:
Callable[[~.UpdateSourceRequest],
Awaitable[~.Source]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_source' not in self._stubs:
self._stubs['update_source'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/UpdateSource',
request_serializer=securitycenter_service.UpdateSourceRequest.serialize,
response_deserializer=gcs_source.Source.deserialize,
)
return self._stubs['update_source']
@property
def update_security_marks(self) -> Callable[
[securitycenter_service.UpdateSecurityMarksRequest],
Awaitable[gcs_security_marks.SecurityMarks]]:
r"""Return a callable for the update security marks method over gRPC.
Updates security marks.
Returns:
Callable[[~.UpdateSecurityMarksRequest],
Awaitable[~.SecurityMarks]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_security_marks' not in self._stubs:
self._stubs['update_security_marks'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.v1beta1.SecurityCenter/UpdateSecurityMarks',
request_serializer=securitycenter_service.UpdateSecurityMarksRequest.serialize,
response_deserializer=gcs_security_marks.SecurityMarks.deserialize,
)
return self._stubs['update_security_marks']
__all__ = (
'SecurityCenterGrpcAsyncIOTransport',
)
| apache-2.0 | 8,748,963,436,555,073,000 | 44.908847 | 104 | 0.629351 | false |
solaris765/PythonProjects | rps.py | 1 | 2268 | '''
A simple Rock Paper Scissors game
Algorithm:
1. Have the user input r, p, or s
2. Have the computer choose a random integer (1-3)
3. Tell the user the result of the game
Todo:
- Finish mode business
- Detect if a user is repeatedly throwing the same thing and counter that
- Sweet graphics(?)
- ???
- Profit?
'''
#Housekeeping
from random import randint
from random import seed
from collections import Counter
seed()
a = 1
rock = 0
paper = 0
scissors = 0
playerlist = []
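#playerlist records every throw the player makes so it can be saved to a file on quit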
#Defining functions
def increment(x):
    #Function to increment global vars based on user input
if x == "r":
global rock
rock = rock+1
elif x == "p":
global paper
paper = paper+1
elif x == "s":
global scissors
scissors = scissors+1
def decon(x):
    #Function to convert x to a number for math
if x == "r":
x = 1
elif x == "p":
x = 2
elif x == "s":
x = 3
return x
def convert(x):
#Function to convert x to letters for printing
if x == 1:
x = "rock"
elif x == 2:
x = "paper"
elif x == 3:
x = "scissors"
return x
def save_presult():
    #Function to write the recorded player throws to a txt file
    out_file = open("player_rps_results.txt", "wt")
    out_file.write(', '.join(playerlist))
    out_file.write("\n")
    out_file.close()
def get_pmost_common():
    #Function to read the saved throws and return them ordered by frequency
    in_file = open("player_rps_results.txt", "rt")
    throws = in_file.read().strip().split(', ')
    in_file.close()
    return Counter(throws).most_common()
#The important stuff
print("input q to quit")
print("r, p, s")
names = ['rock', 'paper', 'scissors']
while a == 1:
x = str(input("Throw: "))
if x!="r" and x!="p" and x!="s" and x!="q":
continue
elif x == "r" or x == "s" or x == "p":
        increment(x)
        playerlist.append(x)
cpu = randint(1, 3)
player_result = ["ties with", "beats", "loses to"]
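        #(decon(x) - cpu) % 3 is 0 for a tie, 1 when the player wins and 2 when the player loses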
result = player_result[(decon(x) - cpu) % 3]
#Print result
        print(convert(decon(x)).capitalize() + " " + result + " " + convert(cpu))
elif x == "q":
print("Goodbye")
        #Save player results to txt file
        save_presult()
break
| unlicense | 4,312,985,369,166,982,700 | 22.381443 | 98 | 0.571429 | false |
PersianWikipedia/pywikibot-core | tests/textlib_tests.py | 1 | 75779 | # -*- coding: utf-8 -*-
"""Test textlib module."""
#
# (C) Pywikibot team, 2011-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import codecs
from collections import OrderedDict
import functools
import os
import re
import pywikibot
import pywikibot.textlib as textlib
from pywikibot.site import _IWEntry
from pywikibot.textlib import _MultiTemplateMatchBuilder, extract_sections
from pywikibot.tools import suppress_warnings
from pywikibot import UnknownSite
from tests.aspects import (
unittest, require_modules, TestCase, DefaultDrySiteTestCase,
PatchingTestCase, SiteAttributeTestCase,
)
from tests import mock
files = {}
dirname = os.path.join(os.path.dirname(__file__), 'pages')
for f in ['enwiki_help_editing']:
with codecs.open(os.path.join(dirname, f + '.page'),
'r', 'utf-8') as content:
files[f] = content.read()
class TestSectionFunctions(TestCase):
"""Test wikitext section handling function."""
net = False
def setUp(self):
"""Setup tests."""
self.catresult1 = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
super(TestSectionFunctions, self).setUp()
def contains(self, fn, sn):
"""Invoke does_text_contain_section()."""
return textlib.does_text_contain_section(
files[fn], sn)
def assertContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] contains sn."""
self.assertEqual(self.contains(fn, sn), True, *args, **kwargs)
def assertNotContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] does not contain sn."""
self.assertEqual(self.contains(fn, sn), False, *args, **kwargs)
def testCurrentBehaviour(self):
"""Test that 'Editing' is found."""
self.assertContains('enwiki_help_editing', 'Editing')
def testSpacesInSection(self):
"""Test with spaces in section."""
self.assertContains('enwiki_help_editing', 'Minor_edits')
self.assertNotContains('enwiki_help_editing', '#Minor edits',
"Incorrect, '#Minor edits' does not work")
self.assertNotContains('enwiki_help_editing', 'Minor Edits',
'section hashes are case-sensitive')
self.assertNotContains('enwiki_help_editing', 'Minor_Edits',
'section hashes are case-sensitive')
@unittest.expectedFailure # TODO: T133276
def test_encoded_chars_in_section(self):
"""Test encoded chars in section."""
self.assertContains(
'enwiki_help_editing', 'Talk_.28discussion.29_pages',
'As used in the TOC')
def test_underline_characters_in_section(self):
"""Test with underline chars in section."""
self.assertContains('enwiki_help_editing', 'Talk_(discussion)_pages',
'Understood by mediawiki')
def test_spaces_outside_section(self):
"""Test with spaces around section."""
self.assertContains('enwiki_help_editing', 'Naming and_moving')
self.assertContains('enwiki_help_editing', ' Naming and_moving ')
self.assertContains('enwiki_help_editing', ' Naming and_moving_')
def test_link_in_section(self):
"""Test with link inside section."""
# section is ==[[Wiki markup]]==
self.assertContains('enwiki_help_editing', '[[Wiki markup]]',
'Link as section header')
self.assertContains('enwiki_help_editing', '[[:Wiki markup]]',
                            'section header link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Wiki markup',
'section header must be a link')
# section is ===[[:Help]]ful tips===
self.assertContains('enwiki_help_editing', '[[Help]]ful tips',
'Containing link')
self.assertContains('enwiki_help_editing', '[[:Help]]ful tips',
                            'Containing link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Helpful tips',
'section header must contain a link')
class TestFormatInterwiki(TestCase):
"""Test format functions."""
family = 'wikipedia'
code = 'en'
cached = True
def test_interwiki_format_Page(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Page(pywikibot.Link('de:German', self.site)),
'fr': pywikibot.Page(pywikibot.Link('fr:French', self.site))
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
def test_interwiki_format_Link(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Link('de:German', self.site),
'fr': pywikibot.Link('fr:French', self.site),
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
class TestFormatCategory(DefaultDrySiteTestCase):
"""Test category formatting."""
catresult = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
def test_category_format_raw(self):
"""Test formatting categories as strings formatted as links."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['[[Category:Cat1]]',
'[[Category:Cat2]]'],
self.site))
def test_category_format_bare(self):
"""Test formatting categories as strings."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['Cat1', 'Cat2'], self.site))
def test_category_format_Category(self):
"""Test formatting categories as Category instances."""
data = [pywikibot.Category(self.site, 'Cat1'),
pywikibot.Category(self.site, 'Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
def test_category_format_Page(self):
"""Test formatting categories as Page instances."""
data = [pywikibot.Page(self.site, 'Category:Cat1'),
pywikibot.Page(self.site, 'Category:Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
class TestCategoryRearrangement(DefaultDrySiteTestCase):
"""
Ensure that sorting keys are not being lost.
Tests .getCategoryLinks() and .replaceCategoryLinks(),
with both a newline and an empty string as separators.
"""
old = ('[[Category:Cat1]]\n[[Category:Cat2|]]\n'
'[[Category:Cat1| ]]\n[[Category:Cat2|key]]')
def test_standard_links(self):
"""Test getting and replacing categories."""
cats = textlib.getCategoryLinks(self.old, site=self.site)
new = textlib.replaceCategoryLinks(self.old, cats, site=self.site)
self.assertEqual(self.old, new)
def test_indentation(self):
"""Test indentation from previous block."""
# Block of text
old = 'Some text\n\n' + self.old
cats = textlib.getCategoryLinks(old, site=self.site)
new = textlib.replaceCategoryLinks(old, cats, site=self.site)
self.assertEqual(old, new)
# DEFAULTSORT
old_ds = '{{DEFAULTSORT:key}}\n' + self.old
cats_ds = textlib.getCategoryLinks(old_ds, site=self.site)
new_ds = textlib.replaceCategoryLinks(old_ds, cats_ds, site=self.site)
self.assertEqual(old_ds, new_ds)
def test_in_place_replace(self):
"""Test in-place category change is reversible."""
dummy = pywikibot.Category(self.site, 'foo')
dummy.sortKey = 'bah'
cats = textlib.getCategoryLinks(self.old, site=self.site)
# Sanity checking
temp = textlib.replaceCategoryInPlace(self.old, cats[0], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[0],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[1], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[1],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[2], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[2],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[3],
site=self.site)
self.assertEqual(self.old, new)
# Testing removing categories
temp = textlib.replaceCategoryInPlace(self.old, cats[0],
None, site=self.site)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertNotIn(cats[0], temp_cats)
# First and third categories are the same
self.assertEqual([cats[1], cats[3]], temp_cats)
# Testing adding categories
temp = textlib.replaceCategoryInPlace(
self.old, cats[0], cats[1], site=self.site,
add_only=True)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertEqual([cats[0], cats[1], cats[1],
cats[2], cats[1], cats[3]], temp_cats)
new_cats = textlib.getCategoryLinks(new, site=self.site)
self.assertEqual(cats, new_cats)
def test_in_place_retain_sort(self):
"""Test in-place category change does not alter the sortkey."""
# sort key should be retained when the new cat sortKey is None
dummy = pywikibot.Category(self.site, 'foo')
self.assertIsNone(dummy.sortKey)
cats = textlib.getCategoryLinks(self.old, site=self.site)
self.assertEqual(cats[3].sortKey, 'key')
orig_sortkey = cats[3].sortKey
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(self.old, temp)
new_dummy = textlib.getCategoryLinks(temp, site=self.site)[3]
self.assertIsNotNone(new_dummy.sortKey)
self.assertEqual(orig_sortkey, new_dummy.sortKey)
class TestTemplatesInCategory(TestCase):
"""Tests to verify that templates in category links are handled."""
family = 'wikipedia'
code = 'en'
cached = True
def test_templates(self):
"""Test normal templates inside category links."""
self.site = self.get_site()
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}|bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|{{P2|L33t|Foo}}}}|bar]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]][[Category:Wiki{{P2||pedia}}]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar'),
pywikibot.page.Category(self.site, 'Wikipedia')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}and{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='and|bar')])
with mock.patch.object(pywikibot, 'warning', autospec=True) as warn:
textlib.getCategoryLinks('[[Category:nasty{{{!}}]]', self.site)
warn.assert_called_once_with(
'Invalid category title extracted: nasty{{{!}}')
class TestTemplateParams(TestCase):
"""Test to verify that template params extraction works."""
net = False
def _common_results(self, func):
"""Common cases."""
self.assertEqual(func('{{a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{a|b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b|c=d}}'),
[('a', OrderedDict((('1', 'b'), ('c', 'd'))))])
self.assertEqual(func('{{a|b=c|f=g|d=e|1=}}'),
[('a', OrderedDict((('b', 'c'), ('f', 'g'),
('d', 'e'), ('1', ''))))])
self.assertEqual(func('{{a|1=2|c=d}}'),
[('a', OrderedDict((('1', '2'), ('c', 'd'))))])
self.assertEqual(func('{{a|c=d|1=2}}'),
[('a', OrderedDict((('c', 'd'), ('1', '2'))))])
self.assertEqual(func('{{a|5=d|a=b}}'),
[('a', OrderedDict((('5', 'd'), ('a', 'b'))))])
self.assertEqual(func('{{a|=2}}'),
[('a', OrderedDict((('', '2'), )))])
self.assertEqual(func('{{a|}}'), [('a', OrderedDict((('1', ''), )))])
self.assertEqual(func('{{a|=|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a||}}'),
[('a', OrderedDict((('1', ''), ('2', ''))))])
self.assertEqual(func('{{a|b={{{1}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'), )))])
self.assertEqual(func('{{a|b=<noinclude>{{{1}}}</noinclude>}}'),
[('a', OrderedDict(
(('b', '<noinclude>{{{1}}}</noinclude>'), )))])
self.assertEqual(func('{{subst:a|b=c}}'),
[('subst:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{safesubst:a|b=c}}'),
[('safesubst:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{msgnw:a|b=c}}'),
[('msgnw:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{Template:a|b=c}}'),
[('Template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{template:a|b=c}}'),
[('template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{:a|b=c}}'),
[(':a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{subst::a|b=c}}'),
[('subst::a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b={{{1}}}|c={{{2}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'),
('c', '{{{2}}}'))))])
self.assertEqual(func('{{a|b=c}}{{d|e=f}}'),
[('a', OrderedDict((('b', 'c'), ))),
('d', OrderedDict((('e', 'f'), )))])
# initial '{' and '}' should be ignored as outer wikitext
self.assertEqual(func('{{{a|b}}X}'),
[('a', OrderedDict((('1', 'b'), )))])
# sf.net bug 1575: unclosed template
self.assertEqual(func('{{a'), [])
self.assertEqual(func('{{a}}{{foo|'), [('a', OrderedDict())])
def _unstripped(self, func):
"""Common cases of unstripped results."""
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', '<!--{{{1}}}-->'), )))])
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict(((' ', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict(((' b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b ', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', ' c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c '), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' bar '))))])
# The correct entry 'bar' is removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
# However whitespace prevents the correct item from being removed
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '), (' 2 ', ' bar '),
('2', ' baz '))))])
def _stripped(self, func):
"""Common cases of stripped results."""
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '), ('2', 'bar'))))])
# 'bar' is always removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
def _etp_regex_differs(self, func):
"""Common cases not handled the same by ETP_REGEX."""
# inner {} should be treated as part of the value
self.assertEqual(func('{{a|b={} }}'),
[('a', OrderedDict((('b', '{} '), )))])
def _order_differs(self, func):
"""Common cases where the order of templates differs."""
self.assertCountEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), ))),
('c', OrderedDict())])
self.assertCountEqual(func('{{a|{{c|d}}}}'),
[('c', OrderedDict((('1', 'd'), ))),
('a', OrderedDict([('1', '{{c|d}}')]))])
# inner '}' after {{b|c}} should be treated as wikitext
self.assertCountEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b|c}}}'),
('2', 'd')])),
('b', OrderedDict([('1', 'c')]))])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh(self):
"""Test using mwparserfromhell."""
func = textlib.extract_templates_and_params_mwpfh
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh_stripped(self):
"""Test using mwparserfromhell with stripping."""
func = functools.partial(textlib.extract_templates_and_params_mwpfh,
strip=True)
self._common_results(func)
self._order_differs(func)
self._stripped(func)
def test_extract_templates_params_regex(self):
"""Test using many complex regexes."""
func = functools.partial(textlib.extract_templates_and_params_regex,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self.assertEqual(func('{{a|b={} }}'), []) # FIXME: {} is normal text
def test_extract_templates_params_regex_stripped(self):
"""Test using many complex regexes with stripping."""
func = textlib.extract_templates_and_params_regex
self._common_results(func)
self._order_differs(func)
self._stripped(func)
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', ''), )))])
# Identical to mwpfh
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
# However fails to correctly handle three levels of balanced brackets
# with empty parameters
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}}'), ))),
('d', OrderedDict([('1', '}')]))
])
def test_extract_templates_params(self):
"""Test that the normal entry point works."""
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._unstripped(func)
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=True)
self._common_results(func)
self._stripped(func)
def test_template_simple_regex(self):
"""Test using simple regex."""
func = textlib.extract_templates_and_params_regex_simple
self._common_results(func)
self._etp_regex_differs(func)
# The simple regex copies the whitespace of mwpfh, but does
# not have additional entries for nested templates.
self.assertEqual(func('{{a| b={{c}}}}'),
[('a', OrderedDict(((' b', '{{c}}'), )))])
self.assertEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), )))])
self.assertEqual(func('{{a|b= {{c}}}}'),
[('a', OrderedDict((('b', ' {{c}}'), )))])
self.assertEqual(func('{{a|b={{c}} }}'),
[('a', OrderedDict((('b', '{{c}} '), )))])
# These three are from _order_differs, and while the first works
self.assertEqual(func('{{a|{{c}} }}'),
[('a', OrderedDict((('1', '{{c}} '), )))])
# an inner '|' causes extract_template_and_params_regex_simple to
# split arguments incorrectly in the next two cases.
self.assertEqual(func('{{a|{{c|d}} }}'),
[('a', OrderedDict([('1', '{{c'),
('2', 'd}} ')]))])
self.assertEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b'),
('2', 'c}}}'),
('3', 'd')]))])
# Safe fallback to handle arbitrary template levels
# by merging top level templates together.
# i.e. 'b' is not recognised as a template, and 'foo' is also
# consumed as part of 'a'.
self.assertEqual(func('{{a|{{c|{{d|{{e|}}}} }} }} foo {{b}}'),
[(None, OrderedDict())])
def test_regexes(self):
"""Test _ETP_REGEX, NESTED_TEMPLATE_REGEX and TEMP_REGEX."""
func = textlib._ETP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
func = textlib._ETP_REGEX.match
self.assertIsNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{#if:foo}}'))
self.assertIsNotNone(func('{{foo:}}'))
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{1}}'))
self.assertIsNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNone(func('{{a|b={{{1}}} }}'))
self.assertIsNone(func('{{a|b={{c}} }}'))
self.assertIsNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|b={} }}'))
self.assertIsNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
with suppress_warnings('textlib.TEMP_REGEX is deprecated'):
func = textlib.TEMP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
with suppress_warnings('textlib.TEMP_REGEX is deprecated'):
func = textlib.TEMP_REGEX.match
self.assertIsNotNone(func('{{#if:foo}}'))
self.assertIsNotNone(func('{{foo:}}'))
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNone(func('{{a|b={{c}} }}'))
self.assertIsNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
func = textlib.NESTED_TEMPLATE_REGEX.search
# Numerically named templates are rejected
self.assertIsNone(func('{{1}}'))
self.assertIsNone(func('{{#if:foo}}'))
self.assertIsNone(func('{{{1}}}'))
self.assertIsNone(func('{{{1|}}}'))
self.assertIsNone(func('{{{15|a}}}'))
self.assertIsNone(func('{{{1|{{{2|a}}} }}}'))
self.assertIsNone(func('{{{1|{{2|a}} }}}'))
func = textlib.NESTED_TEMPLATE_REGEX.match
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{foo:bar}}'))
self.assertIsNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNotNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
# All templates are captured when template depth is greater than 2
m = func('{{a|{{c|{{d|}} }} | foo = bar }} foo {{bar}} baz')
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
m = func('{{a|\n{{c|{{d|}} }}\n| foo = bar }} foo {{bar}} baz')
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
class TestGenericTemplateParams(PatchingTestCase):
"""Test whether the generic function forwards the call correctly."""
net = False
@PatchingTestCase.patched(textlib, 'extract_templates_and_params_mwpfh')
def extract_mwpfh(self, text, *args, **kwargs):
"""Patched call to extract_templates_and_params_mwpfh."""
self._text = text
self._args = args
self._mwpfh = True
@PatchingTestCase.patched(textlib, 'extract_templates_and_params_regex')
def extract_regex(self, text, *args, **kwargs):
"""Patched call to extract_templates_and_params_regex."""
self._text = text
self._args = args
self._mwpfh = False
def test_removing_disabled_parts_regex(self):
"""Test removing disabled parts when using the regex variant."""
self.patch(textlib, 'mwparserfromhell', Exception())
textlib.extract_templates_and_params('{{a<!-- -->}}', True)
self.assertEqual(self._text, '{{a}}')
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}', False)
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}')
self.assertEqual(self._text, '{{a}}')
self.assertFalse(self._mwpfh)
@require_modules('mwparserfromhell')
def test_removing_disabled_parts_mwpfh(self):
"""Test removing disabled parts when using the mwpfh variant."""
textlib.extract_templates_and_params('{{a<!-- -->}}', True)
self.assertEqual(self._text, '{{a}}')
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}', False)
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}')
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertTrue(self._mwpfh)
def test_strip_regex(self):
"""Test stripping values when using the regex variant."""
self.patch(textlib, 'mwparserfromhell', Exception())
textlib.extract_templates_and_params('{{a| foo }}', False, True)
self.assertEqual(self._args, (False, True))
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', False, False)
self.assertEqual(self._args, (False, False))
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', False)
self.assertEqual(self._args, (False, True))
self.assertFalse(self._mwpfh)
@require_modules('mwparserfromhell')
def test_strip_mwpfh(self):
"""Test stripping values when using the mwpfh variant."""
textlib.extract_templates_and_params('{{a| foo }}', None, True)
self.assertEqual(self._args, (True, ))
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', None, False)
self.assertEqual(self._args, (False, ))
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}')
self.assertEqual(self._args, (False, ))
self.assertTrue(self._mwpfh)
class TestReplaceLinks(TestCase):
"""Test the replace_links function in textlib."""
sites = {
'wt': {
'family': 'wiktionary',
'code': 'en',
},
'wp': {
'family': 'wikipedia',
'code': 'en',
}
}
dry = True
text = ('Hello [[World]], [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
@classmethod
def setUpClass(cls):
"""Create a fake interwiki cache."""
super(TestReplaceLinks, cls).setUpClass()
# make APISite.interwiki work and prevent it from doing requests
for site in cls.sites.values():
mapping = {}
for iw in cls.sites.values():
mapping[iw['family']] = _IWEntry(True, 'invalid')
mapping[iw['family']]._site = iw['site']
mapping['bug'] = _IWEntry(False, 'invalid')
mapping['bug']._site = UnknownSite('Not a wiki')
mapping['en'] = _IWEntry(True, 'invalid')
mapping['en']._site = site['site']
site['site']._interwikimap._map = mapping
site['site']._interwikimap._site = None # prevent it from loading
cls.wp_site = cls.get_site('wp')
def test_replacements_function(self):
"""Test a dynamic function as the replacements."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
return pywikibot.Link('Homeworld', link.site)
elif link.title.lower() == 'you':
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[Homeworld]], [[how|are]] you? Are you a [[bug:1337]]?')
def test_replacements_once(self):
"""Test dynamic replacement."""
def callback(link, text, groups, rng):
if link.title.lower() == 'you':
self._count += 1
if link.section:
return pywikibot.Link(
'{0}#{1}'
.format(self._count, link.section), link.site)
else:
return pywikibot.Link('{0}'
.format(self._count), link.site)
self._count = 0 # buffer number of found instances
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[World]], [[how|are]] [[1#section]]? Are [[2]] a '
'[[bug:1337]]?')
del self._count
def test_unlink_all(self):
"""Test unlinking."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello World, are you? Are you a [[bug:1337]]?')
def test_unlink_some(self):
"""Test unlinking only some links."""
self.assertEqual(
textlib.replace_links(self.text, ('World', False), self.wp_site),
'Hello World, [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
self.assertEqual(
textlib.replace_links('[[User:Namespace|Label]]\n'
'[[User:Namespace#Section|Labelz]]\n'
'[[Nothing]]',
('User:Namespace', False),
self.wp_site),
'Label\nLabelz\n[[Nothing]]')
def test_replace_neighbour(self):
"""Test that it replaces two neighbouring links."""
self.assertEqual(
textlib.replace_links('[[A]][[A]][[C]]',
('A', 'B'),
self.wp_site),
'[[B|A]][[B|A]][[C]]')
def test_replacements_simplify(self):
"""Test a tuple as replacement removing the need for a piped link."""
self.assertEqual(
textlib.replace_links(self.text,
('how', 'are'),
self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_file(self):
"""Test that it respects the namespace."""
self.assertEqual(
textlib.replace_links(
'[[File:Meh.png|thumb|Description of [[fancy]]]] '
'[[Fancy]]...', ('File:Meh.png', 'File:Fancy.png'),
self.wp_site),
'[[File:Fancy.png|thumb|Description of [[fancy]]]] [[Fancy]]...')
def test_replace_strings(self):
"""Test if strings can be used."""
self.assertEqual(
textlib.replace_links(self.text, ('how', 'are'), self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_invalid_link_text(self):
"""Test that it doesn't pipe a link when it's an invalid link."""
self.assertEqual(
textlib.replace_links('[[Target|Foo:]]', ('Target', 'Foo'),
self.wp_site), '[[Foo|Foo:]]')
def test_replace_modes(self):
"""Test replacing with or without label and section."""
source_text = '[[Foo#bar|baz]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'), self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site, 'Bar')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar', self.wp_site)),
self.wp_site),
'[[Bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu'),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu',
self.wp_site)),
self.wp_site),
'[[Bar#snafu]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar|foo',
self.wp_site)),
self.wp_site),
'[[Bar|foo]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu|foo',
self.wp_site)),
self.wp_site),
'[[Bar#snafu|foo]]')
def test_replace_different_case(self):
"""Test that it uses piped links when the case is different."""
source_text = '[[Foo|Bar]] and [[Foo|bar]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wp')),
'[[Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wt')),
'[[bar|Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'),
self.get_site('wt')),
'[[Bar]] and [[Bar|bar]]')
@unittest.expectedFailure
def test_label_diff_namespace(self):
"""Test that it uses the old label when the new doesn't match."""
# These tests require to get the actual part which is before the title
# (interwiki and namespace prefixes) which could be then compared
# case insensitive.
self.assertEqual(
textlib.replace_links('[[Image:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|Image:Foobar]]')
self.assertEqual(
textlib.replace_links('[[en:File:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|en:File:Foobar]]')
def test_linktrails(self):
"""Test that the linktrails are used or applied."""
self.assertEqual(
textlib.replace_links('[[Foobar]]', ('Foobar', 'Foo'),
self.wp_site),
'[[Foo]]bar')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Talk:Tests'), self.wp_site),
'[[Talk:tests]]')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Project:Tests'),
self.wp_site),
'[[Project:Tests|Talk:tests]]')
def test_unicode_callback(self):
"""Test returning unicode in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a unicode instance not bytes
return 'homewörlder'
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello homewörlder, [[how|are]] [[you#section|you]]? '
'Are [[you]] a [[bug:1337]]?')
def test_bytes_callback(self):
"""Test returning bytes in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a bytes instance not unicode
return b'homeworlder'
self.assertRaisesRegex(
ValueError, r'unicode \(str.*bytes \(str',
textlib.replace_links, self.text, callback, self.wp_site)
def test_replace_interwiki_links(self):
"""Make sure interwiki links can not be replaced."""
link = '[[fr:how]]'
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('de:how', 'de:are'), self.wp_site),
link)
class TestReplaceLinksNonDry(TestCase):
"""Test the replace_links function in textlib non-dry."""
family = 'wikipedia'
code = 'en'
cached = True
def test_replace_interlanguage_links(self):
"""Test replacing interlanguage links."""
link = '[[:fr:how]]'
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('how', ':de:are'),
self.site),
link)
self.assertEqual(
textlib.replace_links(link, (':de:how', ':de:are'),
self.site),
link)
class TestLocalDigits(TestCase):
"""Test to verify that local digits are correctly being handled."""
net = False
def test_to_local(self):
"""Test converting Latin digits to local digits."""
self.assertEqual(textlib.to_local_digits(299792458, 'en'), 299792458)
self.assertEqual(
textlib.to_local_digits(299792458, 'fa'), '۲۹۹۷۹۲۴۵۸')
self.assertEqual(
textlib.to_local_digits(
'299792458 flash', 'fa'), '۲۹۹۷۹۲۴۵۸ flash')
self.assertEqual(
textlib.to_local_digits(
'299792458', 'km'), '២៩៩៧៩២៤៥៨')
class TestReplaceExcept(DefaultDrySiteTestCase):
"""Test to verify the replacements with exceptions are done correctly."""
def test_no_replace(self):
"""Test replacing when the old text does not match."""
self.assertEqual(textlib.replaceExcept('12345678', 'x', 'y', [],
site=self.site),
'12345678')
def test_simple_replace(self):
"""Test replacing without regex."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxxB', 'x', 'y', [],
site=self.site),
'AyyB')
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
site=self.site),
'AyyyB')
def test_regex_replace(self):
"""Test replacing with a regex."""
self.assertEqual(textlib.replaceExcept('A123B', r'\d', r'x', [],
site=self.site),
'AxxxB')
self.assertEqual(textlib.replaceExcept('A123B', r'\d+', r'x', [],
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('A123B',
r'A(\d)2(\d)B', r'A\1x\2B', [],
site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('', r'(a?)', r'\1B', [], site=self.site),
'B')
self.assertEqual(
textlib.replaceExcept('abc', r'x*', r'-', [], site=self.site),
'-a-b-c-')
# This is different from re.sub() as re.sub() doesn't
# allow None groups
self.assertEqual(
textlib.replaceExcept('', r'(a)?', r'\1\1', [], site=self.site),
'')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(\d)2(\d)B', r'A\g<1>x\g<2>B',
[], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(?P<b>\d)B',
r'A\g<a>x\g<b>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\g<2>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\2B', [], site=self.site),
'A1x3B')
# test regex with lookbehind.
self.assertEqual(
textlib.replaceExcept('A behindB C', r'(?<=behind)\w',
r'Z', [], site=self.site),
'A behindZ C')
# test regex with lookbehind and groups.
self.assertEqual(
textlib.replaceExcept('A behindB C D', r'(?<=behind)\w( )',
r'\g<1>Z', [], site=self.site),
'A behind ZC D')
# test regex with lookahead.
self.assertEqual(
textlib.replaceExcept('A Bahead C', r'\w(?=ahead)',
r'Z', [], site=self.site),
'A Zahead C')
# test regex with lookahead and groups.
self.assertEqual(
textlib.replaceExcept('A Bahead C D', r'( )\w(?=ahead)',
r'Z\g<1>', [], site=self.site),
'AZ ahead C D')
def test_case_sensitive(self):
"""Test replacing with different case sensitivity."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=False,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=False,
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
def test_replace_with_marker(self):
"""Test replacing with a marker."""
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
marker='.',
site=self.site),
'Ayyy.B')
self.assertEqual(textlib.replaceExcept('AxyxB', '1', 'y', [],
marker='.',
site=self.site),
'AxyxB.')
def test_overlapping_replace(self):
"""Test replacing with and without overlap."""
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=False,
site=self.site),
'2121')
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=True,
site=self.site),
'2221')
def test_replace_exception(self):
"""Test replacing not inside a specific regex."""
self.assertEqual(textlib.replaceExcept('123x123', '123', '000', [],
site=self.site),
'000x000')
self.assertEqual(textlib.replaceExcept('123x123', '123', '000',
[re.compile(r'\w123')],
site=self.site),
'000x123')
def test_replace_tags(self):
"""Test replacing not inside various tags."""
self.assertEqual(textlib.replaceExcept('A <!-- x --> B', 'x', 'y',
['comment'], site=self.site),
'A <!-- x --> B')
self.assertEqual(textlib.replaceExcept('\n==x==\n', 'x', 'y',
['header'], site=self.site),
'\n==x==\n')
self.assertEqual(textlib.replaceExcept('\n<!--'
'\ncomment-->==x==<!--comment'
'\n-->\n', 'x', 'y',
['header'], site=self.site),
'\n<!--\ncomment-->==x==<!--comment\n-->\n')
self.assertEqual(textlib.replaceExcept('<pre>x</pre>', 'x', 'y',
['pre'], site=self.site),
'<pre>x</pre>')
self.assertEqual(textlib.replaceExcept('<nowiki >x</nowiki >x',
'x', 'y', ['nowiki'],
site=self.site),
'<nowiki >x</nowiki >y') # T191559
self.assertEqual(textlib.replaceExcept('<source lang="xml">x</source>',
'x', 'y', ['source'],
site=self.site),
'<source lang="xml">x</source>')
self.assertEqual(textlib.replaceExcept('<source>x</source>',
'x', 'y', ['source'],
site=self.site),
'<source>x</source>')
self.assertEqual(textlib.replaceExcept(
'<syntaxhighlight lang="xml">x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight lang="xml">x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept('<syntaxhighlight>x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight>x</syntaxhighlight>')
self.assertEqual(textlib.replaceExcept('<includeonly>x</includeonly>',
'x', 'y', ['includeonly'],
site=self.site),
'<includeonly>x</includeonly>')
self.assertEqual(textlib.replaceExcept('<ref>x</ref>', 'x', 'y',
['ref'], site=self.site),
'<ref>x</ref>')
self.assertEqual(textlib.replaceExcept('<ref name="x">A</ref>',
'x', 'y',
['ref'], site=self.site),
'<ref name="x">A</ref>')
self.assertEqual(textlib.replaceExcept(' xA ', 'x', 'y',
['startspace'], site=self.site),
' xA ')
self.assertEqual(textlib.replaceExcept(':xA ', 'x', 'y',
['startcolon'], site=self.site),
':xA ')
self.assertEqual(textlib.replaceExcept('<table>x</table>', 'x', 'y',
['table'], site=self.site),
'<table>x</table>')
self.assertEqual(textlib.replaceExcept('x [http://www.sample.com x]',
'x', 'y', ['hyperlink'],
site=self.site),
'y [http://www.sample.com y]')
self.assertEqual(textlib.replaceExcept(
'x http://www.sample.com/x.html', 'x', 'y',
['hyperlink'], site=self.site), 'y http://www.sample.com/x.html')
self.assertEqual(textlib.replaceExcept('<gallery>x</gallery>',
'x', 'y', ['gallery'],
site=self.site),
'<gallery>x</gallery>')
self.assertEqual(textlib.replaceExcept('[[x]]', 'x', 'y', ['link'],
site=self.site),
'[[x]]')
self.assertEqual(textlib.replaceExcept('{{#property:p171}}', '1', '2',
['property'], site=self.site),
'{{#property:p171}}')
self.assertEqual(textlib.replaceExcept('{{#invoke:x}}', 'x', 'y',
['invoke'], site=self.site),
'{{#invoke:x}}')
self.assertEqual(
textlib.replaceExcept(
'<ref name=etwa /> not_in_ref <ref> in_ref </ref>',
'not_in_ref', 'text', ['ref'], site=self.site),
'<ref name=etwa /> text <ref> in_ref </ref>')
self.assertEqual(
textlib.replaceExcept(
'<ab> content </a>', 'content', 'text', ['a'], site=self.site),
'<ab> text </a>')
def test_replace_with_count(self):
"""Test replacing with count argument."""
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=5),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=2),
'y [[y]] x x')
self.assertEqual(textlib.replaceExcept(
'x [[x]] x x', 'x', 'y', ['link'], site=self.site, count=2),
'y [[x]] y x')
def test_replace_tag_category(self):
"""Test replacing not inside category links."""
for ns_name in self.site.namespaces[14]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['category'],
site=self.site),
'[[{}:x]]'.format(ns_name))
def test_replace_tag_file(self):
"""Test replacing not inside file links."""
for ns_name in self.site.namespaces[6]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['file'],
site=self.site),
'[[{}:x]]'.format(ns_name))
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo|bar x]] x',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo|bar x]] y')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]][[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]][[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[NonFile:x]]',
'x', 'y', ['file'], site=self.site),
'[[NonFile:y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:]]',
'File:', 'NonFile:', ['file'], site=self.site),
'[[File:]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|[[foo]].]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|[[foo]].]]')
# ensure only links inside file are captured
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
# Correctly handle single brackets in the text.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [bar].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [bar].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[bar] [[foo]] .x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[bar] [[foo]] .x]][[y]]')
def test_replace_tag_file_invalid(self):
"""Test replacing not inside file links with invalid titles."""
# Correctly handle [ and ] inside wikilinks inside file link
# even though these are an invalid title.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid ]].x]][[y]]')
@unittest.expectedFailure
def test_replace_tag_file_failure(self):
"""Test showing limits of the file link regex."""
# When the double brackets are unbalanced, the regex
# does not correctly detect the end of the file link.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [[invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
def test_replace_tags_interwiki(self):
"""Test replacing not inside interwiki links."""
if ('es' not in self.site.family.langs
or 'ey' in self.site.family.langs):
raise unittest.SkipTest("family {} doesn't have languages"
.format(self.site))
self.assertEqual(textlib.replaceExcept('[[es:s]]', 's', 't',
['interwiki'], site=self.site),
'[[es:s]]') # "es" is a valid interwiki code
self.assertEqual(textlib.replaceExcept('[[ex:x]]', 'x', 'y',
['interwiki'], site=self.site),
'[[ey:y]]') # "ex" is not a valid interwiki code
def test_replace_template(self):
"""Test replacing not inside templates."""
template_sample = (r'a {{templatename '
r' | accessdate={{Fecha|1993}} '
r' |atitle=The [[real title]] }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{a}}2{{a}} '
r' | 2={{a}}1{{a}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{{a}}}2{{{a}}} '
r' | 2={{{a}}}1{{{a}}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
# sf.net bug 1575: unclosed template
template_sample = template_sample[:-2]
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
def test_replace_source_reference(self):
"""Test replacing in text which contains back references."""
# Don't use a valid reference number in the original string,
# in case it tries to apply that as a reference.
self.assertEqual(textlib.replaceExcept(r'\42', r'^(.*)$', r'X\1X',
[], site=self.site),
r'X\42X')
self.assertEqual(textlib.replaceExcept(
r'\g<bar>', r'^(?P<foo>.*)$', r'X\g<foo>X', [], site=self.site),
r'X\g<bar>X')
class TestMultiTemplateMatchBuilder(DefaultDrySiteTestCase):
"""Test _MultiTemplateMatchBuilder."""
@classmethod
def setUpClass(cls):
"""Cache namespace 10 (Template) case sensitivity."""
super(TestMultiTemplateMatchBuilder, cls).setUpClass()
cls._template_not_case_sensitive = (
cls.get_site().namespaces.TEMPLATE.case != 'case-sensitive')
def test_no_match(self):
"""Test text without any desired templates."""
string = 'The quick brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNone(re.search(builder.pattern('quick'), string))
def test_match(self):
"""Test text with one match without parameters."""
string = 'The {{quick}} brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_with_params(self):
"""Test text with one match with parameters."""
string = 'The {{quick|brown}} fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_msg(self):
"""Test text with {{msg:..}}."""
string = 'The {{msg:quick}} brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_template_prefix(self):
"""Test pages with {{template:..}}."""
string = 'The {{%s:%s}} brown fox'
template = 'template'
builder = _MultiTemplateMatchBuilder(self.site)
if self._template_not_case_sensitive:
quick_list = ('quick', 'Quick')
else:
quick_list = ('quick', )
for t in (template.upper(), template.lower(), template.title()):
for q in quick_list:
self.assertIsNotNone(re.search(builder.pattern('quick'),
string % (t, q)))
self.assertEqual(bool(re.search(builder.pattern('Quick'),
string % (t, q))),
self._template_not_case_sensitive)
class TestGetLanguageLinks(SiteAttributeTestCase):
"""Test L{textlib.getLanguageLinks} function."""
sites = {
'enwp': {
'family': 'wikipedia',
'code': 'en',
},
'dewp': {
'family': 'wikipedia',
'code': 'de',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
example_text = ('[[en:Site]] [[de:Site|Piped]] [[commons:Site]] '
'[[baden:Site]] [[fr:{{PAGENAME}}]]')
@classmethod
def setUpClass(cls):
"""Define set of valid targets for the example text."""
super(TestGetLanguageLinks, cls).setUpClass()
cls.sites_set = {cls.enwp, cls.dewp}
def test_getLanguageLinks(self, key):
"""Test if the function returns the correct titles and sites."""
with mock.patch('pywikibot.output') as m:
lang_links = textlib.getLanguageLinks(self.example_text,
self.site)
m.assert_called_once_with(
'[getLanguageLinks] Text contains invalid interwiki link '
'[[fr:{{PAGENAME}}]].')
self.assertEqual({page.title() for page in lang_links.values()},
{'Site'})
self.assertEqual(set(lang_links), self.sites_set - {self.site})
class TestUnescape(TestCase):
"""Test to verify that unescaping HTML chars are correctly done."""
net = False
def test_unescape(self):
"""Test unescaping HTML chars."""
self.assertEqual(textlib.unescape('!23<>'"&&'),
'!23<>\'"&&')
class TestStarList(TestCase):
"""Test starlist."""
net = False
def test_basic(self):
"""Test standardizing {{linkfa}} without parameters."""
self.assertEqual(
'foo\n{{linkfa}}\nbar\n\n',
textlib.standardize_stars('foo\n{{linkfa}}\nbar'))
def test_with_params(self):
"""Test standardizing text with {{linkfa|...}}."""
self.assertEqual(
'foo\nbar\n\n{{linkfa|...}}\n',
textlib.standardize_stars('foo\n{{linkfa|...}}\nbar'))
def test_with_sorting_params(self):
"""Test standardizing text with sorting parameters."""
self.assertEqual(
'foo\n\n{{linkfa|bar}}\n{{linkfa|de}}\n'
'{{linkfa|en}}\n{{linkfa|fr}}\n',
textlib.standardize_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}'))
def test_get_stars(self):
"""Test get_starts method."""
self.assertEqual(
['{{linkfa|en}}\n', '{{linkfa|de}}\n',
'{{linkfa|fr}}\n', '{{linkfa|bar}}'],
textlib.get_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}'))
def test_remove_stars(self):
"""Test remove_stars method."""
self.assertEqual(
'foo\n{{linkfa|en}}\n{{linkfa|fr}}\n{{linkfa|bar}}',
textlib.remove_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}', ['{{linkfa|de}}\n']))
def test_append_stars(self):
"""Test append_stars method."""
self.assertEqual(
'foo\n\n{{linkfa|bar}}\n{{linkfa|de}}\n'
'{{linkfa|en}}\n{{linkfa|fr}}\n',
textlib.append_stars(
'foo', ['{{linkfa|en}}\n', '{{linkfa|de}}\n',
'{{linkfa|fr}}\n', '{{linkfa|bar}}']))
class TestExtractSections(DefaultDrySiteTestCase):
"""Test the extract_sections function."""
def test_no_sections_no_footer(self):
"""Test for text having no sections or footer."""
self.assertEqual(
extract_sections('text', self.site),
('text', [], '')
)
def test_no_sections_with_footer(self):
"""Test for text having footer but no section."""
self.assertEqual(
extract_sections('text\n\n[[Category:A]]', self.site),
('text\n\n', [], '[[Category:A]]')
)
def test_with_section_no_footer(self):
"""Test for text having sections but no footer."""
self.assertEqual(
extract_sections(
'text\n\n'
'==title==\n'
'content',
self.site),
('text\n\n', [('==title==', '\ncontent')], '')
)
def test_with_section_with_footer(self):
"""Test for text having sections and footer."""
self.assertEqual(
extract_sections(
'text\n\n'
'==title==\n'
'content\n'
'[[Category:A]]\n',
self.site),
('text\n\n', [('==title==', '\ncontent\n')], '[[Category:A]]\n')
)
def test_with_h1_and_h2_sections(self):
"""Test for text having h1 and h2 sections."""
self.assertEqual(
extract_sections(
'text\n\n'
'=first level=\n'
'foo\n'
'==title==\n'
'bar',
self.site),
('text\n\n',
[('=first level=', '\nfoo\n'), ('==title==', '\nbar')],
'')
)
def test_with_h4_and_h2_sections(self):
"""Test for text having h4 and h2 sections."""
self.assertEqual(
extract_sections(
'text\n\n'
'====title====\n'
'==title 2==\n'
'content',
self.site),
('text\n\n',
[('====title====', '\n'), ('==title 2==', '\ncontent')],
'')
)
def test_long_comment(self):
r"""Test for text having a long expanse of white space.
This is to catch certain regex issues caused by patterns like
r'(\s+)*$' (as found in older versions of extract_section).
They may not halt.
c.f.
https://www.regular-expressions.info/catastrophic.html
"""
text = '<!-- -->'
self.assertEqual(
extract_sections(text, self.site),
(text, [], '')
)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| mit | -4,886,598,638,372,700,000 | 41.743228 | 79 | 0.479054 | false |
mwclient/mwclient | mwclient/errors.py | 1 | 2408 | class MwClientError(RuntimeError):
pass
class MediaWikiVersionError(MwClientError):
pass
class APIDisabledError(MwClientError):
pass
class MaximumRetriesExceeded(MwClientError):
pass
class APIError(MwClientError):
def __init__(self, code, info, kwargs):
self.code = code
self.info = info
super(APIError, self).__init__(code, info, kwargs)
class InsufficientPermission(MwClientError):
pass
class UserBlocked(InsufficientPermission):
pass
class EditError(MwClientError):
pass
class ProtectedPageError(EditError, InsufficientPermission):
def __init__(self, page, code=None, info=None):
self.page = page
self.code = code
self.info = info
def __str__(self):
if self.info is not None:
return self.info
return 'You do not have the "edit" right.'
class FileExists(EditError):
pass
class LoginError(MwClientError):
def __init__(self, site, code, info):
super(LoginError, self).__init__(
site,
            {'result': code, 'reason': info}  # For backwards-compatibility
)
self.site = site
self.code = code
self.info = info
def __str__(self):
return self.info
class OAuthAuthorizationError(LoginError):
pass
class AssertUserFailedError(MwClientError):
def __init__(self):
super(AssertUserFailedError, self).__init__((
'By default, mwclient protects you from accidentally editing '
'without being logged in. If you actually want to edit without '
'logging in, you can set force_login on the Site object to False.'
))
def __str__(self):
return self.args[0]
class EmailError(MwClientError):
pass
class NoSpecifiedEmail(EmailError):
pass
class NoWriteApi(MwClientError):
pass
class InvalidResponse(MwClientError):
def __init__(self, response_text=None):
super(InvalidResponse, self).__init__((
'Did not get a valid JSON response from the server. Check that '
'you used the correct hostname. If you did, the server might '
'be wrongly configured or experiencing temporary problems.'),
response_text
)
self.response_text = response_text
def __str__(self):
return self.args[0]
class InvalidPageTitle(MwClientError):
pass
| mit | 992,671,874,082,866,000 | 20.122807 | 78 | 0.634136 | false |
demisto/content | Packs/Base/Scripts/CommonServerPython/CommonServerPython.py | 1 | 291007 | """Common functions script
This script will be appended to each server script before being executed.
Please notice that to add custom common code, add it to the CommonServerUserPython script.
Note that adding code to CommonServerUserPython can override functions in CommonServerPython
"""
from __future__ import print_function
import base64
import json
import logging
import os
import re
import socket
import sys
import time
import traceback
from random import randint
import xml.etree.cElementTree as ET
from collections import OrderedDict
from datetime import datetime, timedelta
from abc import abstractmethod
from distutils.version import LooseVersion
from threading import Lock
import demistomock as demisto
import warnings
class WarningsHandler(object):
# Wrapper to handle warnings. We use a class to cleanup after execution
@staticmethod
def handle_warning(message, category, filename, lineno, file=None, line=None):
try:
msg = warnings.formatwarning(message, category, filename, lineno, line)
demisto.info("python warning: " + msg)
except Exception:
# ignore the warning if it can't be handled for some reason
pass
def __init__(self):
self.org_handler = warnings.showwarning
warnings.showwarning = WarningsHandler.handle_warning
def __del__(self):
warnings.showwarning = self.org_handler
_warnings_handler = WarningsHandler()
# ignore warnings from logging as a result of not being setup
logging.raiseExceptions = False
# imports that might be missing from the docker image
try:
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from typing import Optional, Dict, List, Any, Union, Set
import dateparser
from datetime import timezone # type: ignore
except Exception:
if sys.version_info[0] < 3:
# in python 2 an exception in the imports might still be raised even though it is caught.
# for more info see https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
sys.exc_clear()
CONTENT_RELEASE_VERSION = '0.0.0'
CONTENT_BRANCH_NAME = 'master'
IS_PY3 = sys.version_info[0] == 3
STIX_PREFIX = "STIX "
# pylint: disable=undefined-variable
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
if IS_PY3:
STRING_TYPES = (str, bytes) # type: ignore
STRING_OBJ_TYPES = (str,)
else:
STRING_TYPES = (str, unicode) # type: ignore # noqa: F821
STRING_OBJ_TYPES = STRING_TYPES # type: ignore
# pylint: enable=undefined-variable
# DEPRECATED - use EntryType enum instead
entryTypes = {
'note': 1,
'downloadAgent': 2,
'file': 3,
'error': 4,
'pinned': 5,
'userManagement': 6,
'image': 7,
'playgroundError': 8,
'entryInfoFile': 9,
'warning': 11,
'map': 15,
'widget': 17
}
ENDPOINT_STATUS_OPTIONS = [
'Online',
'Offline'
]
ENDPOINT_ISISOLATED_OPTIONS = [
'Yes',
'No',
'Pending isolation',
'Pending unisolation'
]
class EntryType(object):
"""
Enum: contains all the entry types (e.g. NOTE, ERROR, WARNING, FILE, etc.)
:return: None
:rtype: ``None``
"""
NOTE = 1
DOWNLOAD_AGENT = 2
FILE = 3
ERROR = 4
PINNED = 5
USER_MANAGEMENT = 6
IMAGE = 7
PLAYGROUND_ERROR = 8
ENTRY_INFO_FILE = 9
WARNING = 11
MAP_ENTRY_TYPE = 15
WIDGET = 17
class IncidentStatus(object):
"""
Enum: contains all the incidents status types (e.g. pending, active, done, archive)
:return: None
:rtype: ``None``
"""
PENDING = 0
ACTIVE = 1
DONE = 2
ARCHIVE = 3
class IncidentSeverity(object):
"""
Enum: contains all the incident severity types
:return: None
:rtype: ``None``
"""
UNKNOWN = 0
INFO = 0.5
LOW = 1
MEDIUM = 2
HIGH = 3
CRITICAL = 4
# DEPRECATED - use EntryFormat enum instead
formats = {
'html': 'html',
'table': 'table',
'json': 'json',
'text': 'text',
'dbotResponse': 'dbotCommandResponse',
'markdown': 'markdown'
}
class EntryFormat(object):
"""
Enum: contains all the entry formats (e.g. HTML, TABLE, JSON, etc.)
"""
HTML = 'html'
TABLE = 'table'
JSON = 'json'
TEXT = 'text'
DBOT_RESPONSE = 'dbotCommandResponse'
MARKDOWN = 'markdown'
@classmethod
def is_valid_type(cls, _type):
# type: (str) -> bool
return _type in (
EntryFormat.HTML,
EntryFormat.TABLE,
EntryFormat.JSON,
EntryFormat.TEXT,
EntryFormat.MARKDOWN,
EntryFormat.DBOT_RESPONSE
)
brands = {
'xfe': 'xfe',
'vt': 'virustotal',
'wf': 'WildFire',
'cy': 'cylance',
'cs': 'crowdstrike-intel'
}
providers = {
'xfe': 'IBM X-Force Exchange',
'vt': 'VirusTotal',
'wf': 'WildFire',
'cy': 'Cylance',
'cs': 'CrowdStrike'
}
thresholds = {
'xfeScore': 4,
'vtPositives': 10,
'vtPositiveUrlsForIP': 30
}
class DBotScoreType(object):
"""
Enum: contains all the indicator types
DBotScoreType.IP
DBotScoreType.FILE
DBotScoreType.DOMAIN
DBotScoreType.URL
DBotScoreType.CVE
DBotScoreType.ACCOUNT
DBotScoreType.CRYPTOCURRENCY
DBotScoreType.EMAIL
:return: None
:rtype: ``None``
"""
IP = 'ip'
FILE = 'file'
DOMAIN = 'domain'
URL = 'url'
CVE = 'cve'
ACCOUNT = 'account'
    CIDR = 'cidr'
DOMAINGLOB = 'domainglob'
CERTIFICATE = 'certificate'
CRYPTOCURRENCY = 'cryptocurrency'
EMAIL = 'email'
def __init__(self):
# required to create __init__ for create_server_docs.py purpose
pass
@classmethod
def is_valid_type(cls, _type):
# type: (str) -> bool
return _type in (
DBotScoreType.IP,
DBotScoreType.FILE,
DBotScoreType.DOMAIN,
DBotScoreType.URL,
DBotScoreType.CVE,
DBotScoreType.ACCOUNT,
DBotScoreType.CIDR,
DBotScoreType.DOMAINGLOB,
DBotScoreType.CERTIFICATE,
DBotScoreType.CRYPTOCURRENCY,
DBotScoreType.EMAIL,
)
class DBotScoreReliability(object):
"""
Enum: Source reliability levels
Values are case sensitive
:return: None
:rtype: ``None``
"""
A_PLUS = 'A+ - 3rd party enrichment'
A = 'A - Completely reliable'
B = 'B - Usually reliable'
C = 'C - Fairly reliable'
D = 'D - Not usually reliable'
E = 'E - Unreliable'
F = 'F - Reliability cannot be judged'
def __init__(self):
# required to create __init__ for create_server_docs.py purpose
pass
@staticmethod
def is_valid_type(_type):
# type: (str) -> bool
return _type in (
DBotScoreReliability.A_PLUS,
DBotScoreReliability.A,
DBotScoreReliability.B,
DBotScoreReliability.C,
DBotScoreReliability.D,
DBotScoreReliability.E,
DBotScoreReliability.F,
)
@staticmethod
def get_dbot_score_reliability_from_str(reliability_str):
if reliability_str == DBotScoreReliability.A_PLUS:
return DBotScoreReliability.A_PLUS
elif reliability_str == DBotScoreReliability.A:
return DBotScoreReliability.A
elif reliability_str == DBotScoreReliability.B:
return DBotScoreReliability.B
elif reliability_str == DBotScoreReliability.C:
return DBotScoreReliability.C
elif reliability_str == DBotScoreReliability.D:
return DBotScoreReliability.D
elif reliability_str == DBotScoreReliability.E:
return DBotScoreReliability.E
elif reliability_str == DBotScoreReliability.F:
return DBotScoreReliability.F
raise Exception("Please use supported reliability only.")
INDICATOR_TYPE_TO_CONTEXT_KEY = {
'ip': 'Address',
'email': 'Address',
'url': 'Data',
'domain': 'Name',
'cve': 'ID',
'md5': 'file',
'sha1': 'file',
'sha256': 'file',
'crc32': 'file',
'sha512': 'file',
'ctph': 'file',
'ssdeep': 'file'
}
class FeedIndicatorType(object):
"""Type of Indicator (Reputations), used in TIP integrations"""
Account = "Account"
CVE = "CVE"
Domain = "Domain"
DomainGlob = "DomainGlob"
Email = "Email"
File = "File"
FQDN = "Domain"
Host = "Host"
IP = "IP"
CIDR = "CIDR"
IPv6 = "IPv6"
IPv6CIDR = "IPv6CIDR"
Registry = "Registry Key"
SSDeep = "ssdeep"
URL = "URL"
@staticmethod
def is_valid_type(_type):
return _type in (
FeedIndicatorType.Account,
FeedIndicatorType.CVE,
FeedIndicatorType.Domain,
FeedIndicatorType.DomainGlob,
FeedIndicatorType.Email,
FeedIndicatorType.File,
FeedIndicatorType.Host,
FeedIndicatorType.IP,
FeedIndicatorType.CIDR,
FeedIndicatorType.IPv6,
FeedIndicatorType.IPv6CIDR,
FeedIndicatorType.Registry,
FeedIndicatorType.SSDeep,
FeedIndicatorType.URL
)
@staticmethod
def list_all_supported_indicators():
indicator_types = []
for key, val in vars(FeedIndicatorType).items():
if not key.startswith('__') and type(val) == str:
indicator_types.append(val)
return indicator_types
@staticmethod
def ip_to_indicator_type(ip):
"""Returns the indicator type of the input IP.
:type ip: ``str``
:param ip: IP address to get it's indicator type.
:rtype: ``str``
        :return: Indicator type from FeedIndicatorType, or None if the IP address is invalid.
"""
if re.match(ipv4cidrRegex, ip):
return FeedIndicatorType.CIDR
elif re.match(ipv4Regex, ip):
return FeedIndicatorType.IP
elif re.match(ipv6cidrRegex, ip):
return FeedIndicatorType.IPv6CIDR
elif re.match(ipv6Regex, ip):
return FeedIndicatorType.IPv6
else:
return None
@staticmethod
def indicator_type_by_server_version(indicator_type):
"""Returns the indicator type of the input by the server version.
        If the server version is 6.2 or greater, the STIX prefix is removed from the type.
:type indicator_type: ``str``
:param indicator_type: Type of an indicator.
:rtype: ``str``
        :return: Indicator type.
"""
if is_demisto_version_ge("6.2.0") and indicator_type.startswith(STIX_PREFIX):
return indicator_type[len(STIX_PREFIX):]
return indicator_type
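# Illustrative sketch of the FeedIndicatorType helpers above (values are the expected
# results, shown as comments only; ip_to_indicator_type relies on the ipv4/ipv6 regexes
# defined elsewhere in this module):
#
#   FeedIndicatorType.ip_to_indicator_type('8.8.8.8')      # -> FeedIndicatorType.IP
#   FeedIndicatorType.ip_to_indicator_type('10.0.0.0/8')   # -> FeedIndicatorType.CIDR
#   FeedIndicatorType.ip_to_indicator_type('not-an-ip')    # -> None
#   FeedIndicatorType.is_valid_type('Domain')               # -> True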
# -------------------------------- Threat Intel Objects ----------------------------------- #
class ThreatIntel:
"""
XSOAR Threat Intel Objects
:return: None
:rtype: ``None``
"""
class ObjectsNames(object):
"""
Enum: Threat Intel Objects names.
:return: None
:rtype: ``None``
"""
CAMPAIGN = 'Campaign'
ATTACK_PATTERN = 'Attack Pattern'
REPORT = 'Report'
MALWARE = 'Malware'
COURSE_OF_ACTION = 'Course of Action'
INTRUSION_SET = 'Intrusion Set'
TOOL = 'Tool'
class ObjectsScore(object):
"""
Enum: Threat Intel Objects Score.
:return: None
:rtype: ``None``
"""
CAMPAIGN = 3
ATTACK_PATTERN = 2
REPORT = 3
MALWARE = 3
COURSE_OF_ACTION = 0
INTRUSION_SET = 3
TOOL = 2
class KillChainPhases(object):
"""
Enum: Kill Chain Phases names.
:return: None
:rtype: ``None``
"""
BUILD_CAPABILITIES = "Build Capabilities"
PRIVILEGE_ESCALATION = "Privilege Escalation"
ADVERSARY_OPSEC = "Adversary Opsec"
CREDENTIAL_ACCESS = "Credential Access"
EXFILTRATION = "Exfiltration"
LATERAL_MOVEMENT = "Lateral Movement"
DEFENSE_EVASION = "Defense Evasion"
PERSISTENCE = "Persistence"
COLLECTION = "Collection"
IMPACT = "Impact"
INITIAL_ACCESS = "Initial Access"
DISCOVERY = "Discovery"
EXECUTION = "Execution"
INSTALLATION = "Installation"
DELIVERY = "Delivery"
WEAPONIZATION = "Weaponization"
ACT_ON_OBJECTIVES = "Actions on Objectives"
COMMAND_AND_CONTROL = "Command \u0026 Control"
def is_debug_mode():
"""Return if this script/command was passed debug-mode=true option
:return: true if debug-mode is enabled
:rtype: ``bool``
"""
# use `hasattr(demisto, 'is_debug')` to ensure compatibility with server version <= 4.5
return hasattr(demisto, 'is_debug') and demisto.is_debug
def get_schedule_metadata(context):
"""
Get the entry schedule metadata if available
:type context: ``dict``
:param context: Context in which the command was executed.
:return: Dict with metadata of scheduled entry
:rtype: ``dict``
"""
schedule_metadata = {}
parent_entry = context.get('ParentEntry', {})
if parent_entry:
schedule_metadata = assign_params(
is_polling=True if parent_entry.get('polling') else False,
polling_command=parent_entry.get('pollingCommand'),
polling_args=parent_entry.get('pollingArgs'),
times_ran=int(parent_entry.get('timesRan', 0)) + 1,
start_date=parent_entry.get('startDate'),
end_date=parent_entry.get('endingDate')
)
return schedule_metadata
def auto_detect_indicator_type(indicator_value):
"""
Infer the type of the indicator.
:type indicator_value: ``str``
:param indicator_value: The indicator whose type we want to check. (required)
:return: The type of the indicator.
:rtype: ``str``
"""
try:
import tldextract
except Exception:
raise Exception("Missing tldextract module, In order to use the auto detect function please use a docker"
" image with it installed such as: demisto/jmespath")
if re.match(ipv4cidrRegex, indicator_value):
return FeedIndicatorType.CIDR
if re.match(ipv6cidrRegex, indicator_value):
return FeedIndicatorType.IPv6CIDR
if re.match(ipv4Regex, indicator_value):
return FeedIndicatorType.IP
if re.match(ipv6Regex, indicator_value):
return FeedIndicatorType.IPv6
if re.match(sha256Regex, indicator_value):
return FeedIndicatorType.File
if re.match(urlRegex, indicator_value):
return FeedIndicatorType.URL
if re.match(md5Regex, indicator_value):
return FeedIndicatorType.File
if re.match(sha1Regex, indicator_value):
return FeedIndicatorType.File
if re.match(emailRegex, indicator_value):
return FeedIndicatorType.Email
if re.match(cveRegex, indicator_value):
return FeedIndicatorType.CVE
if re.match(sha512Regex, indicator_value):
return FeedIndicatorType.File
try:
tldextract_version = tldextract.__version__
if LooseVersion(tldextract_version) < '3.0.0':
no_cache_extract = tldextract.TLDExtract(cache_file=False, suffix_list_urls=None)
else:
no_cache_extract = tldextract.TLDExtract(cache_dir=False, suffix_list_urls=None)
if no_cache_extract(indicator_value).suffix:
if '*' in indicator_value:
return FeedIndicatorType.DomainGlob
return FeedIndicatorType.Domain
except Exception:
demisto.debug('tldextract failed to detect indicator type. indicator value: {}'.format(indicator_value))
demisto.debug('Failed to detect indicator type. Indicator value: {}'.format(indicator_value))
return None
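# A hedged usage sketch for auto_detect_indicator_type (requires a docker image with
# tldextract installed; the values below are the expected detections, not guarantees):
#
#   auto_detect_indicator_type('8.8.8.8')         # -> 'IP'
#   auto_detect_indicator_type('demisto.com')     # -> 'Domain'
#   auto_detect_indicator_type('some free text')  # -> None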
def handle_proxy(proxy_param_name='proxy', checkbox_default_value=False, handle_insecure=True,
insecure_param_name=None):
"""
Handle logic for routing traffic through the system proxy.
Should usually be called at the beginning of the integration, depending on proxy checkbox state.
    Additionally will unset env variables REQUESTS_CA_BUNDLE and CURL_CA_BUNDLE if handle_insecure is specified (default).
    This is needed as when these variables are set and a requests.Session object is used, requests will ignore the
    Session.verify setting. See: https://github.com/psf/requests/blob/master/requests/sessions.py#L703
:type proxy_param_name: ``string``
:param proxy_param_name: name of the "use system proxy" integration parameter
:type checkbox_default_value: ``bool``
:param checkbox_default_value: Default value of the proxy param checkbox
:type handle_insecure: ``bool``
:param handle_insecure: Whether to check the insecure param and unset env variables
:type insecure_param_name: ``string``
:param insecure_param_name: Name of insecure param. If None will search insecure and unsecure
:rtype: ``dict``
:return: proxies dict for the 'proxies' parameter of 'requests' functions
"""
proxies = {} # type: dict
if demisto.params().get(proxy_param_name, checkbox_default_value):
proxies = {
'http': os.environ.get('HTTP_PROXY') or os.environ.get('http_proxy', ''),
'https': os.environ.get('HTTPS_PROXY') or os.environ.get('https_proxy', '')
}
else:
skip_proxy()
if handle_insecure:
if insecure_param_name is None:
param_names = ('insecure', 'unsecure')
else:
param_names = (insecure_param_name,) # type: ignore[assignment]
for p in param_names:
if demisto.params().get(p, False):
skip_cert_verification()
return proxies
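# A minimal usage sketch for handle_proxy, assuming an integration whose parameters
# include the standard "proxy"/"insecure" checkboxes (base_url below is a hypothetical
# variable used only for illustration):
#
#   proxies = handle_proxy()  # reads demisto.params().get('proxy') and builds the dict
#   requests.get(base_url, proxies=proxies,
#                verify=not demisto.params().get('insecure', False))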
def skip_proxy():
"""
    The function deletes the proxy environment vars so that HTTP requests skip routing through the proxy
:return: None
:rtype: ``None``
"""
for k in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
if k in os.environ:
del os.environ[k]
def skip_cert_verification():
"""
    The function deletes the self-signed certificate env vars so that HTTP requests skip certificate validation.
:return: None
:rtype: ``None``
"""
for k in ('REQUESTS_CA_BUNDLE', 'CURL_CA_BUNDLE'):
if k in os.environ:
del os.environ[k]
def urljoin(url, suffix=""):
"""
Will join url and its suffix
Example:
"https://google.com/", "/" => "https://google.com/"
"https://google.com", "/" => "https://google.com/"
"https://google.com", "api" => "https://google.com/api"
"https://google.com", "/api" => "https://google.com/api"
"https://google.com/", "api" => "https://google.com/api"
"https://google.com/", "/api" => "https://google.com/api"
:type url: ``string``
:param url: URL string (required)
:type suffix: ``string``
:param suffix: the second part of the url
:rtype: ``string``
:return: Full joined url
"""
if url[-1:] != "/":
url = url + "/"
if suffix.startswith("/"):
suffix = suffix[1:]
return url + suffix
return url + suffix
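# Quick sketch of urljoin behaviour (expected return values shown as comments):
#
#   urljoin("https://google.com", "api")    # -> "https://google.com/api"
#   urljoin("https://google.com/", "/api")  # -> "https://google.com/api"
#   urljoin("https://google.com/")          # -> "https://google.com/"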
def positiveUrl(entry):
"""
Checks if the given entry from a URL reputation query is positive (known bad) (deprecated)
:type entry: ``dict``
:param entry: URL entry (required)
:return: True if bad, false otherwise
:rtype: ``bool``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['xfe']:
return demisto.get(entry, 'Contents.url.result.score') > thresholds['xfeScore']
if entry['Brand'] == brands['vt']:
return demisto.get(entry, 'Contents.positives') > thresholds['vtPositives']
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
return demisto.get(c, 'indicator') and demisto.get(c, 'malicious_confidence') in ['high', 'medium']
return False
def positiveFile(entry):
"""
Checks if the given entry from a file reputation query is positive (known bad) (deprecated)
:type entry: ``dict``
:param entry: File entry (required)
:return: True if bad, false otherwise
:rtype: ``bool``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['xfe'] and (demisto.get(entry, 'Contents.malware.family')
or demisto.gets(entry, 'Contents.malware.origins.external.family')):
return True
if entry['Brand'] == brands['vt']:
return demisto.get(entry, 'Contents.positives') > thresholds['vtPositives']
if entry['Brand'] == brands['wf']:
return demisto.get(entry, 'Contents.wildfire.file_info.malware') == 'yes'
if entry['Brand'] == brands['cy'] and demisto.get(entry, 'Contents'):
contents = demisto.get(entry, 'Contents')
k = contents.keys()
if k and len(k) > 0:
v = contents[k[0]]
if v and demisto.get(v, 'generalscore'):
return v['generalscore'] < -0.5
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
return demisto.get(c, 'indicator') and demisto.get(c, 'malicious_confidence') in ['high', 'medium']
return False
def vtCountPositives(entry):
"""
Counts the number of detected URLs in the entry
:type entry: ``dict``
:param entry: Demisto entry (required)
:return: The number of detected URLs
:rtype: ``int``
"""
positives = 0
if demisto.get(entry, 'Contents.detected_urls'):
for detected in demisto.get(entry, 'Contents.detected_urls'):
if demisto.get(detected, 'positives') > thresholds['vtPositives']:
positives += 1
return positives
def positiveIp(entry):
"""
    Checks if the given entry from an IP reputation query is positive (known bad) (deprecated)
:type entry: ``dict``
:param entry: IP entry (required)
:return: True if bad, false otherwise
:rtype: ``bool``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['xfe']:
return demisto.get(entry, 'Contents.reputation.score') > thresholds['xfeScore']
if entry['Brand'] == brands['vt'] and demisto.get(entry, 'Contents.detected_urls'):
return vtCountPositives(entry) > thresholds['vtPositiveUrlsForIP']
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
return demisto.get(c, 'indicator') and demisto.get(c, 'malicious_confidence') in ['high', 'medium']
return False
def formatEpochDate(t):
"""
Convert a time expressed in seconds since the epoch to a string representing local time
:type t: ``int``
:param t: Time represented in seconds (required)
:return: A string representing local time
:rtype: ``str``
"""
if t:
return time.ctime(t)
return ''
def shortCrowdStrike(entry):
"""
Display CrowdStrike Intel results in Markdown (deprecated)
:type entry: ``dict``
:param entry: CrowdStrike result entry (required)
:return: A Demisto entry containing the shortened CrowdStrike info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
csRes = '## CrowdStrike Falcon Intelligence'
csRes += '\n\n### Indicator - ' + demisto.gets(c, 'indicator')
labels = demisto.get(c, 'labels')
if labels:
csRes += '\n### Labels'
csRes += '\nName|Created|Last Valid'
csRes += '\n----|-------|----------'
for label in labels:
csRes += '\n' + demisto.gets(label, 'name') + '|' + \
formatEpochDate(demisto.get(label, 'created_on')) + '|' + \
formatEpochDate(demisto.get(label, 'last_valid_on'))
relations = demisto.get(c, 'relations')
if relations:
csRes += '\n### Relations'
csRes += '\nIndicator|Type|Created|Last Valid'
csRes += '\n---------|----|-------|----------'
for r in relations:
csRes += '\n' + demisto.gets(r, 'indicator') + '|' + demisto.gets(r, 'type') + '|' + \
formatEpochDate(demisto.get(label, 'created_date')) + '|' + \
formatEpochDate(demisto.get(label, 'last_valid_date'))
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': csRes}
return entry
def shortUrl(entry):
"""
Formats a URL reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: URL result entry (required)
:return: A Demisto entry containing the shortened URL info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
c = entry['Contents']
if entry['Brand'] == brands['xfe']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'Country': c['country'], 'MalwareCount': demisto.get(c, 'malware.count'),
'A': demisto.gets(c, 'resolution.A'), 'AAAA': demisto.gets(c, 'resolution.AAAA'),
'Score': demisto.get(c, 'url.result.score'), 'Categories': demisto.gets(c, 'url.result.cats'),
'URL': demisto.get(c, 'url.result.url'), 'Provider': providers['xfe'],
'ProviderLink': 'https://exchange.xforce.ibmcloud.com/url/' + demisto.get(c, 'url.result.url')}}
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'ScanDate': c['scan_date'], 'Positives': c['positives'], 'Total': c['total'],
'URL': c['url'], 'Provider': providers['vt'], 'ProviderLink': c['permalink']}}
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
return shortCrowdStrike(entry)
return {'ContentsFormat': 'text', 'Type': 4, 'Contents': 'Unknown provider for result: ' + entry['Brand']}
def shortFile(entry):
"""
Formats a file reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: File result entry (required)
:return: A Demisto entry containing the shortened file info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
c = entry['Contents']
if entry['Brand'] == brands['xfe']:
cm = c['malware']
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'Family': cm['family'], 'MIMEType': cm['mimetype'], 'MD5': cm['md5'][2:] if 'md5' in cm else '',
'CnCServers': demisto.get(cm, 'origins.CncServers.count'),
'DownloadServers': demisto.get(cm, 'origins.downloadServers.count'),
'Emails': demisto.get(cm, 'origins.emails.count'),
'ExternalFamily': demisto.gets(cm, 'origins.external.family'),
'ExternalCoverage': demisto.get(cm, 'origins.external.detectionCoverage'),
'Provider': providers['xfe'],
'ProviderLink': 'https://exchange.xforce.ibmcloud.com/malware/' + cm['md5'].replace('0x', '')}}
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'Resource': c['resource'], 'ScanDate': c['scan_date'], 'Positives': c['positives'],
'Total': c['total'], 'SHA1': c['sha1'], 'SHA256': c['sha256'], 'Provider': providers['vt'],
'ProviderLink': c['permalink']}}
if entry['Brand'] == brands['wf']:
c = demisto.get(entry, 'Contents.wildfire.file_info')
if c:
return {'Contents': {'Type': c['filetype'], 'Malware': c['malware'], 'MD5': c['md5'],
'SHA256': c['sha256'], 'Size': c['size'], 'Provider': providers['wf']},
'ContentsFormat': formats['table'], 'Type': entryTypes['note']}
if entry['Brand'] == brands['cy'] and demisto.get(entry, 'Contents'):
contents = demisto.get(entry, 'Contents')
k = contents.keys()
if k and len(k) > 0:
v = contents[k[0]]
if v and demisto.get(v, 'generalscore'):
return {'Contents': {'Status': v['status'], 'Code': v['statuscode'], 'Score': v['generalscore'],
'Classifiers': str(v['classifiers']), 'ConfirmCode': v['confirmcode'],
'Error': v['error'], 'Provider': providers['cy']},
'ContentsFormat': formats['table'], 'Type': entryTypes['note']}
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
return shortCrowdStrike(entry)
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
'Contents': 'Unknown provider for result: ' + entry['Brand']}
def shortIp(entry):
"""
Formats an ip reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: IP result entry (required)
:return: A Demisto entry containing the shortened IP info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
c = entry['Contents']
if entry['Brand'] == brands['xfe']:
cr = c['reputation']
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'IP': cr['ip'], 'Score': cr['score'], 'Geo': str(cr['geo']), 'Categories': str(cr['cats']),
'Provider': providers['xfe']}}
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': {'Positive URLs': vtCountPositives(entry), 'Provider': providers['vt']}}
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
return shortCrowdStrike(entry)
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
'Contents': 'Unknown provider for result: ' + entry['Brand']}
def shortDomain(entry):
"""
Formats a domain reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: Domain result entry (required)
:return: A Demisto entry containing the shortened domain info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': {'Positive URLs': vtCountPositives(entry), 'Provider': providers['vt']}}
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
'Contents': 'Unknown provider for result: ' + entry['Brand']}
def get_error(execute_command_result):
"""
execute_command_result must contain error entry - check the result first with is_error function
if there is no error entry in the result then it will raise an Exception
:type execute_command_result: ``dict`` or ``list``
:param execute_command_result: result of demisto.executeCommand()
:return: Error message extracted from the demisto.executeCommand() result
:rtype: ``string``
"""
if not is_error(execute_command_result):
raise ValueError("execute_command_result has no error entry. before using get_error use is_error")
if isinstance(execute_command_result, dict):
return execute_command_result['Contents']
error_messages = []
for entry in execute_command_result:
is_error_entry = type(entry) == dict and entry['Type'] == entryTypes['error']
if is_error_entry:
error_messages.append(entry['Contents'])
return '\n'.join(error_messages)
def is_error(execute_command_result):
"""
Check if the given execute_command_result has an error entry
:type execute_command_result: ``dict`` or ``list``
:param execute_command_result: Demisto entry (required) or result of demisto.executeCommand()
:return: True if the execute_command_result has an error entry, false otherwise
:rtype: ``bool``
"""
if execute_command_result is None:
return False
if isinstance(execute_command_result, list):
if len(execute_command_result) > 0:
for entry in execute_command_result:
if type(entry) == dict and entry['Type'] == entryTypes['error']:
return True
return type(execute_command_result) == dict and execute_command_result['Type'] == entryTypes['error']
isError = is_error
def FormatADTimestamp(ts):
"""
Formats an Active Directory timestamp into human readable time representation
:type ts: ``int``
:param ts: The timestamp to be formatted (required)
    :return: A string representing the time
:rtype: ``str``
"""
return (datetime(year=1601, month=1, day=1) + timedelta(seconds=int(ts) / 10 ** 7)).ctime()
def PrettifyCompactedTimestamp(x):
"""
Formats a compacted timestamp string into human readable time representation
:type x: ``str``
:param x: The timestamp to be formatted (required)
    :return: A string representing the time
:rtype: ``str``
"""
return '%s-%s-%sT%s:%s:%s' % (x[:4], x[4:6], x[6:8], x[8:10], x[10:12], x[12:])
def NormalizeRegistryPath(strRegistryPath):
"""
Normalizes a registry path string
:type strRegistryPath: ``str``
:param strRegistryPath: The registry path (required)
:return: The normalized string
:rtype: ``str``
"""
dSub = {
'HKCR': 'HKEY_CLASSES_ROOT',
'HKCU': 'HKEY_CURRENT_USER',
'HKLM': 'HKEY_LOCAL_MACHINE',
'HKU': 'HKEY_USERS',
'HKCC': 'HKEY_CURRENT_CONFIG',
'HKPD': 'HKEY_PERFORMANCE_DATA'
}
for k in dSub:
if strRegistryPath[:len(k)] == k:
return dSub[k] + strRegistryPath[len(k):]
return strRegistryPath
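# NormalizeRegistryPath sketch - hive abbreviations are expanded to their full names
# (expected values shown as comments):
#
#   NormalizeRegistryPath('HKLM\\SOFTWARE\\Microsoft')  # -> 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft'
#   NormalizeRegistryPath('HKCU\\Console')              # -> 'HKEY_CURRENT_USER\\Console'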
def scoreToReputation(score):
"""
Converts score (in number format) to human readable reputation format
:type score: ``int``
:param score: The score to be formatted (required)
:return: The formatted score
:rtype: ``str``
"""
to_str = {
4: 'Critical',
3: 'Bad',
2: 'Suspicious',
1: 'Good',
0.5: 'Informational',
0: 'Unknown'
}
return to_str.get(score, 'None')
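# scoreToReputation sketch (expected values):
#
#   scoreToReputation(3)    # -> 'Bad'
#   scoreToReputation(0.5)  # -> 'Informational'
#   scoreToReputation(7)    # -> 'None' (unmapped scores fall back to the string 'None')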
def b64_encode(text):
"""
Base64 encode a string. Wrapper function around base64.b64encode which will accept a string
In py3 will encode the string to binary using utf-8 encoding and return a string result decoded using utf-8
:param text: string to encode
:type text: str
:return: encoded string
:rtype: str
"""
if not text:
return ''
elif isinstance(text, bytes):
to_encode = text
else:
to_encode = text.encode('utf-8', 'ignore')
res = base64.b64encode(to_encode)
if IS_PY3:
res = res.decode('utf-8') # type: ignore
return res
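# b64_encode sketch (expected values; both str and bytes inputs are accepted):
#
#   b64_encode('hello')   # -> 'aGVsbG8='
#   b64_encode(b'hello')  # -> 'aGVsbG8='
#   b64_encode('')        # -> ''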
def encode_string_results(text):
"""
Encode string as utf-8, if any unicode character exists.
:param text: string to encode
:type text: str
:return: encoded string
:rtype: str
"""
if not isinstance(text, STRING_OBJ_TYPES):
return text
try:
return str(text)
except UnicodeEncodeError:
return text.encode("utf8", "replace")
def safe_load_json(json_object):
"""
Safely loads a JSON object from an argument. Allows the argument to accept either a JSON in string form,
or an entry ID corresponding to a JSON file.
:param json_object: Entry ID or JSON string.
:type json_object: str
:return: Dictionary object from a parsed JSON file or string.
:rtype: dict
"""
safe_json = None
if isinstance(json_object, dict) or isinstance(json_object, list):
return json_object
if (json_object.startswith('{') and json_object.endswith('}')) or (
json_object.startswith('[') and json_object.endswith(']')):
try:
safe_json = json.loads(json_object)
except ValueError as e:
return_error(
'Unable to parse JSON string. Please verify the JSON is valid. - ' + str(e))
else:
try:
path = demisto.getFilePath(json_object)
with open(path['path'], 'rb') as data:
try:
safe_json = json.load(data)
except Exception: # lgtm [py/catch-base-exception]
safe_json = json.loads(data.read())
except Exception as e:
return_error('Unable to parse JSON file. Please verify the JSON is valid or the Entry'
'ID is correct. - ' + str(e))
return safe_json
def datetime_to_string(datetime_obj):
"""
Converts a datetime object into a string. When used with `json.dumps()` for the `default` parameter,
e.g. `json.dumps(response, default=datetime_to_string)` datetime_to_string allows entire JSON objects
to be safely added to context without causing any datetime marshalling errors.
:param datetime_obj: Datetime object.
:type datetime_obj: datetime.datetime
:return: String representation of a datetime object.
:rtype: str
"""
if isinstance(datetime_obj, datetime): # type: ignore
return datetime_obj.__str__()
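# Hedged sketch of datetime_to_string as a json.dumps default handler (expected output):
#
#   json.dumps({'t': datetime(2021, 1, 1)}, default=datetime_to_string)
#   # -> '{"t": "2021-01-01 00:00:00"}'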
def remove_empty_elements(d):
"""
Recursively remove empty lists, empty dicts, or None elements from a dictionary.
:param d: Input dictionary.
:type d: dict
:return: Dictionary with all empty lists, and empty dictionaries removed.
:rtype: dict
"""
def empty(x):
return x is None or x == {} or x == []
if not isinstance(d, (dict, list)):
return d
elif isinstance(d, list):
return [v for v in (remove_empty_elements(v) for v in d) if not empty(v)]
else:
return {k: v for k, v in ((k, remove_empty_elements(v)) for k, v in d.items()) if not empty(v)}
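# remove_empty_elements sketch - empty dicts/lists/None are pruned recursively
# (expected value shown as a comment):
#
#   remove_empty_elements({'a': 1, 'b': None, 'c': {}, 'd': [], 'e': {'f': []}})
#   # -> {'a': 1}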
class SmartGetDict(dict):
"""A dict that when called with get(key, default) will return the default passed
value, even if there is a value of "None" in the place of the key. Example with built-in dict:
```
>>> d = {}
>>> d['test'] = None
>>> d.get('test', 1)
>>> print(d.get('test', 1))
None
```
Example with SmartGetDict:
```
>>> d = SmartGetDict()
>>> d['test'] = None
>>> d.get('test', 1)
>>> print(d.get('test', 1))
1
```
:return: SmartGetDict
:rtype: ``SmartGetDict``
"""
def get(self, key, default=None):
res = dict.get(self, key)
if res is not None:
return res
return default
if (not os.getenv('COMMON_SERVER_NO_AUTO_PARAMS_REMOVE_NULLS')) and hasattr(demisto, 'params') and demisto.params():
demisto.callingContext['params'] = SmartGetDict(demisto.params())
def aws_table_to_markdown(response, table_header):
"""
Converts a raw response from AWS into a markdown formatted table. This function checks to see if
there is only one nested dict in the top level of the dictionary and will use the nested data.
:param response: Raw response from AWS
:type response: dict
:param table_header: The header string to use for the table.
:type table_header: str
:return: Markdown formatted table as a string.
:rtype: str
"""
if isinstance(response, dict):
if len(response) == 1:
if isinstance(response[list(response.keys())[0]], dict) or isinstance(
response[list(response.keys())[0]], list):
if isinstance(response[list(response.keys())[0]], list):
list_response = response[list(response.keys())[0]]
if not list_response:
human_readable = tableToMarkdown(table_header, list_response)
elif isinstance(list_response[0], str):
human_readable = tableToMarkdown(
table_header, response)
else:
human_readable = tableToMarkdown(
table_header, response[list(response.keys())[0]])
else:
human_readable = tableToMarkdown(
table_header, response[list(response.keys())[0]])
else:
human_readable = tableToMarkdown(table_header, response)
else:
human_readable = tableToMarkdown(table_header, response)
else:
human_readable = tableToMarkdown(table_header, response)
return human_readable
def stringEscape(st):
"""
Escape newline chars in the given string.
:type st: ``str``
:param st: The string to be modified (required).
:return: A modified string.
:rtype: ``str``
"""
return st.replace('\r', '\\r').replace('\n', '\\n').replace('\t', '\\t')
def stringUnEscape(st):
"""
Unescape newline chars in the given string.
:type st: ``str``
:param st: The string to be modified (required).
:return: A modified string.
:rtype: ``str``
"""
return st.replace('\\r', '\r').replace('\\n', '\n').replace('\\t', '\t')
class IntegrationLogger(object):
"""
a logger for python integrations:
use LOG(<message>) to add a record to the logger (message can be any object with __str__)
use LOG.print_log(verbose=True/False) to display all records in War-Room (if verbose) and server log.
use add_replace_strs to add sensitive strings that should be replaced before going to the log.
:type message: ``str``
:param message: The message to be logged
:return: No data returned
:rtype: ``None``
"""
def __init__(self, debug_logging=False):
self.messages = [] # type: list
self.write_buf = [] # type: list
self.replace_strs = [] # type: list
self.curl = [] # type: list
self.buffering = True
self.debug_logging = debug_logging
# if for some reason you don't want to auto add credentials.password to replace strings
# set the os env COMMON_SERVER_NO_AUTO_REPLACE_STRS. Either in CommonServerUserPython, or docker env
if (not os.getenv('COMMON_SERVER_NO_AUTO_REPLACE_STRS') and hasattr(demisto, 'getParam')):
# add common params
sensitive_params = ('key', 'private', 'password', 'secret', 'token', 'credentials', 'service_account')
if demisto.params():
self._iter_sensistive_dict_obj(demisto.params(), sensitive_params)
def _iter_sensistive_dict_obj(self, dict_obj, sensitive_params):
for (k, v) in dict_obj.items():
if isinstance(v, dict): # credentials object case. recurse into the object
self._iter_sensistive_dict_obj(v, sensitive_params)
if v.get('identifier') and v.get('password'): # also add basic auth case
basic_auth = '{}:{}'.format(v.get('identifier'), v.get('password'))
self.add_replace_strs(b64_encode(basic_auth))
elif isinstance(v, STRING_OBJ_TYPES):
k_lower = k.lower()
for p in sensitive_params:
if p in k_lower:
self.add_replace_strs(v, b64_encode(v))
def encode(self, message):
try:
res = str(message)
except UnicodeEncodeError as exception:
# could not decode the message
# if message is an Exception, try encode the exception's message
if isinstance(message, Exception) and message.args and isinstance(message.args[0], STRING_OBJ_TYPES):
res = message.args[0].encode('utf-8', 'replace') # type: ignore
elif isinstance(message, STRING_OBJ_TYPES):
# try encode the message itself
res = message.encode('utf-8', 'replace') # type: ignore
else:
res = "Failed encoding message with error: {}".format(exception)
for s in self.replace_strs:
res = res.replace(s, '<XX_REPLACED>')
return res
def __call__(self, message):
text = self.encode(message)
if self.buffering:
self.messages.append(text)
if self.debug_logging:
demisto.debug(text)
else:
demisto.info(text)
return text
def add_replace_strs(self, *args):
'''
Add strings which will be replaced when logging.
Meant for avoiding passwords and so forth in the log.
'''
to_add = []
for a in args:
if a:
a = self.encode(a)
to_add.append(stringEscape(a))
to_add.append(stringUnEscape(a))
self.replace_strs.extend(to_add)
def set_buffering(self, state):
"""
        set whether the logger buffers messages or writes straight to the demisto log
:param state: True/False
:type state: boolean
"""
self.buffering = state
def print_log(self, verbose=False):
if self.write_buf:
self.messages.append("".join(self.write_buf))
if self.messages:
text = 'Full Integration Log:\n' + '\n'.join(self.messages)
if verbose:
demisto.log(text)
            if not self.debug_logging:  # we don't print out in debug_logging, as all messages were already printed
demisto.info(text)
self.messages = []
def build_curl(self, text):
"""
Parses the HTTP client "send" log messages and generates cURL queries out of them.
:type text: ``str``
:param text: The HTTP client log message.
:return: No data returned
:rtype: ``None``
"""
http_methods = ['GET', 'POST', 'PUT', 'DELETE', 'PATCH']
data = text.split("send: b'")[1]
if data and data[0] in {'{', '<'}:
# it is the request url query params/post body - will always come after we already have the url and headers
# `<` is for xml body
self.curl[-1] += "-d '{}".format(data)
elif any(http_method in data for http_method in http_methods):
method = ''
url = ''
headers = []
headers_to_skip = ['Content-Length', 'User-Agent', 'Accept-Encoding', 'Connection']
request_parts = repr(data).split('\\\\r\\\\n') # splitting lines on repr since data is a bytes-string
for line, part in enumerate(request_parts):
if line == 0:
method, url, _ = part[1:].split() # ignoring " at first char
elif line != len(request_parts) - 1: # ignoring the last line which is empty
if part.startswith('Host:'):
_, host = part.split('Host: ')
url = 'https://{}{}'.format(host, url)
else:
if any(header_to_skip in part for header_to_skip in headers_to_skip):
continue
headers.append(part)
curl_headers = ''
for header in headers:
if header:
curl_headers += '-H "{}" '.format(header)
curl = 'curl -X {} {} {}'.format(method, url, curl_headers)
if demisto.params().get('proxy'):
proxy_address = os.environ.get('https_proxy')
if proxy_address:
curl += '--proxy {} '.format(proxy_address)
else:
curl += '--noproxy "*" '
if demisto.params().get('insecure'):
curl += '-k '
self.curl.append(curl)
def write(self, msg):
# same as __call__ but allows IntegrationLogger to act as a File like object.
msg = self.encode(msg)
has_newline = False
if '\n' in msg:
has_newline = True
# if new line is last char we trim it out
if msg[-1] == '\n':
msg = msg[:-1]
self.write_buf.append(msg)
if has_newline:
text = "".join(self.write_buf)
if self.buffering:
self.messages.append(text)
else:
demisto.info(text)
if is_debug_mode() and text.startswith('send:'):
try:
self.build_curl(text)
except Exception as e: # should fail silently
demisto.debug('Failed generating curl - {}'.format(str(e)))
self.write_buf = []
def print_override(self, *args, **kwargs):
# print function that can be used to override print usage of internal modules
# will print to the log if the print target is stdout/stderr
try:
import __builtin__ # type: ignore
except ImportError:
# Python 3
import builtins as __builtin__ # type: ignore
file_ = kwargs.get('file')
if (not file_) or file_ == sys.stdout or file_ == sys.stderr:
kwargs['file'] = self
__builtin__.print(*args, **kwargs)
"""
a logger for python integrations:
use LOG(<message>) to add a record to the logger (message can be any object with __str__)
use LOG.print_log() to display all records in War-Room and server log.
"""
LOG = IntegrationLogger(debug_logging=is_debug_mode())
def formatAllArgs(args, kwds):
"""
makes a nice string representation of all the arguments
:type args: ``list``
:param args: function arguments (required)
:type kwds: ``dict``
:param kwds: function keyword arguments (required)
:return: string representation of all the arguments
:rtype: ``string``
"""
formattedArgs = ','.join([repr(a) for a in args]) + ',' + str(kwds).replace(':', "=").replace(" ", "")[1:-1]
return formattedArgs
def logger(func):
"""
decorator function to log the function call using LOG
:type func: ``function``
:param func: function to call (required)
:return: returns the func return value.
:rtype: ``any``
"""
def func_wrapper(*args, **kwargs):
LOG('calling {}({})'.format(func.__name__, formatAllArgs(args, kwargs)))
ret_val = func(*args, **kwargs)
if is_debug_mode():
LOG('Return value [{}]: {}'.format(func.__name__, str(ret_val)))
return ret_val
return func_wrapper
def formatCell(data, is_pretty=True):
"""
    Convert a given object to md while descending multiple levels
:type data: ``str`` or ``list``
:param data: The cell content (required)
:type is_pretty: ``bool``
:param is_pretty: Should cell content be prettified (default is True)
:return: The formatted cell content as a string
:rtype: ``str``
"""
if isinstance(data, STRING_TYPES):
return data
elif isinstance(data, dict):
return '\n'.join([u'{}: {}'.format(k, flattenCell(v, is_pretty)) for k, v in data.items()])
else:
return flattenCell(data, is_pretty)
def flattenCell(data, is_pretty=True):
"""
Flattens a markdown table cell content into a single string
:type data: ``str`` or ``list``
:param data: The cell content (required)
:type is_pretty: ``bool``
    :param is_pretty: Should cell content be prettified (default is True)
    :return: A string representation of the cell content
:rtype: ``str``
"""
indent = 4 if is_pretty else None
if isinstance(data, STRING_TYPES):
return data
elif isinstance(data, list):
string_list = []
for d in data:
try:
if IS_PY3 and isinstance(d, bytes):
string_list.append(d.decode('utf-8'))
else:
string_list.append(str(d))
except UnicodeEncodeError:
string_list.append(d.encode('utf-8'))
return ',\n'.join(string_list)
else:
return json.dumps(data, indent=indent, ensure_ascii=False)
def FormatIso8601(t):
"""
    Convert a datetime object to an ISO 8601 time format string
    :type t: ``datetime``
    :param t: The datetime object to be converted (required)
:return: An ISO 8601 time format string
:rtype: ``str``
"""
return t.strftime("%Y-%m-%dT%H:%M:%S")
def argToList(arg, separator=','):
"""
Converts a string representation of args to a python list
:type arg: ``str`` or ``list``
:param arg: Args to be converted (required)
:type separator: ``str``
:param separator: A string separator to separate the strings, the default is a comma.
:return: A python list of args
:rtype: ``list``
"""
if not arg:
return []
if isinstance(arg, list):
return arg
if isinstance(arg, STRING_TYPES):
if arg[0] == '[' and arg[-1] == ']':
return json.loads(arg)
return [s.strip() for s in arg.split(separator)]
return [arg]
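# argToList sketch (expected values):
#
#   argToList('a,b,c')        # -> ['a', 'b', 'c']
#   argToList('["a", "b"]')   # -> ['a', 'b']   (JSON-style list strings are parsed)
#   argToList(['a', 'b'])     # -> ['a', 'b']
#   argToList(None)           # -> []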
def argToBoolean(value):
"""
Boolean-ish arguments that are passed through demisto.args() could be type bool or type string.
This command removes the guesswork and returns a value of type bool, regardless of the input value's type.
It will also return True for 'yes' and False for 'no'.
:param value: the value to evaluate
:type value: ``string|bool``
    :return: a boolean representation of 'value'
:rtype: ``bool``
"""
if isinstance(value, bool):
return value
if isinstance(value, STRING_OBJ_TYPES):
if value.lower() in ['true', 'yes']:
return True
elif value.lower() in ['false', 'no']:
return False
else:
raise ValueError('Argument does not contain a valid boolean-like value')
else:
raise ValueError('Argument is neither a string nor a boolean')
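# argToBoolean sketch (expected behaviour):
#
#   argToBoolean('yes')    # -> True
#   argToBoolean('False')  # -> False
#   argToBoolean(True)     # -> True
#   argToBoolean('maybe')  # raises ValueError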
def appendContext(key, data, dedup=False):
"""
Append data to the investigation context
:type key: ``str``
:param key: The context path (required)
:type data: ``any``
:param data: Data to be added to the context (required)
:type dedup: ``bool``
:param dedup: True if de-duplication is required. Default is False.
:return: No data returned
:rtype: ``None``
"""
if data is None:
return
existing = demisto.get(demisto.context(), key)
if existing:
if isinstance(existing, STRING_TYPES):
if isinstance(data, STRING_TYPES):
new_val = data + ',' + existing
else:
new_val = data + existing # will raise a self explanatory TypeError
elif isinstance(existing, dict):
if isinstance(data, dict):
new_val = [existing, data] # type: ignore[assignment]
else:
new_val = data + existing # will raise a self explanatory TypeError
elif isinstance(existing, list):
if isinstance(data, list):
existing.extend(data)
else:
existing.append(data)
new_val = existing # type: ignore[assignment]
else:
new_val = [existing, data] # type: ignore[assignment]
if dedup and isinstance(new_val, list):
new_val = list(set(new_val))
demisto.setContext(key, new_val)
else:
demisto.setContext(key, data)
def url_to_clickable_markdown(data, url_keys):
"""
Turn the given urls fields in to clickable url, used for the markdown table.
:type data: ``[Union[str, List[Any], Dict[str, Any]]]``
:param data: a dictionary or a list containing data with some values that are urls
:type url_keys: ``List[str]``
    :param url_keys: the keys whose URL values should be made clickable
:return: markdown format for clickable url
:rtype: ``[Union[str, List[Any], Dict[str, Any]]]``
"""
if isinstance(data, list):
data = [url_to_clickable_markdown(item, url_keys) for item in data]
elif isinstance(data, dict):
data = {key: create_clickable_url(value) if key in url_keys else url_to_clickable_markdown(data[key], url_keys)
for key, value in data.items()}
return data
def create_clickable_url(url):
"""
Make the given url clickable when in markdown format by concatenating itself, with the proper brackets
:type url: ``Union[List[str], str]``
:param url: the url of interest or a list of urls
:return: markdown format for clickable url
:rtype: ``str``
"""
if not url:
return None
elif isinstance(url, list):
return ['[{}]({})'.format(item, item) for item in url]
return '[{}]({})'.format(url, url)
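# create_clickable_url sketch (expected values):
#
#   create_clickable_url('https://example.com')
#   # -> '[https://example.com](https://example.com)'
#   create_clickable_url(['https://a.com', 'https://b.com'])
#   # -> ['[https://a.com](https://a.com)', '[https://b.com](https://b.com)']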
def tableToMarkdown(name, t, headers=None, headerTransform=None, removeNull=False, metadata=None, url_keys=None):
"""
Converts a demisto table in JSON form to a Markdown table
:type name: ``str``
:param name: The name of the table (required)
:type t: ``dict`` or ``list``
:param t: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:type headers: ``list`` or ``string``
    :param headers: A list of headers to be presented in the output table (by order). If a string is passed,
        the table will have a single header. Default will include all available headers.
:type headerTransform: ``function``
:param headerTransform: A function that formats the original data headers (optional)
:type removeNull: ``bool``
:param removeNull: Remove empty columns from the table. Default is False
:type metadata: ``str``
:param metadata: Metadata about the table contents
:type url_keys: ``list``
    :param url_keys: a list of keys in the given JSON table whose values should be turned into clickable links
:return: A string representation of the markdown table
:rtype: ``str``
"""
# Turning the urls in the table to clickable
if url_keys:
t = url_to_clickable_markdown(t, url_keys)
mdResult = ''
if name:
mdResult = '### ' + name + '\n'
if metadata:
mdResult += metadata + '\n'
if not t or len(t) == 0:
mdResult += '**No entries.**\n'
return mdResult
if not isinstance(t, list):
t = [t]
if headers and isinstance(headers, STRING_TYPES):
headers = [headers]
if not isinstance(t[0], dict):
# the table contains only simple objects (strings, numbers)
# should be only one header
if headers and len(headers) > 0:
header = headers[0]
t = map(lambda item: dict((h, item) for h in [header]), t)
else:
raise Exception("Missing headers param for tableToMarkdown. Example: headers=['Some Header']")
# in case of headers was not provided (backward compatibility)
if not headers:
headers = list(t[0].keys())
headers.sort()
if removeNull:
headers_aux = headers[:]
for header in headers:
if all(obj.get(header) in ('', None, [], {}) for obj in t):
headers_aux.remove(header)
headers = headers_aux
if t and len(headers) > 0:
newHeaders = []
if headerTransform is None: # noqa
def headerTransform(s): return stringEscapeMD(s, True, True) # noqa
for header in headers:
newHeaders.append(headerTransform(header))
mdResult += '|'
if len(newHeaders) == 1:
mdResult += newHeaders[0]
else:
mdResult += '|'.join(newHeaders)
mdResult += '|\n'
sep = '---'
mdResult += '|' + '|'.join([sep] * len(headers)) + '|\n'
for entry in t:
vals = [stringEscapeMD((formatCell(entry.get(h, ''), False) if entry.get(h) is not None else ''),
True, True) for h in headers]
# this pipe is optional
mdResult += '| '
try:
mdResult += ' | '.join(vals)
except UnicodeDecodeError:
vals = [str(v) for v in vals]
mdResult += ' | '.join(vals)
mdResult += ' |\n'
else:
mdResult += '**No entries.**\n'
return mdResult
tblToMd = tableToMarkdown
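# A rough sketch of the markdown produced by tableToMarkdown (approximate output,
# for illustration only):
#
#   tableToMarkdown('Example', [{'Name': 'foo', 'Size': 2}])
#   # ### Example
#   # |Name|Size|
#   # |---|---|
#   # | foo | 2 |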
def createContextSingle(obj, id=None, keyTransform=None, removeNull=False):
"""Receives a dict with flattened key values, and converts them into nested dicts
:type obj: ``dict`` or ``list``
:param obj: The data to be added to the context (required)
:type id: ``str``
:param id: The ID of the context entry
:type keyTransform: ``function``
:param keyTransform: A formatting function for the markdown table headers
:type removeNull: ``bool``
:param removeNull: True if empty columns should be removed, false otherwise
    :return: The converted context dict
    :rtype: ``dict``
"""
res = {} # type: dict
if keyTransform is None:
def keyTransform(s): return s # noqa
keys = obj.keys()
for key in keys:
if removeNull and obj[key] in ('', None, [], {}):
continue
values = key.split('.')
current = res
for v in values[:-1]:
current.setdefault(v, {})
current = current[v]
current[keyTransform(values[-1])] = obj[key]
if id is not None:
res.setdefault('ID', id)
return res
def createContext(data, id=None, keyTransform=None, removeNull=False):
"""Receives a dict with flattened key values, and converts them into nested dicts
:type data: ``dict`` or ``list``
:param data: The data to be added to the context (required)
:type id: ``str``
:param id: The ID of the context entry
:type keyTransform: ``function``
:param keyTransform: A formatting function for the markdown table headers
:type removeNull: ``bool``
:param removeNull: True if empty columns should be removed, false otherwise
:return: The converted context list
:rtype: ``list``
"""
if isinstance(data, (list, tuple)):
return [createContextSingle(d, id, keyTransform, removeNull) for d in data]
else:
return createContextSingle(data, id, keyTransform, removeNull)
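# createContext sketch - dotted keys become nested dicts (expected value):
#
#   createContext({'a.b': 1, 'a.c': 2, 'd': None}, removeNull=True)
#   # -> {'a': {'b': 1, 'c': 2}}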
def sectionsToMarkdown(root):
"""
Converts a list of Demisto JSON tables to markdown string of tables
:type root: ``dict`` or ``list``
:param root: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:return: A string representation of the markdown table
:rtype: ``str``
"""
mdResult = ''
if isinstance(root, dict):
for section in root:
data = root[section]
if isinstance(data, dict):
data = [data]
data = [{k: formatCell(row[k]) for k in row} for row in data]
mdResult += tblToMd(section, data)
return mdResult
def fileResult(filename, data, file_type=None):
"""
Creates a file from the given data
:type filename: ``str``
:param filename: The name of the file to be created (required)
:type data: ``str`` or ``bytes``
:param data: The file data (required)
:type file_type: ``str``
:param file_type: one of the entryTypes file or entryInfoFile (optional)
:return: A Demisto war room entry
:rtype: ``dict``
"""
if file_type is None:
file_type = entryTypes['file']
temp = demisto.uniqueFile()
# pylint: disable=undefined-variable
if (IS_PY3 and isinstance(data, str)) or (not IS_PY3 and isinstance(data, unicode)): # type: ignore # noqa: F821
data = data.encode('utf-8')
# pylint: enable=undefined-variable
with open(demisto.investigation()['id'] + '_' + temp, 'wb') as f:
f.write(data)
return {'Contents': '', 'ContentsFormat': formats['text'], 'Type': file_type, 'File': filename, 'FileID': temp}
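# Illustrative sketch (not executed): returning a generated file to the war room with
# fileResult; the file name and content below are placeholders.
#
#   entry = fileResult('report.txt', 'line 1\nline 2\n')
#   demisto.results(entry)  # the entry references the file written to disk above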
def hash_djb2(s, seed=5381):
"""
Hash string with djb2 hash function
:type s: ``str``
:param s: The input string to hash
:type seed: ``int``
:param seed: The seed for the hash function (default is 5381)
:return: The hashed value
:rtype: ``int``
"""
hash_name = seed
for x in s:
hash_name = ((hash_name << 5) + hash_name) + ord(x)
return hash_name & 0xFFFFFFFF
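# Illustrative sketch (not executed): hash_djb2 yields a deterministic 32-bit value,
# which is handy for building stable IDs from strings.
#
#   hash_djb2('abc')  # -> 193485963 (the same input always yields the same value)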
def file_result_existing_file(filename, saveFilename=None):
"""
    Create a war room file entry from an existing file on disk.
    The file is renamed to a unique, investigation-scoped name before the entry is returned.
    :type filename: ``str``
    :param filename: The path of the existing file (required)
    :type saveFilename: ``str``
    :param saveFilename: The file name to display in the war room (defaults to filename)
:return: A Demisto war room entry
:rtype: ``dict``
"""
temp = demisto.uniqueFile()
os.rename(filename, demisto.investigation()['id'] + '_' + temp)
return {'Contents': '', 'ContentsFormat': formats['text'], 'Type': entryTypes['file'],
'File': saveFilename if saveFilename else filename, 'FileID': temp}
def flattenRow(rowDict):
"""
Flatten each element in the given rowDict
:type rowDict: ``dict``
:param rowDict: The dict to be flattened (required)
:return: A flattened dict
:rtype: ``dict``
"""
return {k: formatCell(rowDict[k]) for k in rowDict}
def flattenTable(tableDict):
"""
Flatten each row in the given tableDict
    :type tableDict: ``list``
    :param tableDict: The table (a list of row dicts) to be flattened (required)
    :return: A flattened table
    :rtype: ``list``
"""
return [flattenRow(row) for row in tableDict]
MARKDOWN_CHARS = r"\`*_{}[]()#+-!|"
def stringEscapeMD(st, minimal_escaping=False, escape_multiline=False):
"""
Escape any chars that might break a markdown string
:type st: ``str``
:param st: The string to be modified (required)
:type minimal_escaping: ``bool``
    :param minimal_escaping: Whether to escape only the table-breaking pipe character instead of all markdown special characters (optional)
    :type escape_multiline: ``bool``
    :param escape_multiline: Whether to convert line-ending characters to ``<br>`` (optional)
:return: A modified string
:rtype: ``str``
"""
if escape_multiline:
st = st.replace('\r\n', '<br>') # Windows
st = st.replace('\r', '<br>') # old Mac
st = st.replace('\n', '<br>') # Unix
if minimal_escaping:
for c in '|':
st = st.replace(c, '\\' + c)
else:
st = "".join(["\\" + str(c) if c in MARKDOWN_CHARS else str(c) for c in st])
return st
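# Illustrative sketch (not executed): escaping a value before placing it in a
# markdown table cell.
#
#   stringEscapeMD('a|b\nc', minimal_escaping=True, escape_multiline=True)
#   # -> 'a\\|b<br>c'  (the pipe is escaped, the newline becomes <br>)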
def raiseTable(root, key):
newInternal = {}
if key in root and isinstance(root[key], dict):
for sub in root[key]:
if sub not in root:
root[sub] = root[key][sub]
else:
newInternal[sub] = root[key][sub]
if newInternal:
root[key] = newInternal
else:
del root[key]
def zoomField(item, fieldName):
if isinstance(item, dict) and fieldName in item:
return item[fieldName]
else:
return item
def isCommandAvailable(cmd):
"""
Check the list of available modules to see whether a command is currently available to be run.
:type cmd: ``str``
:param cmd: The command to check (required)
:return: True if command is available, False otherwise
:rtype: ``bool``
"""
modules = demisto.getAllSupportedCommands()
for m in modules:
if modules[m] and isinstance(modules[m], list):
for c in modules[m]:
if c['name'] == cmd:
return True
return False
def epochToTimestamp(epoch):
return datetime.utcfromtimestamp(epoch / 1000.0).strftime("%Y-%m-%d %H:%M:%S")
def formatTimeColumns(data, timeColumnNames):
for row in data:
for k in timeColumnNames:
row[k] = epochToTimestamp(row[k])
def strip_tag(tag):
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = OrderedDict() # type: dict
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None # type: ignore
return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = OrderedDict() # type: dict
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if 'pretty' in options:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options={}, strip_ns=1, strip=1):
"""
Convert an XML string into a JSON string.
:type xmlstring: ``str``
:param xmlstring: The string to be converted (required)
    :return: The converted JSON string
    :rtype: ``str``
"""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
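# Illustrative sketch (not executed): converting an XML response body to JSON;
# the XML snippet below is a placeholder.
#
#   xml2json('<root><item id="1">hello</item></root>')
#   # -> '{"root": {"item": {"@id": "1", "#text": "hello"}}}'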
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem, encoding='utf-8')
def get_hash_type(hash_file):
"""
    Checks the type of the given hash. Returns 'md5', 'sha1', 'sha256', 'sha512' or 'Unknown'.
:type hash_file: ``str``
:param hash_file: The hash to be checked (required)
:return: The hash type
:rtype: ``str``
"""
hash_len = len(hash_file)
if (hash_len == 32):
return 'md5'
elif (hash_len == 40):
return 'sha1'
elif (hash_len == 64):
return 'sha256'
elif (hash_len == 128):
return 'sha512'
else:
return 'Unknown'
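# Illustrative sketch (not executed): the hash type is inferred from its length only.
#
#   get_hash_type('d41d8cd98f00b204e9800998ecf8427e')  # 32 hex chars -> 'md5'
#   get_hash_type('not-a-hash')                        # -> 'Unknown'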
def is_mac_address(mac):
"""
Test for valid mac address
:type mac: ``str``
:param mac: MAC address in the form of AA:BB:CC:00:11:22
:return: True/False
:rtype: ``bool``
"""
if re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:
return True
else:
return False
def is_ipv6_valid(address):
"""
Checks if the given string represents a valid IPv6 address.
    :type address: ``str``
:param address: The string to check.
:return: True if the given string represents a valid IPv6 address.
:rtype: ``bool``
"""
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error: # not a valid address
return False
return True
def is_ip_valid(s, accept_v6_ips=False):
"""
Checks if the given string represents a valid IP address.
By default, will only return 'True' for IPv4 addresses.
:type s: ``str``
:param s: The string to be checked (required)
:type accept_v6_ips: ``bool``
:param accept_v6_ips: A boolean determining whether the
function should accept IPv6 addresses
:return: True if the given string represents a valid IP address, False otherwise
:rtype: ``bool``
"""
a = s.split('.')
if accept_v6_ips and is_ipv6_valid(s):
return True
elif len(a) != 4:
return False
else:
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
return True
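# Illustrative sketch (not executed):
#
#   is_ip_valid('192.168.1.5')                   # -> True
#   is_ip_valid('999.1.1.1')                     # -> False (octet out of range)
#   is_ip_valid('fe80::1')                       # -> False (IPv6 rejected by default)
#   is_ip_valid('fe80::1', accept_v6_ips=True)   # -> True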
def get_integration_name():
"""
Getting calling integration's name
:return: Calling integration's name
:rtype: ``str``
"""
return demisto.callingContext.get('IntegrationBrand')
class Common(object):
class Indicator(object):
"""
interface class
"""
@abstractmethod
def to_context(self):
pass
class DBotScore(object):
"""
DBotScore class
:type indicator: ``str``
:param indicator: indicator value, ip, hash, domain, url, etc
:type indicator_type: ``DBotScoreType``
:param indicator_type: use DBotScoreType class
:type integration_name: ``str``
:param integration_name: integration name
:type score: ``DBotScore``
:param score: DBotScore.NONE, DBotScore.GOOD, DBotScore.SUSPICIOUS, DBotScore.BAD
:type malicious_description: ``str``
        :param malicious_description: If the indicator is malicious, a description explaining why it was scored as malicious
:type reliability: ``DBotScoreReliability``
:param reliability: use DBotScoreReliability class
:return: None
:rtype: ``None``
"""
NONE = 0
GOOD = 1
SUSPICIOUS = 2
BAD = 3
CONTEXT_PATH = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor ' \
'&& val.Type == obj.Type)'
CONTEXT_PATH_PRIOR_V5_5 = 'DBotScore'
def __init__(self, indicator, indicator_type, integration_name, score, malicious_description=None,
reliability=None):
if not DBotScoreType.is_valid_type(indicator_type):
raise TypeError('indicator_type must be of type DBotScoreType enum')
if not Common.DBotScore.is_valid_score(score):
                raise TypeError('score must be of type DBotScore enum')
if reliability and not DBotScoreReliability.is_valid_type(reliability):
raise TypeError('reliability must be of type DBotScoreReliability enum')
self.indicator = indicator
self.indicator_type = indicator_type
self.integration_name = integration_name or get_integration_name()
self.score = score
self.malicious_description = malicious_description
self.reliability = reliability
@staticmethod
def is_valid_score(score):
return score in (
Common.DBotScore.NONE,
Common.DBotScore.GOOD,
Common.DBotScore.SUSPICIOUS,
Common.DBotScore.BAD
)
@staticmethod
def get_context_path():
if is_demisto_version_ge('5.5.0'):
return Common.DBotScore.CONTEXT_PATH
else:
return Common.DBotScore.CONTEXT_PATH_PRIOR_V5_5
def to_context(self):
dbot_context = {
'Indicator': self.indicator,
'Type': self.indicator_type,
'Vendor': self.integration_name,
'Score': self.score
}
if self.reliability:
dbot_context['Reliability'] = self.reliability
ret_value = {
Common.DBotScore.get_context_path(): dbot_context
}
return ret_value
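    # Illustrative sketch (not executed): a reputation command would typically build a
    # DBotScore and attach it to the indicator object it enriches. The integration name
    # and score below are placeholders, and DBotScoreType.IP is assumed to be the IP
    # member of the DBotScoreType class defined elsewhere in this file.
    #
    #   dbot = Common.DBotScore(
    #       indicator='8.8.8.8',
    #       indicator_type=DBotScoreType.IP,
    #       integration_name='My Integration',
    #       score=Common.DBotScore.GOOD,
    #   )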
class IP(Indicator):
"""
IP indicator class - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#ip
:type ip: ``str``
:param ip: IP address
:type asn: ``str``
:param asn: The autonomous system name for the IP address, for example: "AS8948".
:type as_owner: ``str``
:param as_owner: The autonomous system owner of the IP.
:type region: ``str``
:param region: The region in which the IP is located.
:type port: ``str``
:param port: Ports that are associated with the IP.
:type internal: ``bool``
        :param internal: Whether the IP address is internal (True) or external (False).
:type updated_date: ``date``
:param updated_date: The date that the IP was last updated.
:type registrar_abuse_name: ``str``
:param registrar_abuse_name: The name of the contact for reporting abuse.
:type registrar_abuse_address: ``str``
:param registrar_abuse_address: The address of the contact for reporting abuse.
:type registrar_abuse_country: ``str``
:param registrar_abuse_country: The country of the contact for reporting abuse.
:type registrar_abuse_network: ``str``
:param registrar_abuse_network: The network of the contact for reporting abuse.
:type registrar_abuse_phone: ``str``
:param registrar_abuse_phone: The phone number of the contact for reporting abuse.
:type registrar_abuse_email: ``str``
:param registrar_abuse_email: The email address of the contact for reporting abuse.
:type campaign: ``str``
:param campaign: The campaign associated with the IP.
:type traffic_light_protocol: ``str``
:param traffic_light_protocol: The Traffic Light Protocol (TLP) color that is suitable for the IP.
:type community_notes: ``CommunityNotes``
:param community_notes: Notes on the IP that were given by the community.
:type publications: ``Publications``
        :param publications: Publications that were published about the IP.
        :type threat_types: ``ThreatTypes``
        :param threat_types: Threat types that are associated with the IP.
:type hostname: ``str``
:param hostname: The hostname that is mapped to this IP address.
:type geo_latitude: ``str``
:param geo_latitude: The geolocation where the IP address is located, in the format: latitude
:type geo_longitude: ``str``
:param geo_longitude: The geolocation where the IP address is located, in the format: longitude.
:type geo_country: ``str``
:param geo_country: The country in which the IP address is located.
:type geo_description: ``str``
:param geo_description: Additional information about the location.
:type detection_engines: ``int``
:param detection_engines: The total number of engines that checked the indicator.
:type positive_engines: ``int``
:param positive_engines: The number of engines that positively detected the indicator as malicious.
:type organization_name: ``str``
:param organization_name: The organization of the IP
:type organization_type: ``str``
        :param organization_type: The organization type of the IP.
:type tags: ``str``
:param tags: Tags of the IP.
:type malware_family: ``str``
:param malware_family: The malware family associated with the IP.
:type feed_related_indicators: ``FeedRelatedIndicators``
:param feed_related_indicators: List of indicators that are associated with the IP.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type dbot_score: ``DBotScore``
:param dbot_score: If IP has a score then create and set a DBotScore object.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'IP(val.Address && val.Address == obj.Address)'
def __init__(self, ip, dbot_score, asn=None, as_owner=None, region=None, port=None, internal=None,
updated_date=None, registrar_abuse_name=None, registrar_abuse_address=None,
registrar_abuse_country=None, registrar_abuse_network=None, registrar_abuse_phone=None,
registrar_abuse_email=None, campaign=None, traffic_light_protocol=None,
community_notes=None, publications=None, threat_types=None,
hostname=None, geo_latitude=None, geo_longitude=None,
geo_country=None, geo_description=None, detection_engines=None, positive_engines=None,
organization_name=None, organization_type=None, feed_related_indicators=None, tags=None,
malware_family=None, relationships=None):
self.ip = ip
self.asn = asn
self.as_owner = as_owner
self.region = region
self.port = port
self.internal = internal
self.updated_date = updated_date
self.registrar_abuse_name = registrar_abuse_name
self.registrar_abuse_address = registrar_abuse_address
self.registrar_abuse_country = registrar_abuse_country
self.registrar_abuse_network = registrar_abuse_network
self.registrar_abuse_phone = registrar_abuse_phone
self.registrar_abuse_email = registrar_abuse_email
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.community_notes = community_notes
self.publications = publications
self.threat_types = threat_types
self.hostname = hostname
self.geo_latitude = geo_latitude
self.geo_longitude = geo_longitude
self.geo_country = geo_country
self.geo_description = geo_description
self.detection_engines = detection_engines
self.positive_engines = positive_engines
self.organization_name = organization_name
self.organization_type = organization_type
self.feed_related_indicators = feed_related_indicators
self.tags = tags
self.malware_family = malware_family
self.relationships = relationships
if not isinstance(dbot_score, Common.DBotScore):
raise ValueError('dbot_score must be of type DBotScore')
self.dbot_score = dbot_score
def to_context(self):
ip_context = {
'Address': self.ip
}
if self.asn:
ip_context['ASN'] = self.asn
if self.as_owner:
ip_context['ASOwner'] = self.as_owner
if self.region:
ip_context['Region'] = self.region
if self.port:
ip_context['Port'] = self.port
if self.internal:
ip_context['Internal'] = self.internal
if self.updated_date:
ip_context['UpdatedDate'] = self.updated_date
if self.registrar_abuse_name or self.registrar_abuse_address or self.registrar_abuse_country or \
self.registrar_abuse_network or self.registrar_abuse_phone or self.registrar_abuse_email:
ip_context['Registrar'] = {'Abuse': {}}
if self.registrar_abuse_name:
ip_context['Registrar']['Abuse']['Name'] = self.registrar_abuse_name
if self.registrar_abuse_address:
ip_context['Registrar']['Abuse']['Address'] = self.registrar_abuse_address
if self.registrar_abuse_country:
ip_context['Registrar']['Abuse']['Country'] = self.registrar_abuse_country
if self.registrar_abuse_network:
ip_context['Registrar']['Abuse']['Network'] = self.registrar_abuse_network
if self.registrar_abuse_phone:
ip_context['Registrar']['Abuse']['Phone'] = self.registrar_abuse_phone
if self.registrar_abuse_email:
ip_context['Registrar']['Abuse']['Email'] = self.registrar_abuse_email
if self.campaign:
ip_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
ip_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
ip_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
ip_context['Publications'] = publications
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
ip_context['ThreatTypes'] = threat_types
if self.hostname:
ip_context['Hostname'] = self.hostname
if self.geo_latitude or self.geo_country or self.geo_description:
ip_context['Geo'] = {}
if self.geo_latitude and self.geo_longitude:
ip_context['Geo']['Location'] = '{}:{}'.format(self.geo_latitude, self.geo_longitude)
if self.geo_country:
ip_context['Geo']['Country'] = self.geo_country
if self.geo_description:
ip_context['Geo']['Description'] = self.geo_description
if self.organization_name or self.organization_type:
ip_context['Organization'] = {}
if self.organization_name:
ip_context['Organization']['Name'] = self.organization_name
if self.organization_type:
ip_context['Organization']['Type'] = self.organization_type
if self.detection_engines is not None:
ip_context['DetectionEngines'] = self.detection_engines
if self.positive_engines is not None:
ip_context['PositiveDetections'] = self.positive_engines
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
ip_context['FeedRelatedIndicators'] = feed_related_indicators
if self.tags:
ip_context['Tags'] = self.tags
if self.malware_family:
ip_context['MalwareFamily'] = self.malware_family
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
ip_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
ip_context['Relationships'] = relationships_context
ret_value = {
Common.IP.CONTEXT_PATH: ip_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
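    # Illustrative sketch (not executed): wrapping an IP reputation result; 'dbot' is a
    # Common.DBotScore built as in the sketch above, and the ASN value is a placeholder.
    #
    #   ip_indicator = Common.IP(ip='8.8.8.8', asn='AS15169', dbot_score=dbot)
    #   ip_indicator.to_context()
    #   # -> {'IP(val.Address && ...)': {'Address': '8.8.8.8', 'ASN': 'AS15169'},
    #   #     'DBotScore(...)': {...}}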
class FileSignature(object):
"""
FileSignature class
:type authentihash: ``str``
:param authentihash: The authentication hash.
:type copyright: ``str``
:param copyright: Copyright information.
:type description: ``str``
:param description: A description of the signature.
:type file_version: ``str``
:param file_version: The file version.
:type internal_name: ``str``
:param internal_name: The internal name of the file.
:type original_name: ``str``
:param original_name: The original name of the file.
:return: None
:rtype: ``None``
"""
def __init__(self, authentihash, copyright, description, file_version, internal_name, original_name):
self.authentihash = authentihash
self.copyright = copyright
self.description = description
self.file_version = file_version
self.internal_name = internal_name
self.original_name = original_name
def to_context(self):
return {
'Authentihash': self.authentihash,
'Copyright': self.copyright,
'Description': self.description,
'FileVersion': self.file_version,
'InternalName': self.internal_name,
'OriginalName': self.original_name,
}
class FeedRelatedIndicators(object):
"""
FeedRelatedIndicators class
        Implements indicators that are associated with another indicator.
:type value: ``str``
:param value: Indicators that are associated with the indicator.
:type indicator_type: ``str``
:param indicator_type: The type of the indicators that are associated with the indicator.
:type description: ``str``
:param description: The description of the indicators that are associated with the indicator.
:return: None
:rtype: ``None``
"""
def __init__(self, value=None, indicator_type=None, description=None):
self.value = value
self.indicator_type = indicator_type
self.description = description
def to_context(self):
return {
'value': self.value,
'type': self.indicator_type,
'description': self.description
}
class CommunityNotes(object):
"""
CommunityNotes class
        Implements community notes of an indicator.
:type note: ``str``
:param note: Notes on the indicator that were given by the community.
:type timestamp: ``Timestamp``
:param timestamp: The time in which the note was published.
:return: None
:rtype: ``None``
"""
def __init__(self, note=None, timestamp=None):
self.note = note
self.timestamp = timestamp
def to_context(self):
return {
'note': self.note,
'timestamp': self.timestamp,
}
class Publications(object):
"""
Publications class
        Implements publications of an indicator.
:type source: ``str``
:param source: The source in which the article was published.
:type title: ``str``
:param title: The name of the article.
:type link: ``str``
:param link: A link to the original article.
:type timestamp: ``Timestamp``
:param timestamp: The time in which the article was published.
:return: None
:rtype: ``None``
"""
def __init__(self, source=None, title=None, link=None, timestamp=None):
self.source = source
self.title = title
self.link = link
self.timestamp = timestamp
def to_context(self):
return {
'source': self.source,
'title': self.title,
'link': self.link,
'timestamp': self.timestamp,
}
class Behaviors(object):
"""
Behaviors class
        Implements behaviors of an indicator.
        :type details: ``str``
        :param details: The details of the observed behavior.
        :type action: ``str``
        :param action: The action associated with the behavior.
:return: None
:rtype: ``None``
"""
def __init__(self, details=None, action=None):
self.details = details
self.action = action
def to_context(self):
return {
'details': self.details,
'title': self.action,
}
class ThreatTypes(object):
"""
ThreatTypes class
        Implements threat types of an indicator.
        :type threat_category: ``str``
        :param threat_category: The threat category associated with this indicator by the source vendor. For example,
        Phishing, Control, TOR, etc.
        :type threat_category_confidence: ``str``
        :param threat_category_confidence: The confidence level provided by the vendor for the threat type category.
        For example, a confidence of 90 for the threat type category "malware" means the vendor rates this
        as 90% likely to be malware.
:return: None
:rtype: ``None``
"""
def __init__(self, threat_category=None, threat_category_confidence=None):
self.threat_category = threat_category
self.threat_category_confidence = threat_category_confidence
def to_context(self):
return {
'threatcategory': self.threat_category,
'threatcategoryconfidence': self.threat_category_confidence,
}
class File(Indicator):
"""
File indicator class - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#file
:type name: ``str``
:param name: The full file name (including file extension).
:type entry_id: ``str``
:param entry_id: The ID for locating the file in the War Room.
:type size: ``int``
:param size: The size of the file in bytes.
:type md5: ``str``
:param md5: The MD5 hash of the file.
:type sha1: ``str``
:param sha1: The SHA1 hash of the file.
:type sha256: ``str``
:param sha256: The SHA256 hash of the file.
:type sha512: ``str``
:param sha512: The SHA512 hash of the file.
:type ssdeep: ``str``
:param ssdeep: The ssdeep hash of the file (same as displayed in file entries).
:type extension: ``str``
:param extension: The file extension, for example: "xls".
:type file_type: ``str``
:param file_type: The file type, as determined by libmagic (same as displayed in file entries).
:type hostname: ``str``
:param hostname: The name of the host where the file was found. Should match Path.
:type path: ``str``
:param path: The path where the file is located.
:type company: ``str``
:param company: The name of the company that released a binary.
:type product_name: ``str``
:param product_name: The name of the product to which this file belongs.
:type digital_signature__publisher: ``str``
:param digital_signature__publisher: The publisher of the digital signature for the file.
:type signature: ``FileSignature``
:param signature: File signature class
:type actor: ``str``
:param actor: The actor reference.
:type tags: ``str``
:param tags: Tags of the file.
:type feed_related_indicators: ``FeedRelatedIndicators``
:param feed_related_indicators: List of indicators that are associated with the file.
:type malware_family: ``str``
:param malware_family: The malware family associated with the File.
:type campaign: ``str``
        :param campaign: The campaign associated with the file.
        :type traffic_light_protocol: ``str``
        :param traffic_light_protocol: The Traffic Light Protocol (TLP) color that is suitable for the file.
        :type community_notes: ``CommunityNotes``
        :param community_notes: Notes on the file that were given by the community.
        :type publications: ``Publications``
        :param publications: Publications that were published about the file.
:type threat_types: ``ThreatTypes``
:param threat_types: Threat types that are associated with the file.
:type imphash: ``str``
:param imphash: The Imphash hash of the file.
:type quarantined: ``bool``
:param quarantined: Is the file quarantined or not.
:type organization: ``str``
:param organization: The organization of the file.
:type associated_file_names: ``str``
:param associated_file_names: File names that are known as associated to the file.
:type behaviors: ``Behaviors``
:param behaviors: list of behaviors associated with the file.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type dbot_score: ``DBotScore``
:param dbot_score: If file has a score then create and set a DBotScore object
:rtype: ``None``
:return: None
"""
CONTEXT_PATH = 'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || ' \
'val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512 || ' \
'val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTPH || ' \
'val.SSDeep && val.SSDeep == obj.SSDeep)'
def __init__(self, dbot_score, name=None, entry_id=None, size=None, md5=None, sha1=None, sha256=None,
sha512=None, ssdeep=None, extension=None, file_type=None, hostname=None, path=None, company=None,
product_name=None, digital_signature__publisher=None, signature=None, actor=None, tags=None,
feed_related_indicators=None, malware_family=None, imphash=None, quarantined=None, campaign=None,
associated_file_names=None, traffic_light_protocol=None, organization=None, community_notes=None,
publications=None, threat_types=None, behaviors=None, relationships=None):
self.name = name
self.entry_id = entry_id
self.size = size
self.md5 = md5
self.sha1 = sha1
self.sha256 = sha256
self.sha512 = sha512
self.ssdeep = ssdeep
self.extension = extension
self.file_type = file_type
self.hostname = hostname
self.path = path
self.company = company
self.product_name = product_name
self.digital_signature__publisher = digital_signature__publisher
self.signature = signature
self.actor = actor
self.tags = tags
self.feed_related_indicators = feed_related_indicators
self.malware_family = malware_family
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.community_notes = community_notes
self.publications = publications
self.threat_types = threat_types
self.imphash = imphash
self.quarantined = quarantined
self.organization = organization
self.associated_file_names = associated_file_names
self.behaviors = behaviors
self.relationships = relationships
self.dbot_score = dbot_score
def to_context(self):
file_context = {}
if self.name:
file_context['Name'] = self.name
if self.entry_id:
file_context['EntryID'] = self.entry_id
if self.size:
file_context['Size'] = self.size
if self.md5:
file_context['MD5'] = self.md5
if self.sha1:
file_context['SHA1'] = self.sha1
if self.sha256:
file_context['SHA256'] = self.sha256
if self.sha512:
file_context['SHA512'] = self.sha512
if self.ssdeep:
file_context['SSDeep'] = self.ssdeep
if self.extension:
file_context['Extension'] = self.extension
if self.file_type:
file_context['Type'] = self.file_type
if self.hostname:
file_context['Hostname'] = self.hostname
if self.path:
file_context['Path'] = self.path
if self.company:
file_context['Company'] = self.company
if self.product_name:
file_context['ProductName'] = self.product_name
if self.digital_signature__publisher:
file_context['DigitalSignature'] = {
                    'Publisher': self.digital_signature__publisher
}
if self.signature:
file_context['Signature'] = self.signature.to_context()
if self.actor:
file_context['Actor'] = self.actor
if self.tags:
file_context['Tags'] = self.tags
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
file_context['FeedRelatedIndicators'] = feed_related_indicators
if self.malware_family:
file_context['MalwareFamily'] = self.malware_family
if self.campaign:
file_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
file_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
file_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
file_context['Publications'] = publications
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
file_context['ThreatTypes'] = threat_types
if self.imphash:
file_context['Imphash'] = self.imphash
if self.quarantined:
file_context['Quarantined'] = self.quarantined
if self.organization:
file_context['Organization'] = self.organization
if self.associated_file_names:
file_context['AssociatedFileNames'] = self.associated_file_names
if self.behaviors:
behaviors = []
for behavior in self.behaviors:
behaviors.append(behavior.to_context())
file_context['Behavior'] = behaviors
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
file_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
file_context['Relationships'] = relationships_context
ret_value = {
Common.File.CONTEXT_PATH: file_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
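    # Illustrative sketch (not executed): a malicious file verdict. The hash, vendor name
    # and description are placeholders, and DBotScoreType.FILE is assumed to be the file
    # member of the DBotScoreType class defined elsewhere in this file. A BAD score adds
    # the 'Malicious' sub-context.
    #
    #   bad_score = Common.DBotScore(
    #       indicator='<sha256>', indicator_type=DBotScoreType.FILE,
    #       integration_name='My Integration', score=Common.DBotScore.BAD,
    #       malicious_description='Matched a known malware signature')
    #   file_indicator = Common.File(sha256='<sha256>', dbot_score=bad_score)
    #   file_indicator.to_context()  # File context plus a Malicious entry and DBotScore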
class CVE(Indicator):
"""
CVE indicator class - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#cve
:type id: ``str``
:param id: The ID of the CVE, for example: "CVE-2015-1653".
:type cvss: ``str``
:param cvss: The CVSS of the CVE, for example: "10.0".
:type published: ``str``
:param published: The timestamp of when the CVE was published.
:type modified: ``str``
:param modified: The timestamp of when the CVE was last modified.
:type description: ``str``
:param description: A description of the CVE.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'CVE(val.ID && val.ID == obj.ID)'
def __init__(self, id, cvss, published, modified, description, relationships=None):
# type (str, str, str, str, str) -> None
self.id = id
self.cvss = cvss
self.published = published
self.modified = modified
self.description = description
self.dbot_score = Common.DBotScore(
indicator=id,
indicator_type=DBotScoreType.CVE,
integration_name=None,
score=Common.DBotScore.NONE
)
self.relationships = relationships
def to_context(self):
cve_context = {
'ID': self.id
}
if self.cvss:
cve_context['CVSS'] = self.cvss
if self.published:
cve_context['Published'] = self.published
if self.modified:
cve_context['Modified'] = self.modified
if self.description:
cve_context['Description'] = self.description
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
cve_context['Relationships'] = relationships_context
ret_value = {
Common.CVE.CONTEXT_PATH: cve_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
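    # Illustrative sketch (not executed): CVE indicators always carry a NONE DBot score,
    # so only the descriptive fields need to be supplied; the dates and description
    # below are placeholders.
    #
    #   cve = Common.CVE(id='CVE-2015-1653', cvss='10.0', published='2015-04-14',
    #                    modified='2015-04-15', description='placeholder description')
    #   cve.to_context()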
class EMAIL(Indicator):
"""
EMAIL indicator class
        :type address: ``str``
:param address: The email's address.
:type domain: ``str``
:param domain: The domain of the Email.
:type blocked: ``bool``
:param blocked: Whether the email address is blocked.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Email(val.Address && val.Address == obj.Address)'
def __init__(self, address, dbot_score, domain=None, blocked=None, relationships=None):
# type (str, str, bool) -> None
self.address = address
self.domain = domain
self.blocked = blocked
self.dbot_score = dbot_score
self.relationships = relationships
def to_context(self):
email_context = {
'Address': self.address
}
if self.domain:
email_context['Domain'] = self.domain
if self.blocked:
email_context['Blocked'] = self.blocked
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
email_context['Relationships'] = relationships_context
ret_value = {
Common.EMAIL.CONTEXT_PATH: email_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class URL(Indicator):
"""
URL indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#url
:type url: ``str``
:param url: The URL
:type detection_engines: ``int``
:param detection_engines: The total number of engines that checked the indicator.
:type positive_detections: ``int``
:param positive_detections: The number of engines that positively detected the indicator as malicious.
:type category: ``str``
:param category: The category associated with the indicator.
:type feed_related_indicators: ``FeedRelatedIndicators``
:param feed_related_indicators: List of indicators that are associated with the URL.
:type malware_family: ``str``
:param malware_family: The malware family associated with the URL.
:type tags: ``str``
:param tags: Tags of the URL.
:type port: ``str``
:param port: Ports that are associated with the URL.
:type internal: ``bool``
        :param internal: Whether the URL is internal (True) or external (False).
:type campaign: ``str``
:param campaign: The campaign associated with the URL.
:type traffic_light_protocol: ``str``
:param traffic_light_protocol: The Traffic Light Protocol (TLP) color that is suitable for the URL.
:type threat_types: ``ThreatTypes``
        :param threat_types: Threat types that are associated with the URL.
:type asn: ``str``
:param asn: The autonomous system name for the URL, for example: 'AS8948'.
:type as_owner: ``str``
:param as_owner: The autonomous system owner of the URL.
:type geo_country: ``str``
:param geo_country: The country in which the URL is located.
:type organization: ``str``
:param organization: The organization of the URL.
:type community_notes: ``CommunityNotes``
:param community_notes: List of notes on the URL that were given by the community.
:type publications: ``Publications``
        :param publications: List of publications that were published about the URL.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type dbot_score: ``DBotScore``
:param dbot_score: If URL has reputation then create DBotScore object
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'URL(val.Data && val.Data == obj.Data)'
def __init__(self, url, dbot_score, detection_engines=None, positive_detections=None, category=None,
feed_related_indicators=None, tags=None, malware_family=None, port=None, internal=None,
campaign=None, traffic_light_protocol=None, threat_types=None, asn=None, as_owner=None,
geo_country=None, organization=None, community_notes=None, publications=None, relationships=None):
self.url = url
self.detection_engines = detection_engines
self.positive_detections = positive_detections
self.category = category
self.feed_related_indicators = feed_related_indicators
self.tags = tags
self.malware_family = malware_family
self.port = port
self.internal = internal
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.threat_types = threat_types
self.asn = asn
self.as_owner = as_owner
self.geo_country = geo_country
self.organization = organization
self.community_notes = community_notes
self.publications = publications
self.relationships = relationships
self.dbot_score = dbot_score
def to_context(self):
url_context = {
'Data': self.url
}
if self.detection_engines is not None:
url_context['DetectionEngines'] = self.detection_engines
if self.positive_detections is not None:
url_context['PositiveDetections'] = self.positive_detections
if self.category:
url_context['Category'] = self.category
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
url_context['FeedRelatedIndicators'] = feed_related_indicators
if self.tags:
url_context['Tags'] = self.tags
if self.malware_family:
url_context['MalwareFamily'] = self.malware_family
if self.port:
url_context['Port'] = self.port
if self.internal:
url_context['Internal'] = self.internal
if self.campaign:
url_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
url_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
url_context['ThreatTypes'] = threat_types
if self.asn:
url_context['ASN'] = self.asn
if self.as_owner:
url_context['ASOwner'] = self.as_owner
if self.geo_country:
url_context['Geo'] = {'Country': self.geo_country}
if self.organization:
url_context['Organization'] = self.organization
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
url_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
url_context['Publications'] = publications
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
url_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
url_context['Relationships'] = relationships_context
ret_value = {
Common.URL.CONTEXT_PATH: url_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
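    # Illustrative sketch (not executed): URL enrichment output; the URL and engine
    # counts are placeholders, and 'dbot' is a Common.DBotScore built for the same URL.
    #
    #   url_indicator = Common.URL(url='https://example.com/path',
    #                              detection_engines=70, positive_detections=0,
    #                              dbot_score=dbot)
    #   url_indicator.to_context()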
class Domain(Indicator):
""" ignore docstring
Domain indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#domain
"""
CONTEXT_PATH = 'Domain(val.Name && val.Name == obj.Name)'
def __init__(self, domain, dbot_score, dns=None, detection_engines=None, positive_detections=None,
organization=None, sub_domains=None, creation_date=None, updated_date=None, expiration_date=None,
domain_status=None, name_servers=None, feed_related_indicators=None, malware_family=None,
registrar_name=None, registrar_abuse_email=None, registrar_abuse_phone=None,
registrant_name=None, registrant_email=None, registrant_phone=None, registrant_country=None,
admin_name=None, admin_email=None, admin_phone=None, admin_country=None, tags=None,
domain_idn_name=None, port=None,
internal=None, category=None, campaign=None, traffic_light_protocol=None, threat_types=None,
community_notes=None, publications=None, geo_location=None, geo_country=None,
geo_description=None, tech_country=None, tech_name=None, tech_email=None, tech_organization=None,
billing=None, relationships=None):
self.domain = domain
self.dns = dns
self.detection_engines = detection_engines
self.positive_detections = positive_detections
self.organization = organization
self.sub_domains = sub_domains
self.creation_date = creation_date
self.updated_date = updated_date
self.expiration_date = expiration_date
self.registrar_name = registrar_name
self.registrar_abuse_email = registrar_abuse_email
self.registrar_abuse_phone = registrar_abuse_phone
self.registrant_name = registrant_name
self.registrant_email = registrant_email
self.registrant_phone = registrant_phone
self.registrant_country = registrant_country
self.admin_name = admin_name
self.admin_email = admin_email
self.admin_phone = admin_phone
self.admin_country = admin_country
self.tags = tags
self.domain_status = domain_status
self.name_servers = name_servers
self.feed_related_indicators = feed_related_indicators
self.malware_family = malware_family
self.domain_idn_name = domain_idn_name
self.port = port
self.internal = internal
self.category = category
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.threat_types = threat_types
self.community_notes = community_notes
self.publications = publications
self.geo_location = geo_location
self.geo_country = geo_country
self.geo_description = geo_description
self.tech_country = tech_country
self.tech_name = tech_name
self.tech_organization = tech_organization
self.tech_email = tech_email
self.billing = billing
self.relationships = relationships
self.dbot_score = dbot_score
def to_context(self):
domain_context = {
'Name': self.domain
}
whois_context = {}
if self.dns:
domain_context['DNS'] = self.dns
if self.detection_engines is not None:
domain_context['DetectionEngines'] = self.detection_engines
if self.positive_detections is not None:
domain_context['PositiveDetections'] = self.positive_detections
if self.registrar_name or self.registrar_abuse_email or self.registrar_abuse_phone:
domain_context['Registrar'] = {
'Name': self.registrar_name,
'AbuseEmail': self.registrar_abuse_email,
'AbusePhone': self.registrar_abuse_phone
}
whois_context['Registrar'] = domain_context['Registrar']
if self.registrant_name or self.registrant_phone or self.registrant_email or self.registrant_country:
domain_context['Registrant'] = {
'Name': self.registrant_name,
'Email': self.registrant_email,
'Phone': self.registrant_phone,
'Country': self.registrant_country
}
whois_context['Registrant'] = domain_context['Registrant']
if self.admin_name or self.admin_email or self.admin_phone or self.admin_country:
domain_context['Admin'] = {
'Name': self.admin_name,
'Email': self.admin_email,
'Phone': self.admin_phone,
'Country': self.admin_country
}
whois_context['Admin'] = domain_context['Admin']
if self.organization:
domain_context['Organization'] = self.organization
if self.sub_domains:
domain_context['Subdomains'] = self.sub_domains
if self.domain_status:
domain_context['DomainStatus'] = self.domain_status
whois_context['DomainStatus'] = domain_context['DomainStatus']
if self.creation_date:
domain_context['CreationDate'] = self.creation_date
whois_context['CreationDate'] = domain_context['CreationDate']
if self.updated_date:
domain_context['UpdatedDate'] = self.updated_date
whois_context['UpdatedDate'] = domain_context['UpdatedDate']
if self.expiration_date:
domain_context['ExpirationDate'] = self.expiration_date
whois_context['ExpirationDate'] = domain_context['ExpirationDate']
if self.name_servers:
domain_context['NameServers'] = self.name_servers
whois_context['NameServers'] = domain_context['NameServers']
if self.tags:
domain_context['Tags'] = self.tags
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
domain_context['FeedRelatedIndicators'] = feed_related_indicators
if self.malware_family:
domain_context['MalwareFamily'] = self.malware_family
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
domain_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.domain_idn_name:
domain_context['DomainIDNName'] = self.domain_idn_name
if self.port:
domain_context['Port'] = self.port
if self.internal:
domain_context['Internal'] = self.internal
if self.category:
domain_context['Category'] = self.category
if self.campaign:
domain_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
domain_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
domain_context['ThreatTypes'] = threat_types
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
domain_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
domain_context['Publications'] = publications
if self.geo_location or self.geo_country or self.geo_description:
domain_context['Geo'] = {}
if self.geo_location:
domain_context['Geo']['Location'] = self.geo_location
if self.geo_country:
domain_context['Geo']['Country'] = self.geo_country
if self.geo_description:
domain_context['Geo']['Description'] = self.geo_description
if self.tech_country or self.tech_name or self.tech_organization or self.tech_email:
domain_context['Tech'] = {}
if self.tech_country:
domain_context['Tech']['Country'] = self.tech_country
if self.tech_name:
domain_context['Tech']['Name'] = self.tech_name
if self.tech_organization:
domain_context['Tech']['Organization'] = self.tech_organization
if self.tech_email:
domain_context['Tech']['Email'] = self.tech_email
if self.billing:
domain_context['Billing'] = self.billing
if whois_context:
domain_context['WHOIS'] = whois_context
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
domain_context['Relationships'] = relationships_context
ret_value = {
Common.Domain.CONTEXT_PATH: domain_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class Endpoint(Indicator):
""" ignore docstring
Endpoint indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#endpoint
"""
CONTEXT_PATH = 'Endpoint(val.ID && val.ID == obj.ID)'
def __init__(self, id, hostname=None, ip_address=None, domain=None, mac_address=None,
os=None, os_version=None, dhcp_server=None, bios_version=None, model=None,
memory=None, processors=None, processor=None, relationships=None, vendor=None, status=None,
is_isolated=None):
self.id = id
self.hostname = hostname
self.ip_address = ip_address
self.domain = domain
self.mac_address = mac_address
self.os = os
self.os_version = os_version
self.dhcp_server = dhcp_server
self.bios_version = bios_version
self.model = model
self.memory = memory
self.processors = processors
self.processor = processor
self.vendor = vendor
self.status = status
self.is_isolated = is_isolated
self.relationships = relationships
def to_context(self):
endpoint_context = {
'ID': self.id
}
if self.hostname:
endpoint_context['Hostname'] = self.hostname
if self.ip_address:
endpoint_context['IPAddress'] = self.ip_address
if self.domain:
endpoint_context['Domain'] = self.domain
if self.mac_address:
endpoint_context['MACAddress'] = self.mac_address
if self.os:
endpoint_context['OS'] = self.os
if self.os_version:
endpoint_context['OSVersion'] = self.os_version
if self.dhcp_server:
endpoint_context['DHCPServer'] = self.dhcp_server
if self.bios_version:
endpoint_context['BIOSVersion'] = self.bios_version
if self.model:
endpoint_context['Model'] = self.model
if self.memory:
endpoint_context['Memory'] = self.memory
if self.processors:
endpoint_context['Processors'] = self.processors
if self.processor:
endpoint_context['Processor'] = self.processor
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
endpoint_context['Relationships'] = relationships_context
if self.vendor:
endpoint_context['Vendor'] = self.vendor
if self.status:
if self.status not in ENDPOINT_STATUS_OPTIONS:
raise ValueError('Status does not have a valid value such as: Online or Offline')
endpoint_context['Status'] = self.status
if self.is_isolated:
if self.is_isolated not in ENDPOINT_ISISOLATED_OPTIONS:
raise ValueError('Is Isolated does not have a valid value such as: Yes, No, Pending'
' isolation or Pending unisolation')
endpoint_context['IsIsolated'] = self.is_isolated
ret_value = {
Common.Endpoint.CONTEXT_PATH: endpoint_context
}
return ret_value
class Account(Indicator):
"""
Account indicator - https://xsoar.pan.dev/docs/integrations/context-standards-recommended#account
:type dbot_score: ``DBotScore``
:param dbot_score: If account has reputation then create DBotScore object
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Account(val.id && val.id == obj.id)'
def __init__(self, id, type=None, username=None, display_name=None, groups=None,
domain=None, email_address=None, telephone_number=None, office=None, job_title=None,
department=None, country=None, state=None, city=None, street=None, is_enabled=None,
dbot_score=None, relationships=None):
self.id = id
self.type = type
self.username = username
self.display_name = display_name
self.groups = groups
self.domain = domain
self.email_address = email_address
self.telephone_number = telephone_number
self.office = office
self.job_title = job_title
self.department = department
self.country = country
self.state = state
self.city = city
self.street = street
self.is_enabled = is_enabled
self.relationships = relationships
if not isinstance(dbot_score, Common.DBotScore):
raise ValueError('dbot_score must be of type DBotScore')
self.dbot_score = dbot_score
def to_context(self):
account_context = {
'Id': self.id
}
if self.type:
account_context['Type'] = self.type
            irrelevant = ['CONTEXT_PATH', 'to_context', 'dbot_score', 'Id']
            details = [detail for detail in dir(self) if not detail.startswith('__') and detail not in irrelevant]
for detail in details:
if self.__getattribute__(detail):
if detail == 'email_address':
account_context['Email'] = {
'Address': self.email_address
}
else:
Detail = camelize_string(detail, '_')
account_context[Detail] = self.__getattribute__(detail)
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
account_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
account_context['Relationships'] = relationships_context
ret_value = {
Common.Account.CONTEXT_PATH: account_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class Cryptocurrency(Indicator):
"""
Cryptocurrency indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#cryptocurrency
:type address: ``str``
:param address: The Cryptocurrency address
:type address_type: ``str``
:param address_type: The Cryptocurrency type - e.g. `bitcoin`.
:type dbot_score: ``DBotScore``
:param dbot_score: If the address has reputation then create DBotScore object.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Cryptocurrency(val.Address && val.Address == obj.Address)'
def __init__(self, address, address_type, dbot_score):
self.address = address
self.address_type = address_type
self.dbot_score = dbot_score
def to_context(self):
crypto_context = {
'Address': self.address,
'AddressType': self.address_type
}
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
crypto_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
ret_value = {
Common.Cryptocurrency.CONTEXT_PATH: crypto_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
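    # Illustrative sketch (not executed): a cryptocurrency address indicator; the address
    # is a placeholder, and 'dbot' is a Common.DBotScore built for the same address.
    #
    #   crypto = Common.Cryptocurrency(address='<btc-address>',
    #                                  address_type='bitcoin', dbot_score=dbot)
    #   crypto.to_context()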
class CertificatePublicKey(object):
"""
CertificatePublicKey class
Defines an X509 PublicKey used in Common.Certificate
:type algorithm: ``str``
:param algorithm: The encryption algorithm: DSA, RSA, EC or UNKNOWN (Common.CertificatePublicKey.Algorithm enum)
:type length: ``int``
:param length: The length of the public key
:type publickey: ``Optional[str]``
:param publickey: publickey
:type p: ``Optional[str]``
:param p: P parameter used in DSA algorithm
:type q: ``Optional[str]``
:param q: Q parameter used in DSA algorithm
:type g: ``Optional[str]``
:param g: G parameter used in DSA algorithm
:type modulus: ``Optional[str]``
:param modulus: modulus parameter used in RSA algorithm
        :type exponent: ``Optional[int]``
        :param exponent: exponent parameter used in RSA algorithm
:type x: ``Optional[str]``
:param x: X parameter used in EC algorithm
:type y: ``Optional[str]``
:param y: Y parameter used in EC algorithm
:type curve: ``Optional[str]``
:param curve: curve parameter used in EC algorithm
:return: None
:rtype: ``None``
"""
class Algorithm(object):
"""
Algorithm class to enumerate available algorithms
:return: None
:rtype: ``None``
"""
DSA = "DSA"
RSA = "RSA"
EC = "EC"
UNKNOWN = "Unknown Algorithm"
@staticmethod
def is_valid_type(_type):
return _type in (
Common.CertificatePublicKey.Algorithm.DSA,
Common.CertificatePublicKey.Algorithm.RSA,
Common.CertificatePublicKey.Algorithm.EC,
Common.CertificatePublicKey.Algorithm.UNKNOWN
)
def __init__(
self,
algorithm, # type: str
length, # type: int
publickey=None, # type: str
p=None, # type: str
q=None, # type: str
g=None, # type: str
modulus=None, # type: str
exponent=None, # type: int
x=None, # type: str
y=None, # type: str
curve=None # type: str
):
if not Common.CertificatePublicKey.Algorithm.is_valid_type(algorithm):
raise TypeError('algorithm must be of type Common.CertificatePublicKey.Algorithm enum')
self.algorithm = algorithm
self.length = length
self.publickey = publickey
self.p = p
self.q = q
self.g = g
self.modulus = modulus
self.exponent = exponent
self.x = x
self.y = y
self.curve = curve
def to_context(self):
publickey_context = {
'Algorithm': self.algorithm,
'Length': self.length
}
if self.publickey:
publickey_context['PublicKey'] = self.publickey
if self.algorithm == Common.CertificatePublicKey.Algorithm.DSA:
if self.p:
publickey_context['P'] = self.p
if self.q:
publickey_context['Q'] = self.q
if self.g:
publickey_context['G'] = self.g
elif self.algorithm == Common.CertificatePublicKey.Algorithm.RSA:
if self.modulus:
publickey_context['Modulus'] = self.modulus
if self.exponent:
publickey_context['Exponent'] = self.exponent
elif self.algorithm == Common.CertificatePublicKey.Algorithm.EC:
if self.x:
publickey_context['X'] = self.x
if self.y:
publickey_context['Y'] = self.y
if self.curve:
publickey_context['Curve'] = self.curve
elif self.algorithm == Common.CertificatePublicKey.Algorithm.UNKNOWN:
pass
return publickey_context
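    # Illustrative usage sketch (not part of the original module): an RSA public key entry.
    # The modulus value is a hypothetical placeholder.
    #
    #   pubkey = Common.CertificatePublicKey(
    #       algorithm=Common.CertificatePublicKey.Algorithm.RSA,
    #       length=2048,
    #       modulus='00:af:32:...',
    #       exponent=65537
    #   )
    #   pubkey.to_context()  # -> {'Algorithm': 'RSA', 'Length': 2048, 'Modulus': ..., 'Exponent': 65537}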
class GeneralName(object):
"""
GeneralName class
Implements GeneralName interface from rfc5280
Enumerates the available General Name Types
:type gn_type: ``str``
:param gn_type: General Name Type
:type gn_value: ``str``
:param gn_value: General Name Value
:return: None
:rtype: ``None``
"""
OTHERNAME = 'otherName'
RFC822NAME = 'rfc822Name'
DNSNAME = 'dNSName'
DIRECTORYNAME = 'directoryName'
UNIFORMRESOURCEIDENTIFIER = 'uniformResourceIdentifier'
IPADDRESS = 'iPAddress'
REGISTEREDID = 'registeredID'
@staticmethod
def is_valid_type(_type):
return _type in (
Common.GeneralName.OTHERNAME,
Common.GeneralName.RFC822NAME,
Common.GeneralName.DNSNAME,
Common.GeneralName.DIRECTORYNAME,
Common.GeneralName.UNIFORMRESOURCEIDENTIFIER,
Common.GeneralName.IPADDRESS,
Common.GeneralName.REGISTEREDID
)
def __init__(
self,
gn_value, # type: str
gn_type # type: str
):
if not Common.GeneralName.is_valid_type(gn_type):
raise TypeError(
'gn_type must be of type Common.GeneralName enum'
)
self.gn_type = gn_type
self.gn_value = gn_value
def to_context(self):
return {
'Type': self.gn_type,
'Value': self.gn_value
}
def get_value(self):
return self.gn_value
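    # Illustrative usage sketch (not part of the original module): the domain below is a
    # hypothetical placeholder.
    #
    #   gn = Common.GeneralName(gn_type=Common.GeneralName.DNSNAME, gn_value='example.com')
    #   gn.to_context()  # -> {'Type': 'dNSName', 'Value': 'example.com'}
    #   gn.get_value()   # -> 'example.com'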
class CertificateExtension(object):
"""
CertificateExtension class
Defines an X509 Certificate Extensions used in Common.Certificate
:type extension_type: ``str``
        :param extension_type: The type of Extension (from Common.CertificateExtension.ExtensionType enum, or "Other")
:type critical: ``bool``
:param critical: Whether the extension is marked as critical
:type extension_name: ``Optional[str]``
:param extension_name: Name of the extension
:type oid: ``Optional[str]``
:param oid: OID of the extension
:type subject_alternative_names: ``Optional[List[Common.CertificateExtension.SubjectAlternativeName]]``
:param subject_alternative_names: Subject Alternative Names
:type authority_key_identifier: ``Optional[Common.CertificateExtension.AuthorityKeyIdentifier]``
:param authority_key_identifier: Authority Key Identifier
:type digest: ``Optional[str]``
:param digest: digest for Subject Key Identifier extension
:type digital_signature: ``Optional[bool]``
:param digital_signature: Digital Signature usage for Key Usage extension
:type content_commitment: ``Optional[bool]``
:param content_commitment: Content Commitment usage for Key Usage extension
:type key_encipherment: ``Optional[bool]``
:param key_encipherment: Key Encipherment usage for Key Usage extension
:type data_encipherment: ``Optional[bool]``
:param data_encipherment: Data Encipherment usage for Key Usage extension
:type key_agreement: ``Optional[bool]``
:param key_agreement: Key Agreement usage for Key Usage extension
:type key_cert_sign: ``Optional[bool]``
:param key_cert_sign: Key Cert Sign usage for Key Usage extension
:type usages: ``Optional[List[str]]``
:param usages: Usages for Extended Key Usage extension
:type distribution_points: ``Optional[List[Common.CertificateExtension.DistributionPoint]]``
:param distribution_points: Distribution Points
:type certificate_policies: ``Optional[List[Common.CertificateExtension.CertificatePolicy]]``
:param certificate_policies: Certificate Policies
:type authority_information_access: ``Optional[List[Common.CertificateExtension.AuthorityInformationAccess]]``
:param authority_information_access: Authority Information Access
:type basic_constraints: ``Optional[Common.CertificateExtension.BasicConstraints]``
:param basic_constraints: Basic Constraints
:type signed_certificate_timestamps: ``Optional[List[Common.CertificateExtension.SignedCertificateTimestamp]]``
:param signed_certificate_timestamps: (PreCertificate)Signed Certificate Timestamps
:type value: ``Optional[Union[str, List[Any], Dict[str, Any]]]``
:param value: Raw value of the Extension (used for "Other" type)
:return: None
:rtype: ``None``
"""
class SubjectAlternativeName(object):
"""
SubjectAlternativeName class
Implements Subject Alternative Name extension interface
:type gn: ``Optional[Common.GeneralName]``
:param gn: General Name Type provided as Common.GeneralName
:type gn_type: ``Optional[str]``
:param gn_type: General Name Type provided as string
:type gn_value: ``Optional[str]``
:param gn_value: General Name Value provided as string
:return: None
:rtype: ``None``
"""
def __init__(
self,
gn=None, # type: Optional[Common.GeneralName]
gn_type=None, # type: Optional[str]
gn_value=None # type: Optional[str]
):
if gn:
self.gn = gn
elif gn_type and gn_value:
self.gn = Common.GeneralName(
gn_value=gn_value,
gn_type=gn_type
)
else:
                    raise ValueError('either GeneralName or gn_type/gn_value required to initialize SubjectAlternativeName')
def to_context(self):
return self.gn.to_context()
def get_value(self):
return self.gn.get_value()
class AuthorityKeyIdentifier(object):
"""
AuthorityKeyIdentifier class
Implements Authority Key Identifier extension interface
:type issuer: ``Optional[List[Common.GeneralName]]``
:param issuer: Issuer list
:type serial_number: ``Optional[str]``
:param serial_number: Serial Number
:type key_identifier: ``Optional[str]``
:param key_identifier: Key Identifier
:return: None
:rtype: ``None``
"""
def __init__(
self,
issuer=None, # type: Optional[List[Common.GeneralName]]
serial_number=None, # type: Optional[str]
key_identifier=None # type: Optional[str]
):
self.issuer = issuer
self.serial_number = serial_number
self.key_identifier = key_identifier
def to_context(self):
authority_key_identifier_context = {} # type: Dict[str, Any]
if self.issuer:
                    authority_key_identifier_context['Issuer'] = self.issuer
if self.serial_number:
authority_key_identifier_context["SerialNumber"] = self.serial_number
if self.key_identifier:
authority_key_identifier_context["KeyIdentifier"] = self.key_identifier
return authority_key_identifier_context
class DistributionPoint(object):
"""
DistributionPoint class
Implements Distribution Point extension interface
:type full_name: ``Optional[List[Common.GeneralName]]``
:param full_name: Full Name list
:type relative_name: ``Optional[str]``
:param relative_name: Relative Name
:type crl_issuer: ``Optional[List[Common.GeneralName]]``
:param crl_issuer: CRL Issuer
:type reasons: ``Optional[List[str]]``
:param reasons: Reason list
:return: None
:rtype: ``None``
"""
def __init__(
self,
full_name=None, # type: Optional[List[Common.GeneralName]]
relative_name=None, # type: Optional[str]
crl_issuer=None, # type: Optional[List[Common.GeneralName]]
reasons=None # type: Optional[List[str]]
):
self.full_name = full_name
self.relative_name = relative_name
self.crl_issuer = crl_issuer
self.reasons = reasons
def to_context(self):
distribution_point_context = {} # type: Dict[str, Union[List, str]]
if self.full_name:
distribution_point_context["FullName"] = [fn.to_context() for fn in self.full_name]
if self.relative_name:
distribution_point_context["RelativeName"] = self.relative_name
if self.crl_issuer:
distribution_point_context["CRLIssuer"] = [ci.to_context() for ci in self.crl_issuer]
if self.reasons:
distribution_point_context["Reasons"] = self.reasons
return distribution_point_context
class CertificatePolicy(object):
"""
CertificatePolicy class
Implements Certificate Policy extension interface
:type policy_identifier: ``str``
:param policy_identifier: Policy Identifier
:type policy_qualifiers: ``Optional[List[str]]``
:param policy_qualifiers: Policy Qualifier list
:return: None
:rtype: ``None``
"""
def __init__(
self,
policy_identifier, # type: str
policy_qualifiers=None # type: Optional[List[str]]
):
self.policy_identifier = policy_identifier
self.policy_qualifiers = policy_qualifiers
def to_context(self):
certificate_policies_context = {
"PolicyIdentifier": self.policy_identifier
} # type: Dict[str, Union[List, str]]
if self.policy_qualifiers:
certificate_policies_context["PolicyQualifiers"] = self.policy_qualifiers
return certificate_policies_context
class AuthorityInformationAccess(object):
"""
AuthorityInformationAccess class
Implements Authority Information Access extension interface
:type access_method: ``str``
:param access_method: Access Method
:type access_location: ``Common.GeneralName``
:param access_location: Access Location
:return: None
:rtype: ``None``
"""
def __init__(
self,
access_method, # type: str
access_location # type: Common.GeneralName
):
self.access_method = access_method
self.access_location = access_location
def to_context(self):
return {
"AccessMethod": self.access_method,
"AccessLocation": self.access_location.to_context()
}
class BasicConstraints(object):
"""
BasicConstraints class
Implements Basic Constraints extension interface
:type ca: ``bool``
:param ca: Certificate Authority
:type path_length: ``int``
:param path_length: Path Length
:return: None
:rtype: ``None``
"""
def __init__(
self,
ca, # type: bool
path_length=None # type: int
):
self.ca = ca
self.path_length = path_length
def to_context(self):
basic_constraints_context = {
"CA": self.ca
} # type: Dict[str, Union[str, int]]
if self.path_length:
basic_constraints_context["PathLength"] = self.path_length
return basic_constraints_context
class SignedCertificateTimestamp(object):
"""
SignedCertificateTimestamp class
            Implements the interface for "SignedCertificateTimestamp" extensions
:type entry_type: ``str``
:param entry_type: Entry Type (from Common.CertificateExtension.SignedCertificateTimestamp.EntryType enum)
            :type version: ``int``
:param version: Version
:type log_id: ``str``
:param log_id: Log ID
:type timestamp: ``str``
:param timestamp: Timestamp (ISO8601 string representation in UTC)
:return: None
:rtype: ``None``
"""
class EntryType(object):
"""
EntryType class
Enumerates Entry Types for SignedCertificateTimestamp class
:return: None
:rtype: ``None``
"""
PRECERTIFICATE = "PreCertificate"
X509CERTIFICATE = "X509Certificate"
@staticmethod
def is_valid_type(_type):
return _type in (
Common.CertificateExtension.SignedCertificateTimestamp.EntryType.PRECERTIFICATE,
Common.CertificateExtension.SignedCertificateTimestamp.EntryType.X509CERTIFICATE
)
def __init__(
self,
entry_type, # type: str
version, # type: int
log_id, # type: str
timestamp # type: str
):
if not Common.CertificateExtension.SignedCertificateTimestamp.EntryType.is_valid_type(entry_type):
raise TypeError(
'entry_type must be of type Common.CertificateExtension.SignedCertificateTimestamp.EntryType enum'
)
self.entry_type = entry_type
self.version = version
self.log_id = log_id
self.timestamp = timestamp
def to_context(self):
timestamps_context = {} # type: Dict[str, Any]
timestamps_context['Version'] = self.version
timestamps_context["LogId"] = self.log_id
timestamps_context["Timestamp"] = self.timestamp
timestamps_context["EntryType"] = self.entry_type
return timestamps_context
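        # Illustrative usage sketch (not part of the original module): the log id and
        # timestamp below are hypothetical placeholders.
        #
        #   sct = Common.CertificateExtension.SignedCertificateTimestamp(
        #       entry_type=Common.CertificateExtension.SignedCertificateTimestamp.EntryType.PRECERTIFICATE,
        #       version=0,
        #       log_id='3f:16:4a:...',
        #       timestamp='2021-01-01T00:00:00.000Z'
        #   )
        #   sct.to_context()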
class ExtensionType(object):
"""
ExtensionType class
            Enumerates Extension Types for Common.CertificateExtension class
:return: None
:rtype: ``None``
"""
SUBJECTALTERNATIVENAME = "SubjectAlternativeName"
AUTHORITYKEYIDENTIFIER = "AuthorityKeyIdentifier"
SUBJECTKEYIDENTIFIER = "SubjectKeyIdentifier"
KEYUSAGE = "KeyUsage"
EXTENDEDKEYUSAGE = "ExtendedKeyUsage"
CRLDISTRIBUTIONPOINTS = "CRLDistributionPoints"
CERTIFICATEPOLICIES = "CertificatePolicies"
AUTHORITYINFORMATIONACCESS = "AuthorityInformationAccess"
BASICCONSTRAINTS = "BasicConstraints"
SIGNEDCERTIFICATETIMESTAMPS = "SignedCertificateTimestamps"
PRESIGNEDCERTIFICATETIMESTAMPS = "PreCertSignedCertificateTimestamps"
OTHER = "Other"
@staticmethod
def is_valid_type(_type):
return _type in (
Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME,
Common.CertificateExtension.ExtensionType.AUTHORITYKEYIDENTIFIER,
Common.CertificateExtension.ExtensionType.SUBJECTKEYIDENTIFIER,
Common.CertificateExtension.ExtensionType.KEYUSAGE,
Common.CertificateExtension.ExtensionType.EXTENDEDKEYUSAGE,
Common.CertificateExtension.ExtensionType.CRLDISTRIBUTIONPOINTS,
Common.CertificateExtension.ExtensionType.CERTIFICATEPOLICIES,
Common.CertificateExtension.ExtensionType.AUTHORITYINFORMATIONACCESS,
Common.CertificateExtension.ExtensionType.BASICCONSTRAINTS,
Common.CertificateExtension.ExtensionType.SIGNEDCERTIFICATETIMESTAMPS,
Common.CertificateExtension.ExtensionType.PRESIGNEDCERTIFICATETIMESTAMPS,
Common.CertificateExtension.ExtensionType.OTHER # for extensions that are not handled explicitly
)
def __init__(
self,
extension_type, # type: str
critical, # type: bool
oid=None, # type: Optional[str]
extension_name=None, # type: Optional[str]
subject_alternative_names=None, # type: Optional[List[Common.CertificateExtension.SubjectAlternativeName]]
authority_key_identifier=None, # type: Optional[Common.CertificateExtension.AuthorityKeyIdentifier]
digest=None, # type: str
digital_signature=None, # type: Optional[bool]
content_commitment=None, # type: Optional[bool]
key_encipherment=None, # type: Optional[bool]
data_encipherment=None, # type: Optional[bool]
key_agreement=None, # type: Optional[bool]
key_cert_sign=None, # type: Optional[bool]
crl_sign=None, # type: Optional[bool]
usages=None, # type: Optional[List[str]]
distribution_points=None, # type: Optional[List[Common.CertificateExtension.DistributionPoint]]
certificate_policies=None, # type: Optional[List[Common.CertificateExtension.CertificatePolicy]]
authority_information_access=None, # type: Optional[List[Common.CertificateExtension.AuthorityInformationAccess]]
basic_constraints=None, # type: Optional[Common.CertificateExtension.BasicConstraints]
signed_certificate_timestamps=None, # type: Optional[List[Common.CertificateExtension.SignedCertificateTimestamp]]
value=None # type: Optional[Union[str, List[Any], Dict[str, Any]]]
):
if not Common.CertificateExtension.ExtensionType.is_valid_type(extension_type):
raise TypeError('algorithm must be of type Common.CertificateExtension.ExtensionType enum')
self.extension_type = extension_type
self.critical = critical
if self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME:
self.subject_alternative_names = subject_alternative_names
self.oid = "2.5.29.17"
self.extension_name = "subjectAltName"
elif self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTKEYIDENTIFIER:
if not digest:
raise ValueError('digest is mandatory for SubjectKeyIdentifier extension')
self.digest = digest
self.oid = "2.5.29.14"
self.extension_name = "subjectKeyIdentifier"
elif self.extension_type == Common.CertificateExtension.ExtensionType.KEYUSAGE:
self.digital_signature = digital_signature
self.content_commitment = content_commitment
self.key_encipherment = key_encipherment
self.data_encipherment = data_encipherment
self.key_agreement = key_agreement
self.key_cert_sign = key_cert_sign
self.crl_sign = crl_sign
self.oid = "2.5.29.15"
self.extension_name = "keyUsage"
elif self.extension_type == Common.CertificateExtension.ExtensionType.EXTENDEDKEYUSAGE:
if not usages:
raise ValueError('usages is mandatory for ExtendedKeyUsage extension')
self.usages = usages
self.oid = "2.5.29.37"
self.extension_name = "extendedKeyUsage"
elif self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYKEYIDENTIFIER:
self.authority_key_identifier = authority_key_identifier
self.oid = "2.5.29.35"
self.extension_name = "authorityKeyIdentifier"
elif self.extension_type == Common.CertificateExtension.ExtensionType.CRLDISTRIBUTIONPOINTS:
self.distribution_points = distribution_points
self.oid = "2.5.29.31"
self.extension_name = "cRLDistributionPoints"
elif self.extension_type == Common.CertificateExtension.ExtensionType.CERTIFICATEPOLICIES:
self.certificate_policies = certificate_policies
self.oid = "2.5.29.32"
self.extension_name = "certificatePolicies"
elif self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYINFORMATIONACCESS:
self.authority_information_access = authority_information_access
self.oid = "1.3.6.1.5.5.7.1.1"
self.extension_name = "authorityInfoAccess"
elif self.extension_type == Common.CertificateExtension.ExtensionType.BASICCONSTRAINTS:
self.basic_constraints = basic_constraints
self.oid = "2.5.29.19"
self.extension_name = "basicConstraints"
elif self.extension_type == Common.CertificateExtension.ExtensionType.PRESIGNEDCERTIFICATETIMESTAMPS:
self.signed_certificate_timestamps = signed_certificate_timestamps
self.oid = "1.3.6.1.4.1.11129.2.4.2"
self.extension_name = "signedCertificateTimestampList"
elif self.extension_type == Common.CertificateExtension.ExtensionType.SIGNEDCERTIFICATETIMESTAMPS:
self.signed_certificate_timestamps = signed_certificate_timestamps
self.oid = "1.3.6.1.4.1.11129.2.4.5"
self.extension_name = "signedCertificateTimestampList"
elif self.extension_type == Common.CertificateExtension.ExtensionType.OTHER:
self.value = value
# override oid, extension_name if provided as inputs
if oid:
self.oid = oid
if extension_name:
self.extension_name = extension_name
def to_context(self):
extension_context = {
"OID": self.oid,
"Name": self.extension_name,
"Critical": self.critical
} # type: Dict[str, Any]
if (
self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME
and self.subject_alternative_names is not None
):
extension_context["Value"] = [san.to_context() for san in self.subject_alternative_names]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYKEYIDENTIFIER
and self.authority_key_identifier is not None
):
extension_context["Value"] = self.authority_key_identifier.to_context()
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTKEYIDENTIFIER
and self.digest is not None
):
extension_context["Value"] = {
"Digest": self.digest
}
elif self.extension_type == Common.CertificateExtension.ExtensionType.KEYUSAGE:
key_usage = {} # type: Dict[str, bool]
if self.digital_signature:
key_usage["DigitalSignature"] = self.digital_signature
if self.content_commitment:
key_usage["ContentCommitment"] = self.content_commitment
if self.key_encipherment:
key_usage["KeyEncipherment"] = self.key_encipherment
if self.data_encipherment:
key_usage["DataEncipherment"] = self.data_encipherment
if self.key_agreement:
key_usage["KeyAgreement"] = self.key_agreement
if self.key_cert_sign:
key_usage["KeyCertSign"] = self.key_cert_sign
if self.crl_sign:
key_usage["CrlSign"] = self.crl_sign
if key_usage:
extension_context["Value"] = key_usage
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.EXTENDEDKEYUSAGE
and self.usages is not None
):
extension_context["Value"] = {
"Usages": [u for u in self.usages]
}
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.CRLDISTRIBUTIONPOINTS
and self.distribution_points is not None
):
extension_context["Value"] = [dp.to_context() for dp in self.distribution_points]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.CERTIFICATEPOLICIES
and self.certificate_policies is not None
):
extension_context["Value"] = [cp.to_context() for cp in self.certificate_policies]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYINFORMATIONACCESS
and self.authority_information_access is not None
):
extension_context["Value"] = [aia.to_context() for aia in self.authority_information_access]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.BASICCONSTRAINTS
and self.basic_constraints is not None
):
extension_context["Value"] = self.basic_constraints.to_context()
elif (
self.extension_type in [
Common.CertificateExtension.ExtensionType.SIGNEDCERTIFICATETIMESTAMPS,
Common.CertificateExtension.ExtensionType.PRESIGNEDCERTIFICATETIMESTAMPS
]
and self.signed_certificate_timestamps is not None
):
extension_context["Value"] = [sct.to_context() for sct in self.signed_certificate_timestamps]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.OTHER
and self.value is not None
):
extension_context["Value"] = self.value
return extension_context
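    # Illustrative usage sketch (not part of the original module): a Subject Alternative Name
    # extension built from hypothetical domain values.
    #
    #   san_extension = Common.CertificateExtension(
    #       extension_type=Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME,
    #       critical=False,
    #       subject_alternative_names=[
    #           Common.CertificateExtension.SubjectAlternativeName(
    #               gn_type=Common.GeneralName.DNSNAME, gn_value='example.com'
    #           ),
    #           Common.CertificateExtension.SubjectAlternativeName(
    #               gn_type=Common.GeneralName.DNSNAME, gn_value='www.example.com'
    #           )
    #       ]
    #   )
    #   san_extension.to_context()  # includes OID 2.5.29.17 and the SAN values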
class Certificate(Indicator):
"""
Implements the X509 Certificate interface
Certificate indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#certificate
:type subject_dn: ``str``
:param subject_dn: Subject Distinguished Name
:type dbot_score: ``DBotScore``
:param dbot_score: If Certificate has a score then create and set a DBotScore object.
:type name: ``Optional[Union[str, List[str]]]``
:param name: Name (if not provided output is calculated from SubjectDN and SAN)
:type issuer_dn: ``Optional[str]``
:param issuer_dn: Issuer Distinguished Name
:type serial_number: ``Optional[str]``
:param serial_number: Serial Number
:type validity_not_after: ``Optional[str]``
:param validity_not_after: Certificate Expiration Timestamp (ISO8601 string representation)
:type validity_not_before: ``Optional[str]``
:param validity_not_before: Initial Certificate Validity Timestamp (ISO8601 string representation)
:type sha512: ``Optional[str]``
:param sha512: The SHA-512 hash of the certificate in binary encoded format (DER)
:type sha256: ``Optional[str]``
:param sha256: The SHA-256 hash of the certificate in binary encoded format (DER)
:type sha1: ``Optional[str]``
:param sha1: The SHA-1 hash of the certificate in binary encoded format (DER)
:type md5: ``Optional[str]``
:param md5: The MD5 hash of the certificate in binary encoded format (DER)
:type publickey: ``Optional[Common.CertificatePublicKey]``
:param publickey: Certificate Public Key
:type spki_sha256: ``Optional[str]``
        :param spki_sha256: The SHA-256 hash of the SPKI
:type signature_algorithm: ``Optional[str]``
:param signature_algorithm: Signature Algorithm
:type signature: ``Optional[str]``
:param signature: Certificate Signature
:type subject_alternative_name: \
``Optional[List[Union[str,Dict[str, str],Common.CertificateExtension.SubjectAlternativeName]]]``
:param subject_alternative_name: Subject Alternative Name list
        :type extensions: ``Optional[List[Common.CertificateExtension]]``
:param extensions: Certificate Extension List
:type pem: ``Optional[str]``
:param pem: PEM encoded certificate
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Certificate(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || ' \
'val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512)'
def __init__(
self,
subject_dn, # type: str
dbot_score=None, # type: Optional[Common.DBotScore]
name=None, # type: Optional[Union[str, List[str]]]
issuer_dn=None, # type: Optional[str]
serial_number=None, # type: Optional[str]
validity_not_after=None, # type: Optional[str]
validity_not_before=None, # type: Optional[str]
sha512=None, # type: Optional[str]
sha256=None, # type: Optional[str]
sha1=None, # type: Optional[str]
md5=None, # type: Optional[str]
publickey=None, # type: Optional[Common.CertificatePublicKey]
spki_sha256=None, # type: Optional[str]
signature_algorithm=None, # type: Optional[str]
signature=None, # type: Optional[str]
subject_alternative_name=None, \
# type: Optional[List[Union[str,Dict[str, str],Common.CertificateExtension.SubjectAlternativeName]]]
extensions=None, # type: Optional[List[Common.CertificateExtension]]
pem=None # type: Optional[str]
):
self.subject_dn = subject_dn
self.dbot_score = dbot_score
self.name = None
if name:
if isinstance(name, str):
self.name = [name]
elif isinstance(name, list):
self.name = name
else:
raise TypeError('certificate name must be of type str or List[str]')
self.issuer_dn = issuer_dn
self.serial_number = serial_number
self.validity_not_after = validity_not_after
self.validity_not_before = validity_not_before
self.sha512 = sha512
self.sha256 = sha256
self.sha1 = sha1
self.md5 = md5
if publickey and not isinstance(publickey, Common.CertificatePublicKey):
raise TypeError('publickey must be of type Common.CertificatePublicKey')
self.publickey = publickey
self.spki_sha256 = spki_sha256
self.signature_algorithm = signature_algorithm
self.signature = signature
# if subject_alternative_name is set and is a list
# make sure it is a list of strings, dicts of strings or SAN Extensions
if (
subject_alternative_name
and isinstance(subject_alternative_name, list)
and not all(
isinstance(san, str)
or isinstance(san, dict)
or isinstance(san, Common.CertificateExtension.SubjectAlternativeName)
for san in subject_alternative_name)
):
raise TypeError(
'subject_alternative_name must be list of str or Common.CertificateExtension.SubjectAlternativeName'
)
self.subject_alternative_name = subject_alternative_name
            if (
                extensions
                and not (
                    isinstance(extensions, list)
                    and all(isinstance(e, Common.CertificateExtension) for e in extensions)
                )
            ):
                raise TypeError('extensions must be of type List[Common.CertificateExtension]')
self.extensions = extensions
self.pem = pem
if not isinstance(dbot_score, Common.DBotScore):
raise ValueError('dbot_score must be of type DBotScore')
def to_context(self):
certificate_context = {
"SubjectDN": self.subject_dn
} # type: Dict[str, Any]
san_list = [] # type: List[Dict[str, str]]
if self.subject_alternative_name:
for san in self.subject_alternative_name:
if isinstance(san, str):
san_list.append({
'Value': san
})
elif isinstance(san, dict):
san_list.append(san)
                    elif isinstance(san, Common.CertificateExtension.SubjectAlternativeName):
san_list.append(san.to_context())
elif self.extensions: # autogenerate it from extensions
for ext in self.extensions:
if (
ext.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME
and ext.subject_alternative_names is not None
):
for san in ext.subject_alternative_names:
san_list.append(san.to_context())
if san_list:
certificate_context['SubjectAlternativeName'] = san_list
if self.name:
certificate_context["Name"] = self.name
else: # autogenerate it
name = set() # type: Set[str]
# add subject alternative names
if san_list:
name = set([
sn['Value'] for sn in san_list
if (
'Value' in sn
and (
'Type' not in sn
or sn['Type'] in (Common.GeneralName.DNSNAME, Common.GeneralName.IPADDRESS)
)
)
])
# subject_dn is RFC4515 escaped
# replace \, and \+ with the long escaping \2c and \2b
long_escaped_subject_dn = self.subject_dn.replace("\\,", "\\2c")
long_escaped_subject_dn = long_escaped_subject_dn.replace("\\+", "\\2b")
# we then split RDN (separated by ,) and multi-valued RDN (sep by +)
rdns = long_escaped_subject_dn.replace('+', ',').split(',')
cn = next((rdn for rdn in rdns if rdn.startswith('CN=')), None)
if cn:
name.add(cn.split('=', 1)[-1])
if name:
certificate_context["Name"] = sorted(list(name))
if self.issuer_dn:
certificate_context["IssuerDN"] = self.issuer_dn
if self.serial_number:
certificate_context["SerialNumber"] = self.serial_number
if self.validity_not_before:
certificate_context["ValidityNotBefore"] = self.validity_not_before
if self.validity_not_after:
certificate_context["ValidityNotAfter"] = self.validity_not_after
if self.sha512:
certificate_context["SHA512"] = self.sha512
if self.sha256:
certificate_context["SHA256"] = self.sha256
if self.sha1:
certificate_context["SHA1"] = self.sha1
if self.md5:
certificate_context["MD5"] = self.md5
if self.publickey and isinstance(self.publickey, Common.CertificatePublicKey):
certificate_context["PublicKey"] = self.publickey.to_context()
if self.spki_sha256:
certificate_context["SPKISHA256"] = self.spki_sha256
sig = {} # type: Dict[str, str]
if self.signature_algorithm:
sig["Algorithm"] = self.signature_algorithm
if self.signature:
sig["Signature"] = self.signature
if sig:
certificate_context["Signature"] = sig
if self.extensions:
certificate_context["Extension"] = [e.to_context() for e in self.extensions]
if self.pem:
certificate_context["PEM"] = self.pem
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
certificate_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
ret_value = {
Common.Certificate.CONTEXT_PATH: certificate_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
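    # Illustrative usage sketch (not part of the original module): a minimal Certificate
    # indicator. The constructor expects a Common.DBotScore instance; the subject, hashes and
    # vendor name below are hypothetical placeholders, and DBotScoreType.CERTIFICATE is
    # assumed to be available from the helpers defined earlier in this module.
    #
    #   cert_score = Common.DBotScore(
    #       indicator='0f:42:...',
    #       indicator_type=DBotScoreType.CERTIFICATE,
    #       integration_name='ExampleVendor',
    #       score=Common.DBotScore.NONE
    #   )
    #   certificate = Common.Certificate(
    #       subject_dn='CN=example.com,O=Example Org',
    #       dbot_score=cert_score,
    #       serial_number='1234567890',
    #       sha256='0f42...',
    #       validity_not_before='2021-01-01T00:00:00.000Z',
    #       validity_not_after='2022-01-01T00:00:00.000Z'
    #   )
    #   certificate.to_context()  # 'Name' is auto-generated from the CN when not provided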
class ScheduledCommand:
"""
ScheduledCommand configuration class
Holds the scheduled command configuration for the command result - managing the way the command should be polled.
:type command: ``str``
:param command: The command that'll run after next_run_in_seconds has passed.
:type next_run_in_seconds: ``int``
:param next_run_in_seconds: How long to wait before executing the command.
:type args: ``Optional[Dict[str, Any]]``
:param args: Arguments to use when executing the command.
:type timeout_in_seconds: ``Optional[int]``
:param timeout_in_seconds: Number of seconds until the polling sequence will timeout.
:return: None
:rtype: ``None``
"""
VERSION_MISMATCH_ERROR = 'This command is not supported by this XSOAR server version. Please update your server ' \
'version to 6.2.0 or later.'
def __init__(
self,
command, # type: str
next_run_in_seconds, # type: int
args=None, # type: Optional[Dict[str, Any]]
timeout_in_seconds=None, # type: Optional[int]
):
self.raise_error_if_not_supported()
self._command = command
if next_run_in_seconds < 10:
demisto.info('ScheduledCommandConfiguration provided value for next_run_in_seconds: '
'{} is '.format(next_run_in_seconds) + 'too low - minimum interval is 10 seconds. '
'next_run_in_seconds was set to 10 seconds.')
next_run_in_seconds = 10
self._next_run = str(next_run_in_seconds)
self._args = args
self._timeout = str(timeout_in_seconds) if timeout_in_seconds else None
@staticmethod
def raise_error_if_not_supported():
if not is_demisto_version_ge('6.2.0'):
raise DemistoException(ScheduledCommand.VERSION_MISMATCH_ERROR)
def to_results(self):
"""
Returns the result dictionary of the polling command
"""
return assign_params(
PollingCommand=self._command,
NextRun=self._next_run,
PollingArgs=self._args,
Timeout=self._timeout
)
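# Illustrative usage sketch (not part of the original module): returning a polling result
# from a command. The command name and arguments are hypothetical placeholders; this
# requires an XSOAR server of version 6.2.0 or later.
#
#   scheduled = ScheduledCommand(
#       command='example-polling-command',
#       next_run_in_seconds=30,
#       args={'job_id': 'abc123', 'polling': True},
#       timeout_in_seconds=600
#   )
#   return_results(CommandResults(
#       readable_output='Job abc123 is still running',
#       scheduled_command=scheduled
#   ))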
def camelize_string(src_str, delim='_', upper_camel=True):
"""
Transform snake_case to CamelCase
:type src_str: ``str``
:param src_str: snake_case string to convert.
:type delim: ``str``
    :param delim: The delimiter between words in the source string (default '_').
:type upper_camel: ``bool``
:param upper_camel: When True then transforms string to camel case with the first letter capitalised
(for example: demisto_content to DemistoContent), otherwise the first letter will not be capitalised
(for example: demisto_content to demistoContent).
:return: A CammelCase string.
:rtype: ``str``
"""
if not src_str: # empty string
return ""
components = src_str.split(delim)
camelize_without_first_char = ''.join(map(lambda x: x.title(), components[1:]))
if upper_camel:
return components[0].title() + camelize_without_first_char
else:
return components[0].lower() + camelize_without_first_char
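# Illustrative usage sketch (not part of the original module):
#
#   camelize_string('demisto_content')                     # -> 'DemistoContent'
#   camelize_string('demisto_content', upper_camel=False)  # -> 'demistoContent'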
class IndicatorsTimeline:
"""
IndicatorsTimeline class - use to return Indicator Timeline object to be used in CommandResults
:type indicators: ``list``
:param indicators: expects a list of indicators.
:type category: ``str``
:param category: indicator category.
:type message: ``str``
:param message: indicator message.
:return: None
:rtype: ``None``
"""
def __init__(self, indicators=None, category=None, message=None):
# type: (list, str, str) -> None
if indicators is None:
indicators = []
# check if we are running from an integration or automation
try:
_ = demisto.params()
default_category = 'Integration Update'
except AttributeError:
default_category = 'Automation Update'
timelines = []
timeline = {}
for indicator in indicators:
timeline['Value'] = indicator
if category:
timeline['Category'] = category
else:
timeline['Category'] = default_category
if message:
timeline['Message'] = message
timelines.append(timeline)
self.indicators_timeline = timelines
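# Illustrative usage sketch (not part of the original module): the indicator value and
# message are hypothetical placeholders.
#
#   timeline = IndicatorsTimeline(
#       indicators=['example.com'],
#       category='Integration Update',
#       message='Indicator enriched by ExampleVendor'
#   )
#   return_results(CommandResults(readable_output='Enriched', indicators_timeline=timeline))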
def arg_to_number(arg, arg_name=None, required=False):
# type: (Any, Optional[str], bool) -> Optional[int]
"""Converts an XSOAR argument to a Python int
This function is used to quickly validate an argument provided to XSOAR
via ``demisto.args()`` into an ``int`` type. It will throw a ValueError
if the input is invalid. If the input is None, it will throw a ValueError
    if required is ``True``, or return ``None`` if required is ``False``.
:type arg: ``Any``
:param arg: argument to convert
:type arg_name: ``str``
:param arg_name: argument name
:type required: ``bool``
:param required:
throws exception if ``True`` and argument provided is None
:return:
returns an ``int`` if arg can be converted
returns ``None`` if arg is ``None`` and required is set to ``False``
otherwise throws an Exception
:rtype: ``Optional[int]``
"""
if arg is None or arg == '':
if required is True:
if arg_name:
raise ValueError('Missing "{}"'.format(arg_name))
else:
raise ValueError('Missing required argument')
return None
if isinstance(arg, str):
if arg.isdigit():
return int(arg)
try:
return int(float(arg))
except Exception:
if arg_name:
raise ValueError('Invalid number: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid number'.format(arg))
if isinstance(arg, int):
return arg
if arg_name:
raise ValueError('Invalid number: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid number'.format(arg))
def arg_to_datetime(arg, arg_name=None, is_utc=True, required=False, settings=None):
# type: (Any, Optional[str], bool, bool, dict) -> Optional[datetime]
"""Converts an XSOAR argument to a datetime
This function is used to quickly validate an argument provided to XSOAR
via ``demisto.args()`` into an ``datetime``. It will throw a ValueError if the input is invalid.
If the input is None, it will throw a ValueError if required is ``True``,
    or return ``None`` if required is ``False``.
:type arg: ``Any``
:param arg: argument to convert
:type arg_name: ``str``
:param arg_name: argument name
:type is_utc: ``bool``
:param is_utc: if True then date converted as utc timezone, otherwise will convert with local timezone.
:type required: ``bool``
:param required:
throws exception if ``True`` and argument provided is None
:type settings: ``dict``
:param settings: If provided, passed to dateparser.parse function.
:return:
returns an ``datetime`` if conversion works
returns ``None`` if arg is ``None`` and required is set to ``False``
otherwise throws an Exception
:rtype: ``Optional[datetime]``
"""
if arg is None:
if required is True:
if arg_name:
raise ValueError('Missing "{}"'.format(arg_name))
else:
raise ValueError('Missing required argument')
return None
if isinstance(arg, str) and arg.isdigit() or isinstance(arg, (int, float)):
# timestamp is a str containing digits - we just convert it to int
ms = float(arg)
if ms > 2000000000.0:
# in case timestamp was provided as unix time (in milliseconds)
ms = ms / 1000.0
if is_utc:
return datetime.utcfromtimestamp(ms).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ms)
if isinstance(arg, str):
# we use dateparser to handle strings either in ISO8601 format, or
# relative time stamps.
# For example: format 2019-10-23T00:00:00 or "3 days", etc
if settings:
date = dateparser.parse(arg, settings=settings)
else:
date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
if date is None:
# if d is None it means dateparser failed to parse it
if arg_name:
raise ValueError('Invalid date: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid date'.format(arg))
return date
if arg_name:
raise ValueError('Invalid date: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid date'.format(arg))
# -------------------------------- Relationships----------------------------------- #
class EntityRelationship:
"""
XSOAR entity relationship.
:type name: ``str``
:param name: Relationship name.
:type relationship_type: ``str``
:param relationship_type: Relationship type. (e.g. IndicatorToIndicator...).
:type entity_a: ``str``
:param entity_a: A value, A aka source of the relationship.
:type entity_a_family: ``str``
:param entity_a_family: Entity family of A, A aka source of the relationship. (e.g. Indicator...)
:type entity_a_type: ``str``
:param entity_a_type: Entity A type, A aka source of the relationship. (e.g. IP/URL/...).
:type entity_b: ``str``
:param entity_b: B value, B aka destination of the relationship.
:type entity_b_family: ``str``
:param entity_b_family: Entity family of B, B aka destination of the relationship. (e.g. Indicator...)
:type entity_b_type: ``str``
:param entity_b_type: Entity B type, B aka destination of the relationship. (e.g. IP/URL/...).
:type source_reliability: ``str``
:param source_reliability: Source reliability.
:type fields: ``dict``
:param fields: Custom fields. (Optional)
:type brand: ``str``
:param brand: Source brand name. (Optional)
:return: None
:rtype: ``None``
"""
class RelationshipsTypes(object):
"""
Relationships Types objects.
:return: None
:rtype: ``None``
"""
        # list of the valid relationship types.
RELATIONSHIP_TYPES = ['IndicatorToIndicator']
@staticmethod
def is_valid_type(_type):
# type: (str) -> bool
return _type in EntityRelationship.RelationshipsTypes.RELATIONSHIP_TYPES
class RelationshipsFamily(object):
"""
Relationships Family object list.
:return: None
:rtype: ``None``
"""
INDICATOR = ["Indicator"]
@staticmethod
def is_valid_type(_type):
# type: (str) -> bool
return _type in EntityRelationship.RelationshipsFamily.INDICATOR
class Relationships(object):
"""
Enum: Relations names and their reverse
:return: None
:rtype: ``None``
"""
APPLIED = 'applied'
ATTACHMENT_OF = 'attachment-of'
ATTACHES = 'attaches'
ATTRIBUTE_OF = 'attribute-of'
ATTRIBUTED_BY = 'attributed-by'
ATTRIBUTED_TO = 'attributed-to'
AUTHORED_BY = 'authored-by'
BEACONS_TO = 'beacons-to'
BUNDLED_IN = 'bundled-in'
BUNDLES = 'bundles'
COMMUNICATED_WITH = 'communicated-with'
COMMUNICATED_BY = 'communicated-by'
COMMUNICATES_WITH = 'communicates-with'
COMPROMISES = 'compromises'
CONTAINS = 'contains'
CONTROLS = 'controls'
CREATED_BY = 'created-by'
CREATES = 'creates'
DELIVERED_BY = 'delivered-by'
DELIVERS = 'delivers'
DOWNLOADS = 'downloads'
DOWNLOADS_FROM = 'downloads-from'
DROPPED_BY = 'dropped-by'
DROPS = 'drops'
DUPLICATE_OF = 'duplicate-of'
EMBEDDED_IN = 'embedded-in'
EMBEDS = 'embeds'
EXECUTED = 'executed'
EXECUTED_BY = 'executed-by'
EXFILTRATES_TO = 'exfiltrates-to'
EXPLOITS = 'exploits'
HAS = 'has'
HOSTED_ON = 'hosted-on'
HOSTS = 'hosts'
IMPERSONATES = 'impersonates'
INDICATED_BY = 'indicated-by'
INDICATOR_OF = 'indicator-of'
INJECTED_FROM = 'injected-from'
INJECTS_INTO = 'injects-into'
INVESTIGATES = 'investigates'
IS_ALSO = 'is-also'
MITIGATED_BY = 'mitigated-by'
MITIGATES = 'mitigates'
ORIGINATED_FROM = 'originated-from'
OWNED_BY = 'owned-by'
OWNS = 'owns'
PART_OF = 'part-of'
RELATED_TO = 'related-to'
REMEDIATES = 'remediates'
RESOLVED_BY = 'resolved-by'
RESOLVED_FROM = 'resolved-from'
RESOLVES_TO = 'resolves-to'
SEEN_ON = 'seen-on'
SENT = 'sent'
SENT_BY = 'sent-by'
SENT_FROM = 'sent-from'
SENT_TO = 'sent-to'
SIMILAR_TO = 'similar-to'
SUB_DOMAIN_OF = 'sub-domain-of'
SUB_TECHNIQUE_OF = 'subtechnique-of'
PARENT_TECHNIQUE_OF = 'parent-technique-of'
SUPRA_DOMAIN_OF = 'supra-domain-of'
TARGETED_BY = 'targeted-by'
TARGETS = 'targets'
TYPES = 'Types'
UPLOADED_TO = 'uploaded-to'
USED_BY = 'used-by'
USED_ON = 'used-on'
USES = 'uses'
VARIANT_OF = 'variant-of'
RELATIONSHIPS_NAMES = {'applied': 'applied-on',
'attachment-of': 'attaches',
'attaches': 'attachment-of',
'attribute-of': 'owns',
'attributed-by': 'attributed-to',
'attributed-to': 'attributed-by',
'authored-by': 'author-of',
'beacons-to': 'communicated-by',
'bundled-in': 'bundles',
'bundles': 'bundled-in',
'communicated-with': 'communicated-by',
'communicated-by': 'communicates-with',
'communicates-with': 'communicated-by',
'compromises': 'compromised-by',
'contains': 'part-of',
'controls': 'controlled-by',
'created-by': 'creates',
'creates': 'created-by',
'delivered-by': 'delivers',
'delivers': 'delivered-by',
'downloads': 'downloaded-by',
'downloads-from': 'hosts',
'dropped-by': 'drops',
'drops': 'dropped-by',
'duplicate-of': 'duplicate-of',
'embedded-in': 'embeds',
'embeds': 'embedded-on',
'executed': 'executed-by',
'executed-by': 'executes',
'exfiltrates-to': 'exfiltrated-from',
'exploits': 'exploited-by',
'has': 'seen-on',
'hosted-on': 'hosts',
'hosts': 'hosted-on',
'impersonates': 'impersonated-by',
'indicated-by': 'indicator-of',
'indicator-of': 'indicated-by',
'injected-from': 'injects-into',
'injects-into': 'injected-from',
'investigates': 'investigated-by',
'is-also': 'is-also',
'mitigated-by': 'mitigates',
'mitigates': 'mitigated-by',
'originated-from': 'source-of',
'owned-by': 'owns',
'owns': 'owned-by',
'part-of': 'contains',
'related-to': 'related-to',
'remediates': 'remediated-by',
'resolved-by': 'resolves-to',
'resolved-from': 'resolves-to',
'resolves-to': 'resolved-from',
'seen-on': 'has',
'sent': 'attached-to',
'sent-by': 'sent',
'sent-from': 'received-by',
'sent-to': 'received-by',
'similar-to': 'similar-to',
'sub-domain-of': 'supra-domain-of',
'supra-domain-of': 'sub-domain-of',
'subtechnique-of': 'parent-technique-of',
'parent-technique-of': 'subtechnique-of',
'targeted-by': 'targets',
'targets': 'targeted-by',
'Types': 'Reverse',
'uploaded-to': 'hosts',
'used-by': 'uses',
'used-on': 'targeted-by',
'uses': 'used-by',
'variant-of': 'variant-of'}
@staticmethod
def is_valid(_type):
# type: (str) -> bool
return _type in EntityRelationship.Relationships.RELATIONSHIPS_NAMES.keys()
@staticmethod
def get_reverse(name):
# type: (str) -> str
return EntityRelationship.Relationships.RELATIONSHIPS_NAMES[name]
def __init__(self, name, entity_a, entity_a_type, entity_b, entity_b_type,
reverse_name='', relationship_type='IndicatorToIndicator', entity_a_family='Indicator',
entity_b_family='Indicator', source_reliability="", fields=None, brand=""):
# Relationship
if not EntityRelationship.Relationships.is_valid(name):
raise ValueError("Invalid relationship: " + name)
self._name = name
if reverse_name:
if not EntityRelationship.Relationships.is_valid(reverse_name):
raise ValueError("Invalid reverse relationship: " + reverse_name)
self._reverse_name = reverse_name
else:
self._reverse_name = EntityRelationship.Relationships.get_reverse(name)
if not EntityRelationship.RelationshipsTypes.is_valid_type(relationship_type):
raise ValueError("Invalid relationship type: " + relationship_type)
self._relationship_type = relationship_type
# Entity A - Source
self._entity_a = entity_a
self._entity_a_type = entity_a_type
if not EntityRelationship.RelationshipsFamily.is_valid_type(entity_a_family):
raise ValueError("Invalid entity A Family type: " + entity_a_family)
self._entity_a_family = entity_a_family
# Entity B - Destination
if not entity_b:
demisto.info(
"WARNING: Invalid entity B - Relationships will not be created to entity A {} with relationship name {}".format(
str(entity_a), str(name)))
self._entity_b = entity_b
self._entity_b_type = entity_b_type
if not EntityRelationship.RelationshipsFamily.is_valid_type(entity_b_family):
raise ValueError("Invalid entity B Family type: " + entity_b_family)
self._entity_b_family = entity_b_family
# Custom fields
if fields:
self._fields = fields
else:
self._fields = {}
# Source
if brand:
self._brand = brand
else:
self._brand = ''
if source_reliability:
if not DBotScoreReliability.is_valid_type(source_reliability):
raise ValueError("Invalid source reliability value", source_reliability)
self._source_reliability = source_reliability
else:
self._source_reliability = ''
def to_entry(self):
""" Convert object to XSOAR entry
:rtype: ``dict``
:return: XSOAR entry representation.
"""
entry = {}
if self._entity_b:
entry = {
"name": self._name,
"reverseName": self._reverse_name,
"type": self._relationship_type,
"entityA": self._entity_a,
"entityAFamily": self._entity_a_family,
"entityAType": self._entity_a_type,
"entityB": self._entity_b,
"entityBFamily": self._entity_b_family,
"entityBType": self._entity_b_type,
"fields": self._fields,
}
if self._source_reliability:
entry["reliability"] = self._source_reliability
if self._brand:
entry["brand"] = self._brand
return entry
def to_indicator(self):
""" Convert object to XSOAR entry
:rtype: ``dict``
:return: XSOAR entry representation.
"""
indicator_relationship = {}
if self._entity_b:
indicator_relationship = {
"name": self._name,
"reverseName": self._reverse_name,
"type": self._relationship_type,
"entityA": self._entity_a,
"entityAFamily": self._entity_a_family,
"entityAType": self._entity_a_type,
"entityB": self._entity_b,
"entityBFamily": self._entity_b_family,
"entityBType": self._entity_b_type,
"fields": self._fields,
}
return indicator_relationship
def to_context(self):
""" Convert object to XSOAR context
:rtype: ``dict``
:return: XSOAR context representation.
"""
indicator_relationship_context = {}
if self._entity_b:
indicator_relationship_context = {
"Relationship": self._name,
"EntityA": self._entity_a,
"EntityAType": self._entity_a_type,
"EntityB": self._entity_b,
"EntityBType": self._entity_b_type,
}
return indicator_relationship_context
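# Illustrative usage sketch (not part of the original module): the domain, IP and vendor
# name below are hypothetical placeholders.
#
#   relationship = EntityRelationship(
#       name=EntityRelationship.Relationships.RESOLVES_TO,
#       entity_a='example.com',
#       entity_a_type='Domain',
#       entity_b='198.51.100.1',
#       entity_b_type='IP',
#       brand='ExampleVendor'
#   )
#   return_results(CommandResults(
#       readable_output='example.com resolves to 198.51.100.1',
#       relationships=[relationship]
#   ))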
class CommandResults:
"""
CommandResults class - use to return results to warroom
:type outputs_prefix: ``str``
:param outputs_prefix: should be identical to the prefix in the yml contextPath in yml file. for example:
CortexXDR.Incident
:type outputs_key_field: ``str`` or ``list[str]``
:param outputs_key_field: primary key field in the main object. If the command returns Incidents, and of the
properties of Incident is incident_id, then outputs_key_field='incident_id'. If object has multiple
unique keys, then list of strings is supported outputs_key_field=['id1', 'id2']
:type outputs: ``list`` or ``dict``
:param outputs: the data to be returned and will be set to context
:type indicators: ``list``
:param indicators: DEPRECATED: use 'indicator' instead.
:type indicator: ``Common.Indicator``
:param indicator: single indicator like Common.IP, Common.URL, Common.File, etc.
:type readable_output: ``str``
:param readable_output: (Optional) markdown string that will be presented in the warroom, should be human readable -
(HumanReadable) - if not set, readable output will be generated
:type raw_response: ``dict`` | ``list``
    :param raw_response: must be a dictionary or list; if not provided it will be set equal to outputs. Usually the original
raw response from the 3rd party service (originally Contents)
:type indicators_timeline: ``IndicatorsTimeline``
:param indicators_timeline: must be an IndicatorsTimeline. used by the server to populate an indicator's timeline.
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: must be a boolean, default value is False. Used to prevent AutoExtract on output.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type mark_as_note: ``bool``
:param mark_as_note: must be a boolean, default value is False. Used to mark entry as note.
:type entry_type: ``int`` code of EntryType
:param entry_type: type of return value, see EntryType
:type scheduled_command: ``ScheduledCommand``
:param scheduled_command: manages the way the command should be polled.
:return: None
:rtype: ``None``
"""
def __init__(self, outputs_prefix=None, outputs_key_field=None, outputs=None, indicators=None, readable_output=None,
raw_response=None, indicators_timeline=None, indicator=None, ignore_auto_extract=False,
mark_as_note=False, scheduled_command=None, relationships=None, entry_type=None):
# type: (str, object, object, list, str, object, IndicatorsTimeline, Common.Indicator, bool, bool, ScheduledCommand, list, int) -> None # noqa: E501
if raw_response is None:
raw_response = outputs
if outputs is not None and not isinstance(outputs, dict) and not outputs_prefix:
raise ValueError('outputs_prefix is missing')
if indicators and indicator:
raise ValueError('indicators is DEPRECATED, use only indicator')
if entry_type is None:
entry_type = EntryType.NOTE
self.indicators = indicators # type: Optional[List[Common.Indicator]]
self.indicator = indicator # type: Optional[Common.Indicator]
self.entry_type = entry_type # type: int
self.outputs_prefix = outputs_prefix
# this is public field, it is used by a lot of unit tests, so I don't change it
self.outputs_key_field = outputs_key_field
self._outputs_key_field = None # type: Optional[List[str]]
if not outputs_key_field:
self._outputs_key_field = None
elif isinstance(outputs_key_field, STRING_TYPES):
self._outputs_key_field = [outputs_key_field]
elif isinstance(outputs_key_field, list):
self._outputs_key_field = outputs_key_field
else:
raise TypeError('outputs_key_field must be of type str or list')
self.outputs = outputs
self.raw_response = raw_response
self.readable_output = readable_output
self.indicators_timeline = indicators_timeline
self.ignore_auto_extract = ignore_auto_extract
self.mark_as_note = mark_as_note
self.scheduled_command = scheduled_command
self.relationships = relationships
def to_context(self):
outputs = {} # type: dict
relationships = [] # type: list
if self.readable_output:
human_readable = self.readable_output
else:
human_readable = None # type: ignore[assignment]
raw_response = None # type: ignore[assignment]
indicators_timeline = [] # type: ignore[assignment]
ignore_auto_extract = False # type: bool
mark_as_note = False # type: bool
indicators = [self.indicator] if self.indicator else self.indicators
if indicators:
for indicator in indicators:
context_outputs = indicator.to_context()
for key, value in context_outputs.items():
if key not in outputs:
outputs[key] = []
outputs[key].append(value)
if self.raw_response:
raw_response = self.raw_response
if self.ignore_auto_extract:
ignore_auto_extract = True
if self.mark_as_note:
mark_as_note = True
if self.indicators_timeline:
indicators_timeline = self.indicators_timeline.indicators_timeline
if self.outputs is not None and self.outputs != []:
if not self.readable_output:
# if markdown is not provided then create table by default
human_readable = tableToMarkdown('Results', self.outputs)
if self.outputs_prefix and self._outputs_key_field:
# if both prefix and key field provided then create DT key
formatted_outputs_key = ' && '.join(['val.{0} && val.{0} == obj.{0}'.format(key_field)
for key_field in self._outputs_key_field])
outputs_key = '{0}({1})'.format(self.outputs_prefix, formatted_outputs_key)
outputs[outputs_key] = self.outputs
elif self.outputs_prefix:
outputs_key = '{}'.format(self.outputs_prefix)
outputs[outputs_key] = self.outputs
else:
outputs.update(self.outputs) # type: ignore[call-overload]
if self.relationships:
relationships = [relationship.to_entry() for relationship in self.relationships if relationship.to_entry()]
content_format = EntryFormat.JSON
if isinstance(raw_response, STRING_TYPES) or isinstance(raw_response, int):
content_format = EntryFormat.TEXT
return_entry = {
'Type': self.entry_type,
'ContentsFormat': content_format,
'Contents': raw_response,
'HumanReadable': human_readable,
'EntryContext': outputs,
'IndicatorTimeline': indicators_timeline,
'IgnoreAutoExtract': True if ignore_auto_extract else False,
'Note': mark_as_note,
'Relationships': relationships,
}
if self.scheduled_command:
return_entry.update(self.scheduled_command.to_results())
return return_entry
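# Illustrative usage sketch (not part of the original module): the prefix, key field and
# data below are hypothetical placeholders.
#
#   results = CommandResults(
#       outputs_prefix='ExampleVendor.Incident',
#       outputs_key_field='incident_id',
#       outputs=[{'incident_id': '1', 'severity': 'high'}],
#       raw_response={'data': [{'incident_id': '1', 'severity': 'high'}]}
#   )
#   return_results(results)
#   # context is stored under the DT key:
#   # ExampleVendor.Incident(val.incident_id && val.incident_id == obj.incident_id)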
def return_results(results):
"""
    This function wraps demisto.results() and supports multiple result types.
:type results: ``CommandResults`` or ``str`` or ``dict`` or ``BaseWidget`` or ``list``
:param results: A result object to return as a War-Room entry.
:return: None
:rtype: ``None``
"""
if results is None:
# backward compatibility reasons
demisto.results(None)
return
elif results and isinstance(results, list):
result_list = []
for result in results:
if isinstance(result, (dict, str)):
# Results of type dict or str are of the old results format and work with demisto.results()
result_list.append(result)
else:
# The rest are of the new format and have a corresponding function (to_context, to_display, etc...)
return_results(result)
if result_list:
demisto.results(result_list)
elif isinstance(results, CommandResults):
demisto.results(results.to_context())
elif isinstance(results, BaseWidget):
demisto.results(results.to_display())
elif isinstance(results, GetMappingFieldsResponse):
demisto.results(results.extract_mapping())
elif isinstance(results, GetRemoteDataResponse):
demisto.results(results.extract_for_local())
elif isinstance(results, GetModifiedRemoteDataResponse):
demisto.results(results.to_entry())
elif hasattr(results, 'to_entry'):
demisto.results(results.to_entry())
else:
demisto.results(results)
# deprecated
def return_outputs(readable_output, outputs=None, raw_response=None, timeline=None, ignore_auto_extract=False):
"""
DEPRECATED: use return_results() instead
    This function wraps demisto.results() and makes returning results to the user more intuitive.
:type readable_output: ``str`` | ``int``
:param readable_output: markdown string that will be presented in the warroom, should be human readable -
(HumanReadable)
:type outputs: ``dict``
:param outputs: the outputs that will be returned to playbook/investigation context (originally EntryContext)
:type raw_response: ``dict`` | ``list`` | ``str``
    :param raw_response: must be a dictionary, list or string; if not provided it will be set equal to outputs. Usually the original
raw response from the 3rd party service (originally Contents)
:type timeline: ``dict`` | ``list``
:param timeline: expects a list, if a dict is passed it will be put into a list. used by server to populate an
indicator's timeline. if the 'Category' field is not present in the timeline dict(s), it will automatically
        be added to the dict(s) with its value set to 'Integration Update'.
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: expects a bool value. if true then the warroom entry readable_output will not be auto enriched.
:return: None
:rtype: ``None``
"""
timeline_list = [timeline] if isinstance(timeline, dict) else timeline
if timeline_list:
for tl_obj in timeline_list:
if 'Category' not in tl_obj.keys():
tl_obj['Category'] = 'Integration Update'
return_entry = {
"Type": entryTypes["note"],
"HumanReadable": readable_output,
"ContentsFormat": formats["text"] if isinstance(raw_response, STRING_TYPES) else formats['json'],
"Contents": raw_response,
"EntryContext": outputs,
'IgnoreAutoExtract': ignore_auto_extract,
"IndicatorTimeline": timeline_list
}
# Return 'readable_output' only if needed
if readable_output and not outputs and not raw_response:
return_entry["Contents"] = readable_output
return_entry["ContentsFormat"] = formats["text"]
elif outputs and raw_response is None:
# if raw_response was not provided but outputs were provided then set Contents as outputs
return_entry["Contents"] = outputs
demisto.results(return_entry)
def return_error(message, error='', outputs=None):
"""
Returns error entry with given message and exits the script
:type message: ``str``
:param message: The message to return in the entry (required)
:type error: ``str`` or Exception
:param error: The raw error message to log (optional)
:type outputs: ``dict or None``
:param outputs: the outputs that will be returned to playbook/investigation context (optional)
:return: Error entry object
:rtype: ``dict``
"""
is_command = hasattr(demisto, 'command')
is_server_handled = is_command and demisto.command() in ('fetch-incidents',
'fetch-credentials',
'long-running-execution',
'fetch-indicators')
if is_debug_mode() and not is_server_handled and any(sys.exc_info()): # Checking that an exception occurred
message = "{}\n\n{}".format(message, traceback.format_exc())
message = LOG(message)
if error:
LOG(str(error))
LOG.print_log()
if not isinstance(message, str):
message = message.encode('utf8') if hasattr(message, 'encode') else str(message)
if is_command and demisto.command() == 'get-modified-remote-data':
if (error and not isinstance(error, NotImplementedError)) or sys.exc_info()[0] != NotImplementedError:
message = 'skip update. error: ' + message
if is_server_handled:
raise Exception(message)
else:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': message,
'EntryContext': outputs
})
sys.exit(0)
def return_warning(message, exit=False, warning='', outputs=None, ignore_auto_extract=False):
"""
    Returns a warning entry with the specified message, and optionally exits the script.
:type message: ``str``
:param message: The message to return in the entry (required).
:type exit: ``bool``
:param exit: Determines if the program will terminate after the command is executed. Default is False.
:type warning: ``str``
:param warning: The warning message (raw) to log (optional).
:type outputs: ``dict or None``
:param outputs: The outputs that will be returned to playbook/investigation context (optional).
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: Determines if the War Room entry will be auto-enriched. Default is false.
:return: Warning entry object
:rtype: ``dict``
"""
LOG(message)
if warning:
LOG(warning)
LOG.print_log()
demisto.results({
'Type': entryTypes['warning'],
'ContentsFormat': formats['text'],
'IgnoreAutoExtract': ignore_auto_extract,
'Contents': str(message),
"EntryContext": outputs
})
if exit:
sys.exit(0)
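# Illustrative sketch (hypothetical message): emit a warning entry without terminating the script:
#     return_warning('Some of the requested items were not found.', exit=False)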
def execute_command(command, args, extract_contents=True):
"""
Runs the `demisto.executeCommand()` function and checks for errors.
:type command: ``str``
:param command: The command to run. (required)
:type args: ``dict``
:param args: The command arguments. (required)
:type extract_contents: ``bool``
:param extract_contents: Whether to return only the Contents part of the results. Default is True.
:return: The command results.
:rtype: ``list`` or ``dict`` or ``str``
"""
if not hasattr(demisto, 'executeCommand'):
raise DemistoException('Cannot run demisto.executeCommand() from integrations.')
res = demisto.executeCommand(command, args)
if is_error(res):
return_error('Failed to execute {}. Error details:\n{}'.format(command, get_error(res)))
if not extract_contents:
return res
contents = [entry.get('Contents', {}) for entry in res]
return contents[0] if len(contents) == 1 else contents
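# Illustrative sketch (the command name and arguments are hypothetical): run a server command from a
# script and receive only the Contents portion of the results:
#     incidents = execute_command('getIncidents', {'query': 'status:Active'})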
def camelize(src, delim=' ', upper_camel=True):
"""
Convert all keys of a dictionary (or list of dictionaries) to CamelCase (with capital first letter)
:type src: ``dict`` or ``list``
:param src: The dictionary (or list of dictionaries) to convert the keys for. (required)
:type delim: ``str``
:param delim: The delimiter between two words in the key (e.g. delim=' ' for "Start Date"). Default ' '.
:type upper_camel: ``bool``
:param upper_camel: When True, transforms dictionary keys to camel case with the first letter capitalised
(for example: demisto_content to DemistoContent), otherwise the first letter will not be capitalised
(for example: demisto_content to demistoContent).
:return: The dictionary (or list of dictionaries) with the keys in CamelCase.
:rtype: ``dict`` or ``list``
"""
def camelize_str(src_str):
if callable(getattr(src_str, "decode", None)):
src_str = src_str.decode('utf-8')
components = src_str.split(delim)
camelize_without_first_char = ''.join(map(lambda x: x.title(), components[1:]))
if upper_camel:
return components[0].title() + camelize_without_first_char
else:
return components[0].lower() + camelize_without_first_char
if isinstance(src, list):
return [camelize(phrase, delim, upper_camel=upper_camel) for phrase in src]
return {camelize_str(key): value for key, value in src.items()}
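# Illustrative examples of camelize (values are arbitrary):
#     camelize({'hello_world': 1}, delim='_')             # -> {'HelloWorld': 1}
#     camelize([{'start date': 'x'}], upper_camel=False)   # -> [{'startDate': 'x'}]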
# Constants for common merge paths
outputPaths = {
'file': 'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || '
'val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512 || '
'val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTPH || '
'val.SSDeep && val.SSDeep == obj.SSDeep)',
'ip': 'IP(val.Address && val.Address == obj.Address)',
'url': 'URL(val.Data && val.Data == obj.Data)',
'domain': 'Domain(val.Name && val.Name == obj.Name)',
'cve': 'CVE(val.ID && val.ID == obj.ID)',
'email': 'Account.Email(val.Address && val.Address == obj.Address)',
'dbotscore': 'DBotScore'
}
def replace_in_keys(src, existing='.', new='_'):
"""
Replace a substring in all of the keys of a dictionary (or list of dictionaries)
:type src: ``dict`` or ``list``
:param src: The dictionary (or list of dictionaries) with keys that need replacement. (required)
:type existing: ``str``
:param existing: substring to replace.
:type new: ``str``
:param new: new substring that will replace the existing substring.
:return: The dictionary (or list of dictionaries) with keys after substring replacement.
:rtype: ``dict`` or ``list``
"""
def replace_str(src_str):
if callable(getattr(src_str, "decode", None)):
src_str = src_str.decode('utf-8')
return src_str.replace(existing, new)
if isinstance(src, list):
return [replace_in_keys(x, existing, new) for x in src]
return {replace_str(k): v for k, v in src.items()}
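# Illustrative example of replace_in_keys (values are arbitrary):
#     replace_in_keys({'a.b': 1, 'c': 2})   # -> {'a_b': 1, 'c': 2}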
# ############################## REGEX FORMATTING ###############################
regexFlags = re.M # Multi line matching
# for the global(/g) flag use re.findall({regex_format},str)
# else, use re.match({regex_format},str)
ipv4Regex = r'\b((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b([^\/]|$)'
ipv4cidrRegex = r'\b(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])(?:\[\.\]|\.)){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])(\/([0-9]|[1-2][0-9]|3[0-2]))\b' # noqa: E501
ipv6Regex = r'\b(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:(?:(:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\b' # noqa: E501
ipv6cidrRegex = r'\b(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))\b' # noqa: E501
emailRegex = r'\b[^@]+@[^@]+\.[^@]+\b'
hashRegex = r'\b[0-9a-fA-F]+\b'
urlRegex = r'(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?' \
r'(?:(?:\/|\?)[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?'
cveRegex = r'(?i)^cve-\d{4}-([1-9]\d{4,}|\d{4})$'
md5Regex = re.compile(r'\b[0-9a-fA-F]{32}\b', regexFlags)
sha1Regex = re.compile(r'\b[0-9a-fA-F]{40}\b', regexFlags)
sha256Regex = re.compile(r'\b[0-9a-fA-F]{64}\b', regexFlags)
sha512Regex = re.compile(r'\b[0-9a-fA-F]{128}\b', regexFlags)
pascalRegex = re.compile('([A-Z]?[a-z]+)')
# ############################## REGEX FORMATTING end ###############################
def underscoreToCamelCase(s, upper_camel=True):
"""
Convert an underscore separated string to camel case
:type s: ``str``
:param s: The string to convert (e.g. hello_world) (required)
:type upper_camel: ``bool``
:param upper_camel: When True, transforms the string to camel case with the first letter capitalised
(for example: demisto_content to DemistoContent), otherwise the first letter will not be capitalised
(for example: demisto_content to demistoContent).
:return: The converted string (e.g. HelloWorld)
:rtype: ``str``
"""
if not isinstance(s, STRING_OBJ_TYPES):
return s
components = s.split('_')
camel_without_first_char = ''.join(x.title() for x in components[1:])
if upper_camel:
return components[0].title() + camel_without_first_char
else:
return components[0].lower() + camel_without_first_char
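# Illustrative examples (values are arbitrary):
#     underscoreToCamelCase('hello_world')                      # -> 'HelloWorld'
#     underscoreToCamelCase('hello_world', upper_camel=False)   # -> 'helloWorld'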
def camel_case_to_underscore(s):
"""Converts a camelCase string to snake_case
:type s: ``str``
:param s: The string to convert (e.g. helloWorld) (required)
:return: The converted string (e.g. hello_world)
:rtype: ``str``
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snakify(src):
"""Convert all keys of a dictionary to snake_case (underscored separated)
:type src: ``dict``
:param src: The dictionary to convert the keys for. (required)
:return: The dictionary with the keys in snake_case.
:rtype: ``dict``
"""
return {camel_case_to_underscore(k): v for k, v in src.items()}
def pascalToSpace(s):
"""
Converts pascal strings to human readable (e.g. "ThreatScore" -> "Threat Score", "thisIsIPAddressName" ->
"This Is IP Address Name"). Could be used as headerTransform
:type s: ``str``
:param s: The string to be converted (required)
:return: The converted string
:rtype: ``str``
"""
if not isinstance(s, STRING_OBJ_TYPES):
return s
tokens = pascalRegex.findall(s)
for t in tokens:
# double space to handle capital words like IP/URL/DNS that not included in the regex
s = s.replace(t, ' {} '.format(t.title()))
# split and join: to remove double spacing caused by previous workaround
s = ' '.join(s.split())
return s
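# Illustrative example, e.g. as a headerTransform for tableToMarkdown (values are arbitrary):
#     pascalToSpace('ThreatScore')   # -> 'Threat Score'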
def string_to_table_header(string):
"""
Checks that the input is a string, replaces underscores with spaces, and capitalizes every word.
Example: "one_two" to "One Two"
:type string: ``str``
:param string: The string to be converted (required)
:return: The converted string
:rtype: ``str``
"""
if isinstance(string, STRING_OBJ_TYPES):
return " ".join(word.capitalize() for word in string.replace("_", " ").split())
else:
raise Exception('The key is not a string: {}'.format(string))
def string_to_context_key(string):
"""
Checks that the input is a string, removes underscores, and capitalizes every word.
Example: "one_two" to "OneTwo"
:type string: ``str``
:param string: The string to be converted (required)
:return: The converted string
:rtype: ``str``
"""
if isinstance(string, STRING_OBJ_TYPES):
return "".join(word.capitalize() for word in string.split('_'))
else:
raise Exception('The key is not a string: {}'.format(string))
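# Illustrative examples (values are arbitrary):
#     string_to_table_header('source_ip')   # -> 'Source Ip'
#     string_to_context_key('source_ip')    # -> 'SourceIp'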
def parse_date_range(date_range, date_format=None, to_timestamp=False, timezone=0, utc=True):
"""
THIS FUNCTION IS DEPRECATED - USE dateparser.parse instead
Parses date_range string to a tuple of date strings (start, end). Input must be in the format 'number date_range_unit'
Examples: (2 hours, 4 minutes, 6 month, 1 day, etc.)
:type date_range: ``str``
:param date_range: The date range to be parsed (required)
:type date_format: ``str``
:param date_format: Date format to convert the date_range to. (optional)
:type to_timestamp: ``bool``
:param to_timestamp: If set to True, then will return time stamp rather than a datetime.datetime. (optional)
:type timezone: ``int``
:param timezone: timezone should be passed in hours (e.g if +0300 then pass 3, if -0200 then pass -2).
:type utc: ``bool``
:param utc: If set to True, utc time will be used, otherwise local time.
:return: The parsed date range.
:rtype: ``(datetime.datetime, datetime.datetime)`` or ``(int, int)`` or ``(str, str)``
"""
range_split = date_range.strip().split(' ')
if len(range_split) != 2:
return_error('date_range must be "number date_range_unit", examples: (2 hours, 4 minutes, 6 months, 1 day, '
'etc.)')
try:
number = int(range_split[0])
except ValueError:
return_error('The time value is invalid. Must be an integer.')
unit = range_split[1].lower()
if unit not in ['minute', 'minutes',
'hour', 'hours',
'day', 'days',
'month', 'months',
'year', 'years',
]:
return_error('The unit of date_range is invalid. Must be minutes, hours, days, months or years.')
if not isinstance(timezone, (int, float)):
return_error('Invalid timezone "{}" - must be a number (of type int or float).'.format(timezone))
if utc:
end_time = datetime.utcnow() + timedelta(hours=timezone)
start_time = datetime.utcnow() + timedelta(hours=timezone)
else:
end_time = datetime.now() + timedelta(hours=timezone)
start_time = datetime.now() + timedelta(hours=timezone)
if 'minute' in unit:
start_time = end_time - timedelta(minutes=number)
elif 'hour' in unit:
start_time = end_time - timedelta(hours=number)
elif 'day' in unit:
start_time = end_time - timedelta(days=number)
elif 'month' in unit:
start_time = end_time - timedelta(days=number * 30)
elif 'year' in unit:
start_time = end_time - timedelta(days=number * 365)
if to_timestamp:
return date_to_timestamp(start_time), date_to_timestamp(end_time)
if date_format:
return datetime.strftime(start_time, date_format), datetime.strftime(end_time, date_format)
return start_time, end_time
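# Illustrative example (deprecated helper; prefer dateparser.parse where possible):
#     start, end = parse_date_range('2 days', date_format='%Y-%m-%dT%H:%M:%S')
#     # start and end are strings such as '2021-01-01T00:00:00' spanning the last 2 days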
def timestamp_to_datestring(timestamp, date_format="%Y-%m-%dT%H:%M:%S.000Z", is_utc=False):
"""
Parses timestamp (milliseconds) to a date string in the provided date format (by default: ISO 8601 format)
Examples: (1541494441222, 1541495441000, etc.)
:type timestamp: ``int`` or ``str``
:param timestamp: The timestamp to be parsed (required)
:type date_format: ``str``
:param date_format: The date format the timestamp should be parsed to. (optional)
:type is_utc: ``bool``
:param is_utc: Should the string representation of the timestamp use UTC time or the local machine time
:return: The parsed timestamp in the date_format
:rtype: ``str``
"""
use_utc_time = is_utc or date_format.endswith('Z')
if use_utc_time:
return datetime.utcfromtimestamp(int(timestamp) / 1000.0).strftime(date_format)
return datetime.fromtimestamp(int(timestamp) / 1000.0).strftime(date_format)
def date_to_timestamp(date_str_or_dt, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses date_str_or_dt in the given format (default: %Y-%m-%dT%H:%M:%S) to milliseconds
Examples: ('2018-11-06T08:56:41', '2018-11-06T08:56:41', etc.)
:type date_str_or_dt: ``str`` or ``datetime.datetime``
:param date_str_or_dt: The date to be parsed. (required)
:type date_format: ``str``
:param date_format: The date format of the date string (will be ignored if date_str_or_dt is of type
datetime.datetime). (optional)
:return: The parsed timestamp.
:rtype: ``int``
"""
if isinstance(date_str_or_dt, STRING_OBJ_TYPES):
return int(time.mktime(time.strptime(date_str_or_dt, date_format)) * 1000)
# otherwise datetime.datetime
return int(time.mktime(date_str_or_dt.timetuple()) * 1000)
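# Illustrative examples (the output of date_to_timestamp depends on the local timezone):
#     timestamp_to_datestring(1541494441222)    # -> e.g. '2018-11-06T08:54:01.000Z'
#     date_to_timestamp('2018-11-06T08:56:41')  # -> e.g. 1541494601000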
def remove_nulls_from_dictionary(data):
"""
Remove Null values from a dictionary. (updating the given dictionary)
:type data: ``dict``
:param data: The data to be added to the context (required)
:return: No data returned
:rtype: ``None``
"""
list_of_keys = list(data.keys())[:]
for key in list_of_keys:
if data[key] in ('', None, [], {}, ()):
del data[key]
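# Illustrative example (values are arbitrary); the dictionary is updated in place:
#     params = {'name': 'x', 'empty': '', 'none': None}
#     remove_nulls_from_dictionary(params)   # params is now {'name': 'x'}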
def assign_params(keys_to_ignore=None, values_to_ignore=None, **kwargs):
"""Creates a dictionary from given kwargs without empty values.
empty values are: None, '', [], {}, ()
Examples:
>>> assign_params(a='1', b=True, c=None, d='')
{'a': '1', 'b': True}
>>> since_time = 'timestamp'
>>> assign_params(values_to_ignore=(15, ), sinceTime=since_time, b=15)
{'sinceTime': 'timestamp'}
>>> item_id = '1236654'
>>> assign_params(keys_to_ignore=['rnd'], ID=item_id, rnd=15)
{'ID': '1236654'}
:type keys_to_ignore: ``tuple`` or ``list``
:param keys_to_ignore: Keys to ignore if exists
:type values_to_ignore: ``tuple`` or ``list``
:param values_to_ignore: Values to ignore if exists
:type kwargs: ``kwargs``
:param kwargs: kwargs to filter
:return: dict without empty values
:rtype: ``dict``
"""
if values_to_ignore is None:
values_to_ignore = (None, '', [], {}, ())
if keys_to_ignore is None:
keys_to_ignore = tuple()
return {
key: value for key, value in kwargs.items()
if value not in values_to_ignore and key not in keys_to_ignore
}
class GetDemistoVersion:
"""
Callable class to replace get_demisto_version function
"""
def __init__(self):
self._version = None
def __call__(self):
"""Returns the Demisto version and build number.
:return: Demisto version object if Demisto class has attribute demistoVersion, else raises AttributeError
:rtype: ``dict``
"""
if self._version is None:
if hasattr(demisto, 'demistoVersion'):
self._version = demisto.demistoVersion()
else:
raise AttributeError('demistoVersion attribute not found.')
return self._version
get_demisto_version = GetDemistoVersion()
def get_demisto_version_as_str():
"""Get the Demisto Server version as a string <version>-<build>. If unknown will return: 'Unknown'.
Meant to be used in places where we want to display the version. If you want to perform logic based upon version
use: is_demisto_version_ge.
:return: Demisto version as string
:rtype: ``str``
"""
try:
ver_obj = get_demisto_version()
return '{}-{}'.format(ver_obj.get('version', 'Unknown'),
ver_obj.get("buildNumber", 'Unknown'))
except AttributeError:
return "Unknown"
def is_demisto_version_ge(version, build_number=''):
"""Utility function to check if current running integration is at a server greater or equal to the passed version
:type version: ``str``
:param version: Version to check
:type build_number: ``str``
:param build_number: Build number to check
:return: True if running within a Server version greater or equal than the passed version
:rtype: ``bool``
"""
server_version = {}
try:
server_version = get_demisto_version()
if server_version.get('version') > version:
return True
elif server_version.get('version') == version:
if build_number:
return int(server_version.get('buildNumber')) >= int(build_number) # type: ignore[arg-type]
return True # No build number
else:
return False
except AttributeError:
# demistoVersion was added in 5.0.0. We are currently running in 4.5.0 and below
if version >= "5.0.0":
return False
raise
except ValueError:
# dev editions are not comparable
demisto.log(
'is_demisto_version_ge: ValueError. \n '
'input: server version: {} build number: {}\n'
'server version: {}'.format(version, build_number, server_version)
)
return True
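# Illustrative example: gate a feature on the server version (the version string is arbitrary):
#     if is_demisto_version_ge('6.1.0'):
#         # use behaviour available only on 6.1.0 and above
#         pass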
class DemistoHandler(logging.Handler):
"""
Handler to route logging messages to an IntegrationLogger or demisto.debug if not supplied
"""
def __init__(self, int_logger=None):
logging.Handler.__init__(self)
self.int_logger = int_logger
def emit(self, record):
msg = self.format(record)
try:
if self.int_logger:
self.int_logger(msg)
else:
demisto.debug(msg)
except Exception: # noqa: disable=broad-except
pass
class DebugLogger(object):
"""
Wrapper to initiate logging at logging.DEBUG level.
Is used when `debug-mode=True`.
"""
def __init__(self):
self.handler = None # just in case our http_client code throws an exception. so we don't error in the __del__
self.int_logger = IntegrationLogger()
self.int_logger.set_buffering(False)
self.http_client_print = None
self.http_client = None
if IS_PY3:
# pylint: disable=import-error
import http.client as http_client
# pylint: enable=import-error
self.http_client = http_client
self.http_client.HTTPConnection.debuglevel = 1
self.http_client_print = getattr(http_client, 'print', None) # save in case someone else patched it already
setattr(http_client, 'print', self.int_logger.print_override)
self.handler = DemistoHandler(self.int_logger)
demisto_formatter = logging.Formatter(fmt='python logging: %(levelname)s [%(name)s] - %(message)s', datefmt=None)
self.handler.setFormatter(demisto_formatter)
self.root_logger = logging.getLogger()
self.prev_log_level = self.root_logger.getEffectiveLevel()
self.root_logger.setLevel(logging.DEBUG)
self.org_handlers = list()
if self.root_logger.handlers:
self.org_handlers.extend(self.root_logger.handlers)
for h in self.org_handlers:
self.root_logger.removeHandler(h)
self.root_logger.addHandler(self.handler)
def __del__(self):
if self.handler:
self.root_logger.setLevel(self.prev_log_level)
self.root_logger.removeHandler(self.handler)
self.handler.flush()
self.handler.close()
if self.org_handlers:
for h in self.org_handlers:
self.root_logger.addHandler(h)
if self.http_client:
self.http_client.HTTPConnection.debuglevel = 0
if self.http_client_print:
setattr(self.http_client, 'print', self.http_client_print)
else:
delattr(self.http_client, 'print')
if self.int_logger.curl:
for curl in self.int_logger.curl:
demisto.info('cURL:\n' + curl)
def log_start_debug(self):
"""
Utility function to log start of debug mode logging
"""
msg = "debug-mode started.\n#### http client print found: {}.\n#### Env {}.".format(self.http_client_print is not None,
os.environ)
if hasattr(demisto, 'params'):
msg += "\n#### Params: {}.".format(json.dumps(demisto.params(), indent=2))
calling_context = demisto.callingContext.get('context', {})
msg += "\n#### Docker image: [{}]".format(calling_context.get('DockerImage'))
brand = calling_context.get('IntegrationBrand')
if brand:
msg += "\n#### Integration: brand: [{}] instance: [{}]".format(brand, calling_context.get('IntegrationInstance'))
sm = get_schedule_metadata(context=calling_context)
if sm.get('is_polling'):
msg += "\n#### Schedule Metadata: scheduled command: [{}] args: [{}] times ran: [{}] scheduled: [{}] end " \
"date: [{}]".format(sm.get('polling_command'),
sm.get('polling_args'),
sm.get('times_ran'),
sm.get('start_date'),
sm.get('end_date')
)
self.int_logger.write(msg)
_requests_logger = None
try:
if is_debug_mode():
_requests_logger = DebugLogger()
_requests_logger.log_start_debug()
except Exception as ex:
# Should fail silently so that if there is a problem with the logger it will
# not affect the execution of commands and playbooks
demisto.info('Failed initializing DebugLogger: {}'.format(ex))
def parse_date_string(date_string, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses the date_string function to the corresponding datetime object.
Note: If possible (e.g. running Python 3), it is suggested to use
dateutil.parser.parse or dateparser.parse functions instead.
Examples:
>>> parse_date_string('2019-09-17T06:16:39Z')
datetime.datetime(2019, 9, 17, 6, 16, 39)
>>> parse_date_string('2019-09-17T06:16:39.22Z')
datetime.datetime(2019, 9, 17, 6, 16, 39, 220000)
>>> parse_date_string('2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00')
datetime.datetime(2019, 9, 17, 6, 16, 39, 404000)
:type date_string: ``str``
:param date_string: The date string to parse. (required)
:type date_format: ``str``
:param date_format:
The date format of the date string. If the date format is known, it should be provided. (optional)
:return: The parsed datetime.
:rtype: ``datetime.datetime``
"""
try:
return datetime.strptime(date_string, date_format)
except ValueError as e:
error_message = str(e)
date_format = '%Y-%m-%dT%H:%M:%S'
time_data_regex = r'time data \'(.*?)\''
time_data_match = re.findall(time_data_regex, error_message)
sliced_time_data = ''
if time_data_match:
# found time date which does not match date format
# example of caught error message:
# "time data '2019-09-17T06:16:39Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'"
time_data = time_data_match[0]
# removing YYYY-MM-DDThh:mm:ss from the time data to keep only milliseconds and time zone
sliced_time_data = time_data[19:]
else:
unconverted_data_remains_regex = r'unconverted data remains: (.*)'
unconverted_data_remains_match = re.findall(unconverted_data_remains_regex, error_message)
if unconverted_data_remains_match:
# found unconverted_data_remains
# example of caught error message:
# "unconverted data remains: 22Z"
sliced_time_data = unconverted_data_remains_match[0]
if not sliced_time_data:
# did not catch expected error
raise ValueError(e)
if '.' in sliced_time_data:
# found milliseconds - appending ".%f" to date format
date_format += '.%f'
timezone_regex = r'[Zz+-].*'
time_zone = re.findall(timezone_regex, sliced_time_data)
if time_zone:
# found timezone - appending it to the date format
date_format += time_zone[0]
return datetime.strptime(date_string, date_format)
def build_dbot_entry(indicator, indicator_type, vendor, score, description=None, build_malicious=True):
"""Build a dbot entry. if score is 3 adds malicious
Examples:
>>> build_dbot_entry('[email protected]', 'Email', 'Vendor', 1)
{'DBotScore': {'Indicator': '[email protected]', 'Type': 'email', 'Vendor': 'Vendor', 'Score': 1}}
>>> build_dbot_entry('[email protected]', 'Email', 'Vendor', 3, build_malicious=False)
{'DBotScore': {'Indicator': '[email protected]', 'Type': 'email', 'Vendor': 'Vendor', 'Score': 3}}
>>> build_dbot_entry('[email protected]', 'email', 'Vendor', 3, 'Malicious email')
{'DBotScore': {'Vendor': 'Vendor', 'Indicator': '[email protected]', 'Score': 3, 'Type': 'email'}, \
'Account.Email(val.Address && val.Address == obj.Address)': {'Malicious': {'Vendor': 'Vendor', 'Description': \
'Malicious email'}, 'Address': '[email protected]'}}
>>> build_dbot_entry('md5hash', 'md5', 'Vendor', 1)
{'DBotScore': {'Indicator': 'md5hash', 'Type': 'file', 'Vendor': 'Vendor', 'Score': 1}}
:type indicator: ``str``
:param indicator: indicator field. if using file hashes, can be dict
:type indicator_type: ``str``
:param indicator_type:
type of indicator ('url, 'domain', 'ip', 'cve', 'email', 'md5', 'sha1', 'sha256', 'crc32', 'sha512', 'ctph')
:type vendor: ``str``
:param vendor: Integration ID
:type score: ``int``
:param score: DBot score (0-3)
:type description: ``str`` or ``None``
:param description: description (will be added to malicious if dbot_score is 3). can be None
:type build_malicious: ``bool``
:param build_malicious: if True, will add a malicious entry
:return: dbot entry
:rtype: ``dict``
"""
if not 0 <= score <= 3:
raise DemistoException('illegal DBot score, expected 0-3, got `{}`'.format(score))
indicator_type_lower = indicator_type.lower()
if indicator_type_lower not in INDICATOR_TYPE_TO_CONTEXT_KEY:
raise DemistoException('illegal indicator type, expected one of {}, got `{}`'.format(
INDICATOR_TYPE_TO_CONTEXT_KEY.keys(), indicator_type_lower
))
# handle files
if INDICATOR_TYPE_TO_CONTEXT_KEY[indicator_type_lower] == 'file':
indicator_type_lower = 'file'
dbot_entry = {
outputPaths['dbotscore']: {
'Indicator': indicator,
'Type': indicator_type_lower,
'Vendor': vendor,
'Score': score
}
}
if score == 3 and build_malicious:
dbot_entry.update(build_malicious_dbot_entry(indicator, indicator_type, vendor, description))
return dbot_entry
def build_malicious_dbot_entry(indicator, indicator_type, vendor, description=None):
""" Build Malicious dbot entry
Examples:
>>> build_malicious_dbot_entry('8.8.8.8', 'ip', 'Vendor', 'Google DNS')
{'IP(val.Address && val.Address == obj.Address)': {'Malicious': {'Vendor': 'Vendor', 'Description': 'Google DNS\
'}, 'Address': '8.8.8.8'}}
>>> build_malicious_dbot_entry('md5hash', 'MD5', 'Vendor', 'Malicious File')
{'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || val.SHA256 && val.SHA256 == obj.SHA\
256 || val.SHA512 && val.SHA512 == obj.SHA512 || val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTP\
H || val.SSDeep && val.SSDeep == obj.SSDeep)': {'Malicious': {'Vendor': 'Vendor', 'Description': 'Malicious File'}\
, 'MD5': 'md5hash'}}
:type indicator: ``str``
:param indicator: Value (e.g. 8.8.8.8)
:type indicator_type: ``str``
:param indicator_type: e.g. 'IP'
:type vendor: ``str``
:param vendor: Integration ID
:type description: ``str``
:param description: Why it's malicious
:return: A malicious DBot entry
:rtype: ``dict``
"""
indicator_type_lower = indicator_type.lower()
if indicator_type_lower in INDICATOR_TYPE_TO_CONTEXT_KEY:
key = INDICATOR_TYPE_TO_CONTEXT_KEY[indicator_type_lower]
# `file` indicator works a little different
if key == 'file':
entry = {
indicator_type.upper(): indicator,
'Malicious': {
'Vendor': vendor,
'Description': description
}
}
return {outputPaths[key]: entry}
else:
entry = {
key: indicator,
'Malicious': {
'Vendor': vendor,
'Description': description
}
}
return {outputPaths[indicator_type_lower]: entry}
else:
raise DemistoException('Wrong indicator type supplied: {}, expected {}'
.format(indicator_type, INDICATOR_TYPE_TO_CONTEXT_KEY.keys()))
# Will add only if 'requests' module imported
if 'requests' in sys.modules:
class BaseClient(object):
"""Client to use in integrations with powerful _http_request
:type base_url: ``str``
:param base_url: Base server address with suffix, for example: https://example.com/api/v2/.
:type verify: ``bool``
:param verify: Whether the request should verify the SSL certificate.
:type proxy: ``bool``
:param proxy: Whether to run the integration using the system proxy.
:type ok_codes: ``tuple``
:param ok_codes:
The request codes to accept as OK, for example: (200, 201, 204).
If you specify "None", will use requests.Response.ok
:type headers: ``dict``
:param headers:
The request headers, for example: {'Accept`: `application/json`}.
Can be None.
:type auth: ``dict`` or ``tuple``
:param auth:
The request authorization, for example: (username, password).
Can be None.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, base_url, verify=True, proxy=False, ok_codes=tuple(), headers=None, auth=None):
self._base_url = base_url
self._verify = verify
self._ok_codes = ok_codes
self._headers = headers
self._auth = auth
self._session = requests.Session()
if not proxy:
skip_proxy()
if not verify:
skip_cert_verification()
def __del__(self):
try:
self._session.close()
except Exception: # noqa
demisto.debug('failed to close BaseClient session with the following error:\n{}'.format(traceback.format_exc()))
def _implement_retry(self, retries=0,
status_list_to_retry=None,
backoff_factor=5,
raise_on_redirect=False,
raise_on_status=False):
"""
Implements the retry mechanism.
In the default case where retries = 0 the request will fail on the first time
:type retries: ``int``
:param retries: How many retries should be made in case of a failure. when set to '0'- will fail on the first time
:type status_list_to_retry: ``iterable``
:param status_list_to_retry: A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ['GET', 'POST', 'PUT']
and the response status code is in ``status_list_to_retry``.
:type backoff_factor: ``float``
:param backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff_factor set to 5
:type raise_on_redirect: ``bool``
:param raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:type raise_on_status: ``bool``
:param raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
"""
try:
method_whitelist = "allowed_methods" if hasattr(Retry.DEFAULT, "allowed_methods") else "method_whitelist"
whitelist_kwargs = {
method_whitelist: frozenset(['GET', 'POST', 'PUT'])
}
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status=retries,
status_forcelist=status_list_to_retry,
raise_on_status=raise_on_status,
raise_on_redirect=raise_on_redirect,
**whitelist_kwargs
)
adapter = HTTPAdapter(max_retries=retry)
self._session.mount('http://', adapter)
self._session.mount('https://', adapter)
except NameError:
pass
def _http_request(self, method, url_suffix='', full_url=None, headers=None, auth=None, json_data=None,
params=None, data=None, files=None, timeout=10, resp_type='json', ok_codes=None,
return_empty_response=False, retries=0, status_list_to_retry=None,
backoff_factor=5, raise_on_redirect=False, raise_on_status=False,
error_handler=None, empty_valid_codes=None, **kwargs):
"""A wrapper for requests lib to send our requests and handle requests and responses better.
:type method: ``str``
:param method: The HTTP method, for example: GET, POST, and so on.
:type url_suffix: ``str``
:param url_suffix: The API endpoint.
:type full_url: ``str``
:param full_url:
Bypasses the use of self._base_url + url_suffix. This is useful if you need to
make a request to an address outside of the scope of the integration
API.
:type headers: ``dict``
:param headers: Headers to send in the request. If None, will use self._headers.
:type auth: ``tuple``
:param auth:
The authorization tuple (usually username/password) to enable Basic/Digest/Custom HTTP Auth.
if None, will use self._auth.
:type params: ``dict``
:param params: URL parameters to specify the query.
:type data: ``dict``
:param data: The data to send in a 'POST' request.
:type json_data: ``dict``
:param json_data: The dictionary to send in a 'POST' request.
:type files: ``dict``
:param files: The file data to send in a 'POST' request.
:type timeout: ``float`` or ``tuple``
:param timeout:
The amount of time (in seconds) that a request will wait for a client to
establish a connection to a remote machine before a timeout occurs.
can be only float (Connection Timeout) or a tuple (Connection Timeout, Read Timeout).
:type resp_type: ``str``
:param resp_type:
Determines which data format to return from the HTTP request. The default
is 'json'. Other options are 'text', 'content', 'xml' or 'response'. Use 'response'
to return the full response object.
:type ok_codes: ``tuple``
:param ok_codes:
The request codes to accept as OK, for example: (200, 201, 204). If you specify
"None", will use self._ok_codes.
:return: Depends on the resp_type parameter
:rtype: ``dict`` or ``str`` or ``requests.Response``
:type retries: ``int``
:param retries: How many retries should be made in case of a failure. when set to '0'- will fail on the first time
:type status_list_to_retry: ``iterable``
:param status_list_to_retry: A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ['GET', 'POST', 'PUT']
and the response status code is in ``status_list_to_retry``.
:type backoff_factor: ``float``
:param backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff_factor set to 5
:type raise_on_redirect: ``bool``
:param raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:type raise_on_status: ``bool``
:param raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:type error_handler: ``callable``
:param error_handler: Given an error entry, the error handler outputs the
new formatted error message.
:type empty_valid_codes: ``list``
:param empty_valid_codes: A list of all valid status codes of empty responses (usually only 204, but
can vary)
"""
try:
# Replace params if supplied
address = full_url if full_url else urljoin(self._base_url, url_suffix)
headers = headers if headers else self._headers
auth = auth if auth else self._auth
if retries:
self._implement_retry(retries, status_list_to_retry, backoff_factor, raise_on_redirect, raise_on_status)
# Execute
res = self._session.request(
method,
address,
verify=self._verify,
params=params,
data=data,
json=json_data,
files=files,
headers=headers,
auth=auth,
timeout=timeout,
**kwargs
)
# Handle error responses gracefully
if not self._is_status_code_valid(res, ok_codes):
if error_handler:
error_handler(res)
else:
err_msg = 'Error in API call [{}] - {}' \
.format(res.status_code, res.reason)
try:
# Try to parse json error response
error_entry = res.json()
err_msg += '\n{}'.format(json.dumps(error_entry))
raise DemistoException(err_msg, res=res)
except ValueError:
err_msg += '\n{}'.format(res.text)
raise DemistoException(err_msg, res=res)
if not empty_valid_codes:
empty_valid_codes = [204]
is_response_empty_and_successful = (res.status_code in empty_valid_codes)
if is_response_empty_and_successful and return_empty_response:
return res
resp_type = resp_type.lower()
try:
if resp_type == 'json':
return res.json()
if resp_type == 'text':
return res.text
if resp_type == 'content':
return res.content
if resp_type == 'xml':
ET.fromstring(res.text)  # validate that the response body is well-formed XML
return res
except ValueError as exception:
raise DemistoException('Failed to parse json object from response: {}'
.format(res.content), exception)
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
# in case the "Trust any certificate" is already checked
if not self._verify:
raise
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = 'Verify that the server URL parameter' \
' is correct and that you have access to the server from your host.' \
'\nError Type: {}\nError Number: [{}]\nMessage: {}\n' \
.format(err_type, exception.errno, exception.strerror)
raise DemistoException(err_msg, exception)
except requests.exceptions.RetryError as exception:
try:
reason = 'Reason: {}'.format(exception.args[0].reason.args[0])
except Exception: # noqa: disable=broad-except
reason = ''
err_msg = 'Max Retries Error - Request attempts with {} retries failed. \n{}'.format(retries, reason)
raise DemistoException(err_msg, exception)
def _is_status_code_valid(self, response, ok_codes=None):
"""If the status code is OK, return 'True'.
:type response: ``requests.Response``
:param response: Response from API after the request for which to check the status.
:type ok_codes: ``tuple`` or ``list``
:param ok_codes:
The request codes to accept as OK, for example: (200, 201, 204). If you specify
"None", will use response.ok.
:return: Whether the status of the response is valid.
:rtype: ``bool``
"""
# Get wanted ok codes
status_codes = ok_codes if ok_codes else self._ok_codes
if status_codes:
return response.status_code in status_codes
return response.ok
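# Illustrative sketch of a typical BaseClient subclass and request. The base URL, endpoint and
# header values below are hypothetical and only show the expected call shapes:
#     class Client(BaseClient):
#         def get_items(self, limit):
#             return self._http_request('GET', url_suffix='items', params={'limit': limit})
#
#     client = Client(base_url='https://example.com/api/v2/', verify=True,
#                     headers={'Accept': 'application/json'}, ok_codes=(200,))
#     items = client.get_items(10)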
def batch(iterable, batch_size=1):
"""Gets an iterable and yields slices of it.
:type iterable: ``list``
:param iterable: list or other iterable object.
:type batch_size: ``int``
:param batch_size: the size of batches to fetch
:rtype: ``list``
:return: Iterable slices of the given iterable.
"""
current_batch = iterable[:batch_size]
not_batched = iterable[batch_size:]
while current_batch:
yield current_batch
current_batch = not_batched[:batch_size]
not_batched = not_batched[batch_size:]
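# Illustrative example (values are arbitrary):
#     list(batch([1, 2, 3, 4, 5], batch_size=2))   # -> [[1, 2], [3, 4], [5]]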
def dict_safe_get(dict_object, keys, default_return_value=None, return_type=None, raise_return_type=True):
"""Recursive safe get query (for nested dicts and lists), If keys found return value otherwise return None or default value.
Example:
>>> data = {"something" : {"test": "A"}}
>>> dict_safe_get(data, ['something', 'test'])
>>> 'A'
>>> dict_safe_get(data, ['something', 'else'], 'default value')
>>> 'default value'
:type dict_object: ``dict``
:param dict_object: dictionary to query.
:type keys: ``list``
:param keys: keys for recursive get.
:type default_return_value: ``object``
:param default_return_value: Value to return when no key available.
:type return_type: ``type``
:param return_type: Expected return type.
:type raise_return_type: ``bool``
:param raise_return_type: Whether to raise an error when the value didn't match the expected return type.
:rtype: ``object``
:return: Value from nested query.
"""
return_value = dict_object
for key in keys:
try:
return_value = return_value[key]
except (KeyError, TypeError, IndexError, AttributeError):
return_value = default_return_value
break
if return_type and not isinstance(return_value, return_type):
if raise_return_type:
raise TypeError("Safe get Error:\nDetails: Return Type Error Excepted return type {0},"
" but actual type from nested dict/list is {1} with value {2}.\n"
"Query: {3}\nQueried object: {4}".format(return_type, type(return_value),
return_value, keys, dict_object))
return_value = default_return_value
return return_value
CONTEXT_UPDATE_RETRY_TIMES = 3
MIN_VERSION_FOR_VERSIONED_CONTEXT = '6.0.0'
def merge_lists(original_list, updated_list, key):
"""
Replace values in a list with those in an updated list.
Example:
>>> original = [{'id': '1', 'updated': 'n'}, {'id': '2', 'updated': 'n'}, {'id': '11', 'updated': 'n'}]
>>> updated = [{'id': '1', 'updated': 'y'}, {'id': '3', 'updated': 'y'}, {'id': '11', 'updated': 'n',
>>> 'remove': True}]
>>> result = [{'id': '1', 'updated': 'y'}, {'id': '2', 'updated': 'n'}, {'id': '3', 'updated': 'y'}]
:type original_list: ``list``
:param original_list: The original list.
:type updated_list: ``list``
:param updated_list: The updated list.
:type key: ``str``
:param key: The key to replace elements by.
:rtype: ``list``
:return: The merged list.
"""
original_dict = {element[key]: element for element in original_list}
updated_dict = {element[key]: element for element in updated_list}
original_dict.update(updated_dict)
removed = [obj for obj in original_dict.values() if obj.get('remove', False) is True]
for r in removed:
demisto.debug('Removing from integration context: {}'.format(str(r)))
merged_list = [obj for obj in original_dict.values() if obj.get('remove', False) is False]
return merged_list
def set_integration_context(context, sync=True, version=-1):
"""
Sets the integration context.
:type context: ``dict``
:param context: The context to set.
:type sync: ``bool``
:param sync: Whether to save the context directly to the DB.
:type version: ``Any``
:param version: The version of the context to set.
:rtype: ``dict``
:return: The new integration context
"""
demisto.debug('Setting integration context')
if is_versioned_context_available():
demisto.debug('Updating integration context with version {}. Sync: {}'.format(version, sync))
return demisto.setIntegrationContextVersioned(context, version, sync)
else:
return demisto.setIntegrationContext(context)
def get_integration_context(sync=True, with_version=False):
"""
Gets the integration context.
:type sync: ``bool``
:param sync: Whether to get the integration context directly from the DB.
:type with_version: ``bool``
:param with_version: Whether to return the version.
:rtype: ``dict``
:return: The integration context.
"""
if is_versioned_context_available():
integration_context = demisto.getIntegrationContextVersioned(sync)
if with_version:
return integration_context
else:
return integration_context.get('context', {})
else:
return demisto.getIntegrationContext()
def is_versioned_context_available():
"""
Determines whether versioned integration context is available according to the server version.
:rtype: ``bool``
:return: Whether versioned integration context is available
"""
return is_demisto_version_ge(MIN_VERSION_FOR_VERSIONED_CONTEXT)
def set_to_integration_context_with_retries(context, object_keys=None, sync=True,
max_retry_times=CONTEXT_UPDATE_RETRY_TIMES):
"""
Update the integration context with a dictionary of keys and values with multiple attempts.
The function supports merging the context keys using the provided object_keys parameter.
If the version is too old by the time the context is set,
another attempt will be made until the limit after a random sleep.
:type context: ``dict``
:param context: A dictionary of keys and values to set.
:type object_keys: ``dict``
:param object_keys: A dictionary to map between context keys and their unique ID for merging them.
:type sync: ``bool``
:param sync: Whether to save the context directly to the DB.
:type max_retry_times: ``int``
:param max_retry_times: The maximum number of attempts to try.
:rtype: ``None``
:return: None
"""
attempt = 0
# do while...
while True:
if attempt == max_retry_times:
raise Exception('Failed updating integration context. Max retry attempts exceeded.')
# Update the latest context and get the new version
integration_context, version = update_integration_context(context, object_keys, sync)
demisto.debug('Attempting to update the integration context with version {}.'.format(version))
# Attempt to update integration context with a version.
# If we get a ValueError (DB Version), then the version was not updated and we need to try again.
attempt += 1
try:
set_integration_context(integration_context, sync, version)
demisto.debug('Successfully updated integration context with version {}.'
''.format(version))
break
except ValueError as ve:
demisto.debug('Failed updating integration context with version {}: {} Attempts left - {}'
''.format(version, str(ve), CONTEXT_UPDATE_RETRY_TIMES - attempt))
# Sleep for a random time
time_to_sleep = randint(1, 100) / 1000
time.sleep(time_to_sleep)
def get_integration_context_with_version(sync=True):
"""
Get the latest integration context with version, if available.
:type sync: ``bool``
:param sync: Whether to get the context directly from the DB.
:rtype: ``tuple``
:return: The latest integration context with version.
"""
latest_integration_context_versioned = get_integration_context(sync, with_version=True)
version = -1
if is_versioned_context_available():
integration_context = latest_integration_context_versioned.get('context', {})
if sync:
version = latest_integration_context_versioned.get('version', 0)
else:
integration_context = latest_integration_context_versioned
return integration_context, version
def update_integration_context(context, object_keys=None, sync=True):
"""
Update the integration context with a given dictionary after merging it with the latest integration context.
:type context: ``dict``
:param context: The keys and values to update in the integration context.
:type object_keys: ``dict``
:param object_keys: A dictionary to map between context keys and their unique ID for merging them
with the latest context.
:type sync: ``bool``
:param sync: Whether to use the context directly from the DB.
:rtype: ``tuple``
:return: The updated integration context along with the current version.
"""
integration_context, version = get_integration_context_with_version(sync)
if not object_keys:
object_keys = {}
for key, _ in context.items():
latest_object = json.loads(integration_context.get(key, '[]'))
updated_object = context[key]
if key in object_keys:
merged_list = merge_lists(latest_object, updated_object, object_keys[key])
integration_context[key] = json.dumps(merged_list)
else:
integration_context[key] = json.dumps(updated_object)
return integration_context, version
class DemistoException(Exception):
def __init__(self, message, exception=None, res=None, *args):
self.res = res
self.message = message
self.exception = exception
super(DemistoException, self).__init__(message, exception, *args)
def __str__(self):
return str(self.message)
class GetRemoteDataArgs:
"""get-remote-data args parser
:type args: ``dict``
:param args: arguments for the command.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, args):
self.remote_incident_id = args['id']
self.last_update = args['lastUpdate']
class GetModifiedRemoteDataArgs:
"""get-modified-remote-data args parser
:type args: ``dict``
:param args: arguments for the command.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, args):
self.last_update = args['lastUpdate']
class UpdateRemoteSystemArgs:
"""update-remote-system args parser
:type args: ``dict``
:param args: arguments for the command.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, args):
self.data = args.get('data') # type: ignore
self.entries = args.get('entries')
self.incident_changed = args.get('incidentChanged')
self.remote_incident_id = args.get('remoteId')
self.inc_status = args.get('status')
self.delta = args.get('delta')
class GetRemoteDataResponse:
"""get-remote-data response parser
:type mirrored_object: ``dict``
:param mirrored_object: The object you are mirroring, in most cases the incident.
:type entries: ``list``
:param entries: The entries you want to add to the war room.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, mirrored_object, entries):
self.mirrored_object = mirrored_object
self.entries = entries
def extract_for_local(self):
"""Extracts the response into the mirrored incident.
:return: List of details regarding the mirrored incident.
:rtype: ``list``
"""
if self.mirrored_object:
return [self.mirrored_object] + self.entries
class GetModifiedRemoteDataResponse:
"""get-modified-remote-data response parser
:type modified_incident_ids: ``list``
:param modified_incident_ids: The incidents that were modified since the last check.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, modified_incident_ids):
self.modified_incident_ids = modified_incident_ids
def to_entry(self):
"""Extracts the response
:return: List of incidents to run the get-remote-data command on.
:rtype: ``list``
"""
demisto.info('Modified incidents: {}'.format(self.modified_incident_ids))
return {'Contents': self.modified_incident_ids, 'Type': EntryType.NOTE, 'ContentsFormat': EntryFormat.JSON}
class SchemeTypeMapping:
"""Scheme type mappings builder.
:type type_name: ``str``
:param type_name: The name of the remote incident type.
:type fields: ``dict``
:param fields: The dict of fields to their description.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, type_name='', fields=None):
self.type_name = type_name
self.fields = fields if fields else {}
def add_field(self, name, description=''):
"""Adds a field to the incident type mapping.
:type name: ``str``
:param name: The name of the field.
:type description: ``str``
:param description: The description for that field.
:return: No data returned
:rtype: ``None``
"""
self.fields.update({
name: description
})
def extract_mapping(self):
"""Extracts the mapping into XSOAR mapping screen.
:return: the mapping object for the current field.
:rtype: ``dict``
"""
return {
self.type_name: self.fields
}
class GetMappingFieldsResponse:
"""Handler for the mapping fields object.
:type scheme_types_mapping: ``list``
:param scheme_types_mapping: List of all the mappings in the remote system.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, scheme_types_mapping=None):
self.scheme_types_mappings = scheme_types_mapping if scheme_types_mapping else []
def add_scheme_type(self, scheme_type_mapping):
"""Add another incident type mapping.
:type scheme_type_mapping: ``dict``
:param scheme_type_mapping: mapping of a singular field.
:return: No data returned
:rtype: ``None``
"""
self.scheme_types_mappings.append(scheme_type_mapping)
def extract_mapping(self):
"""Extracts the mapping into XSOAR mapping screen.
:return: the mapping object for the current field.
:rtype: ``dict``
"""
all_mappings = {}
for scheme_types_mapping in self.scheme_types_mappings:
all_mappings.update(scheme_types_mapping.extract_mapping())
return all_mappings
def get_x_content_info_headers():
"""Get X-Content-* headers to send in outgoing requests to use when performing requests to
external services such as oproxy.
:return: headers dict
:rtype: ``dict``
"""
calling_context = demisto.callingContext.get('context', {})
brand_name = calling_context.get('IntegrationBrand', '')
instance_name = calling_context.get('IntegrationInstance', '')
headers = {
'X-Content-Version': CONTENT_RELEASE_VERSION,
'X-Content-Name': brand_name or instance_name or 'Name not found',
'X-Content-LicenseID': demisto.getLicenseID(),
'X-Content-Branch': CONTENT_BRANCH_NAME,
'X-Content-Server-Version': get_demisto_version_as_str(),
}
return headers
class BaseWidget:
@abstractmethod
def to_display(self):
pass
class TextWidget(BaseWidget):
"""Text Widget representation
:type text: ``str``
:param text: The text for the widget to display
:return: No data returned
:rtype: ``None``
"""
def __init__(self, text):
# type: (str) -> None
self.text = text
def to_display(self):
"""Text Widget representation
:type text: ``str``
:param text: The text for the widget to display
:return: No data returned
:rtype: ``None``
"""
return self.text
class TrendWidget(BaseWidget):
"""Trend Widget representation
:type current_number: ``int``
:param current_number: The Current number in the trend.
:type previous_number: ``int``
:param previous_number: The previous number in the trend.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, current_number, previous_number):
# type: (int, int) -> None
self.current_number = current_number
self.previous_number = previous_number
def to_display(self):
return json.dumps({
'currSum': self.current_number,
'prevSum': self.previous_number
})
class NumberWidget(BaseWidget):
"""Number Widget representation
:type number: ``int``
:param number: The number for the widget to display.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, number):
# type: (int) -> None
self.number = number
def to_display(self):
return self.number
class BarColumnPieWidget(BaseWidget):
"""Bar/Column/Pie Widget representation
:type categories: ``list``
:param categories: a list of categories to display (preferably populated via the add_category function).
:return: No data returned
:rtype: ``None``
"""
def __init__(self, categories=None):
# type: (list) -> None
self.categories = categories if categories else [] # type: List[dict]
def add_category(self, name, number):
"""Add a category to widget.
:type name: ``str``
:param name: the name of the category to add.
:type number: ``int``
:param number: the number value of the category.
:return: No data returned.
:rtype: ``None``
"""
self.categories.append({
'name': name,
'data': [number]
})
def to_display(self):
return json.dumps(self.categories)
class LineWidget(BaseWidget):
"""Line Widget representation
:type categories: ``Any``
:param categories: a list of categories to display (preferably populated via the add_category function).
:return: No data returned
:rtype: ``None``
"""
def __init__(self, categories=None):
# type: (list) -> None
self.categories = categories if categories else [] # type: List[dict]
def add_category(self, name, number, group):
"""Add a category to widget.
:type name: ``str``
:param name: the name of the category to add.
:type number: ``int``
:param number: the number value of the category.
:type group: ``str``
:param group: the name of the relevant group.
:return: No data returned
:rtype: ``None``
"""
self.categories.append({
'name': name,
'data': [number],
'groups': [
{
'name': group,
'data': [number]
},
]
})
def to_display(self):
processed_names = [] # type: List[str]
processed_categories = [] # type: List[dict]
for cat in self.categories:
if cat['name'] in processed_names:
for processed_category in processed_categories:
if cat['name'] == processed_category['name']:
processed_category['data'] = [processed_category['data'][0] + cat['data'][0]]
processed_category['groups'].extend(cat['groups'])
break
else:
processed_categories.append(cat)
processed_names.append(cat['name'])
return json.dumps(processed_categories)
class TableOrListWidget(BaseWidget):
"""Table/List Widget representation
:type data: ``Any``
:param data: a list of data rows to display (preferably populated via the add_row function).
:return: No data returned
:rtype: ``None``
"""
def __init__(self, data=None):
# type: (Any) -> None
self.data = data if data else []
if not isinstance(self.data, list):
self.data = [data]
def add_row(self, data):
"""Add a row to the widget.
:type data: ``Any``
:param data: the data to add to the list/table.
:return: No data returned
:rtype: ``None``
"""
self.data.append(data)
def to_display(self):
return json.dumps({
'total': len(self.data),
'data': self.data
})
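# Illustrative sketch of returning widget data from a widget script (the entry wrapping shown here
# is a minimal assumption, not the only valid form):
#     widget = TableOrListWidget()
#     widget.add_row({'Name': 'example', 'Count': 1})
#     demisto.results({'Type': entryTypes['note'], 'ContentsFormat': formats['json'],
#                      'Contents': widget.to_display()})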
class IndicatorsSearcher:
"""Used in order to search indicators by the paging or serachAfter param
:type page: ``int``
:param page: the page number from which to start searching indicators.
:type filter_fields: ``str``
:param filter_fields: comma separated fields to filter (e.g. "value,type")
:return: No data returned
:rtype: ``None``
"""
def __init__(self, page=0, filter_fields=None):
# searchAfter is available in searchIndicators from version 6.1.0
self._can_use_search_after = is_demisto_version_ge('6.1.0')
# populateFields merged in https://github.com/demisto/server/pull/18398
self._can_use_filter_fields = is_demisto_version_ge('6.1.0', build_number='1095800')
self._search_after_title = 'searchAfter'
self._search_after_param = None
self._page = page
self._filter_fields = filter_fields
def search_indicators_by_version(self, from_date=None, query='', size=100, to_date=None, value=''):
"""There are 2 cases depends on the sever version:
1. Search indicators using paging, raise the page number in each call.
2. Search indicators using searchAfter param, update the _search_after_param in each call.
:type from_date: ``str``
:param from_date: the start date to search from.
:type query: ``str``
:param query: indicator search query
        :type size: ``int``
:param size: limit the number of returned results.
:type to_date: ``str``
        :param to_date: the end date to search until.
:type value: ``str``
:param value: the indicator value to search.
:return: object contains the search results
:rtype: ``dict``
"""
if self._can_use_search_after:
# if search_after_param exists use it for paging, else use the page number
search_iocs_params = assign_params(
fromDate=from_date,
toDate=to_date,
query=query,
size=size,
value=value,
searchAfter=self._search_after_param,
populateFields=self._filter_fields if self._can_use_filter_fields else None,
page=self._page if not self._search_after_param else None
)
res = demisto.searchIndicators(**search_iocs_params)
self._search_after_param = res[self._search_after_title]
if res[self._search_after_title] is None:
demisto.info('Elastic search using searchAfter returned all indicators')
else:
res = demisto.searchIndicators(fromDate=from_date, toDate=to_date, query=query, size=size, page=self._page,
value=value)
self._page += 1
return res
@property
def page(self):
return self._page
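# Illustrative usage (added example, not part of the original class): drain all matching
# indicators regardless of whether the server supports searchAfter or classic paging.
# The 'iocs' key is assumed to follow the standard searchIndicators response shape.
def _example_search_all_indicators(query='type:IP', batch_size=200):
    """Sketch only: keeps calling search_indicators_by_version until a batch comes back empty."""
    searcher = IndicatorsSearcher()
    indicators = []
    while True:
        res = searcher.search_indicators_by_version(query=query, size=batch_size)
        batch = res.get('iocs') or []
        if not batch:
            break
        indicators.extend(batch)
    return indicators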
class AutoFocusKeyRetriever:
"""AutoFocus API Key management class
:type api_key: ``str``
:param api_key: Auto Focus API key coming from the integration parameters
:return: No data returned
:rtype: ``None``
"""
def __init__(self, api_key):
# demisto.getAutoFocusApiKey() is available from version 6.2.0
if not api_key:
if not is_demisto_version_ge("6.2.0"): # AF API key is available from version 6.2.0
raise DemistoException('For versions earlier than 6.2.0, configure an API Key.')
try:
api_key = demisto.getAutoFocusApiKey() # is not available on tenants
except ValueError as err:
raise DemistoException('AutoFocus API Key is only available on the main account for TIM customers. ' + str(err))
self.key = api_key
def get_feed_last_run():
"""
This function gets the feed's last run: from XSOAR version 6.2.0: using `demisto.getLastRun()`.
Before XSOAR version 6.2.0: using `demisto.getIntegrationContext()`.
:rtype: ``dict``
:return: All indicators from the feed's last run
"""
if is_demisto_version_ge('6.2.0'):
feed_last_run = demisto.getLastRun() or {}
if not feed_last_run:
integration_ctx = demisto.getIntegrationContext()
if integration_ctx:
feed_last_run = integration_ctx
demisto.setLastRun(feed_last_run)
demisto.setIntegrationContext({})
else:
feed_last_run = demisto.getIntegrationContext() or {}
return feed_last_run
def set_feed_last_run(last_run_indicators):
"""
This function sets the feed's last run: from XSOAR version 6.2.0: using `demisto.setLastRun()`.
Before XSOAR version 6.2.0: using `demisto.setIntegrationContext()`.
:type last_run_indicators: ``dict``
:param last_run_indicators: Indicators to save in "lastRun" object.
:rtype: ``None``
:return: None
"""
if is_demisto_version_ge('6.2.0'):
demisto.setLastRun(last_run_indicators)
else:
demisto.setIntegrationContext(last_run_indicators)
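# Illustrative usage (added example, not part of the original functions): a typical
# fetch-indicators flow that persists its position between runs on any server version.
def _example_feed_run(fetch_time):
    """Sketch only: read the previous position, fetch from there, then store the new one."""
    last_run = get_feed_last_run()
    start_from = last_run.get('last_fetch_time')  # None on the very first run
    # ... fetch and create indicators newer than start_from here ...
    set_feed_last_run({'last_fetch_time': fetch_time})
    return start_from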
def support_multithreading():
"""Adds lock on the calls to the Cortex XSOAR server from the Demisto object to support integration which use multithreading.
:return: No data returned
:rtype: ``None``
"""
global demisto
prev_do = demisto._Demisto__do # type: ignore[attr-defined]
demisto.lock = Lock() # type: ignore[attr-defined]
def locked_do(cmd):
try:
if demisto.lock.acquire(timeout=60): # type: ignore[call-arg,attr-defined]
return prev_do(cmd) # type: ignore[call-arg]
else:
raise RuntimeError('Failed acquiring lock')
finally:
demisto.lock.release() # type: ignore[attr-defined]
demisto._Demisto__do = locked_do # type: ignore[attr-defined]
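# Illustrative usage (added example, not part of the original function): enable the lock
# once before spawning worker threads that call back into the server via `demisto`.
def _example_threaded_work(items):
    """Sketch only: each worker may safely call demisto.* because the calls are serialized."""
    from threading import Thread
    support_multithreading()
    workers = [Thread(target=demisto.debug, args=('processing %s' % item,)) for item in items]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()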
| mit | 4,435,624,354,605,948,000 | 36.151411 | 738 | 0.577134 | false |
kkovaacs/zorp | pylib/Zorp/SockAddr.py | 1 | 9351 | ############################################################################
##
## Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
## 2010, 2011 BalaBit IT Ltd, Budapest, Hungary
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
############################################################################
"""
<module maturity="stable">
<summary>
Module defining interface to the SockAddr.
</summary>
<description>
<para>
This module implements <parameter>inet_ntoa</parameter> and <parameter>inet_aton</parameter>. The module also provides an interface
to the SockAddr services of the Zorp core. SockAddr is used for example to define the bind address of
<link linkend="python.Dispatch.Dispatcher">Dispatchers</link>, or the address of the ZAS server in
<link linkend="python.AuthDB.AuthenticationProvider">AuthenticationProvider</link> policies.
</para>
</description>
</module>
"""
from string import split, atoi
from socket import htonl, ntohl
def inet_aton(ip):
"""
<function maturity="stable">
<summary>
Function to convert an internet address to a 32-bit integer.
</summary>
<description>
<para>
This function converts the string representation of an IPv4 address
to an integer in network byte order.
Returns unsigned long in network byte order.
</para>
</description>
<metainfo>
<arguments>
<argument maturity="stable">
<name>ip</name>
<type><string/></type>
<description>A dotted-quad string</description>
</argument>
</arguments>
</metainfo>
</function>
"""
# FIXME: there is no parameter check
    parts = split(ip, '.', 4)
return htonl(atoi(parts[0]) << 24 | \
atoi(parts[1]) << 16 | \
atoi(parts[2]) << 8 | \
atoi(parts[3]))
def inet_ntoa(ip):
"""
<function maturity="stable">
<summary>
Function to convert a 32-bit integer into an IPv4 address.
</summary>
<description>
<para>
This function converts an IP address from network byte order
into its string representation (dotted quad).
Returns string representation of the IP address.
</para>
</description>
<metainfo>
<arguments>
<argument maturity="stable">
<name>ip</name>
<type></type>
<description>The IP address as a 32-bit integer (network byte order).</description>
</argument>
</arguments>
</metainfo>
</function>
"""
ip = ntohl(ip)
parts = (((ip & 0xff000000) >> 24) & 0xff,
(ip & 0x00ff0000) >> 16,
(ip & 0x0000ff00) >> 8,
(ip & 0x000000ff))
return "%u.%u.%u.%u" % parts
class SockAddrInet:
"""
<class maturity="stable">
<summary>
Class encapsulating an IPv4 address:port pair.
</summary>
<description>
<para>
This class encapsulates an IPv4 address:port pair, similarly to
the <parameter>sockaddr_in</parameter> struct in C. The class is implemented and exported by
the Zorp core. The <parameter>SockAddrInet</parameter> Python class serves only
documentation purposes, and has no real connection to the
behavior implemented in C.
</para>
<example>
<title>SockAddrInet example</title>
<para>
The following example defines an IPv4 address:port pair.</para>
<synopsis>
SockAddrInet('192.168.10.10', 80)
</synopsis>
<para>
The following example uses SockAddrInet in a dispatcher. See <xref linkend="python.Dispatch.Dispatcher"/> for details on Dispatchers.
</para>
<synopsis>
Dispatcher(transparent=TRUE, bindto=DBSockAddr(protocol=ZD_PROTO_TCP, sa=SockAddrInet('192.168.11.11', 50080)), service="intra_HTTP_inter", backlog=255, rule_port="50080")
</synopsis>
</example>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>type</name>
<type><string/></type>
<description>The <parameter>inet</parameter> value that indicates an address in the AF_INET domain.</description>
</attribute>
<attribute maturity="stable">
<name>ip</name>
<type></type>
<description>IP address (network byte order).</description>
</attribute>
<attribute maturity="stable">
<name>ip_s</name>
<type></type>
<description>IP address in string representation.</description>
</attribute>
<attribute maturity="stable">
<name>port</name>
<type></type>
<description>Port number (network byte order).</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
pass
class SockAddrInetRange:
"""
<class maturity="stable">
<summary>
Class encapsulating an IPv4 address and a port range.
</summary>
<description>
<para>
A specialized SockAddrInet class which allocates a new port
within the given range of ports when a dispatcher bounds to it.
The class is implemented and exported by
the Zorp core. The <parameter>SockAddrInetRange</parameter> Python class serves only
documentation purposes, and has no real connection to the
behavior implemented in C.
</para>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>type</name>
<type><string/></type>
<description>The <parameter>inet</parameter> value that indicates an address in the AF_INET domain.</description>
</attribute>
<attribute maturity="stable">
<name>ip</name>
<type></type>
<description>IP address (network byte order).</description>
</attribute>
<attribute maturity="stable">
<name>ip_s</name>
<type></type>
<description>IP address in string representation.</description>
</attribute>
<attribute maturity="stable">
<name>port</name>
<type></type>
<description>Port number (network byte order).</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
pass
class SockAddrUnix:
"""
<class maturity="stable">
<summary>
Class encapsulating a UNIX domain socket.
</summary>
<description>
<para>
This class encapsulates a UNIX domain socket endpoint.
The socket is represented by a filename. The <parameter>SockAddrUnix</parameter>
Python class serves only
documentation purposes, and has no real connection to the
behavior implemented in C.
</para>
<example>
<title>SockAddrUnix example</title>
<para>
The following example defines a Unix domain socket.</para>
<synopsis>
SockAddrUnix('/var/sample.socket')
</synopsis>
<para>
The following example uses SockAddrUnix in a DirectedRouter.
</para>
<synopsis>
Service(name="demo_service", proxy_class=HttpProxy, router=DirectedRouter(dest_addr=SockAddrUnix('/var/sample.socket'), overrideable=FALSE, forge_addr=FALSE))
</synopsis>
</example>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>type</name>
<type><string/></type>
<description>The <parameter>unix</parameter> value that indicates an address in the UNIX domain.</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
#class SockAddrInet6(SockAddr):
# def __init__(self, ip, port):
# SockAddr.__init__(self, 'inet6')
# self.ip = ip
# self.port = port
| gpl-2.0 | 7,737,876,345,786,557,000 | 35.960474 | 171 | 0.556411 | false |
voostar/hp_laserprinter_monitor | frontline/frontline/settings.py | 1 | 5540 | # Django settings for chifanbu project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'hp_laserprinter_monitor', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': 'long841205',
'HOST': '10.8.144.247', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'static',),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'yw#t3j-j3+v7_(mb2a#xlk7k7uu@gtu75-%7&&&bl*dvbc+j@8'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'frontline.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'frontline.wsgi.application'
TEMPLATE_DIRS = (
"templates",
"templates/displayer",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'displayer',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| unlicense | -5,563,714,469,655,122,000 | 33.409938 | 139 | 0.686643 | false |
piti118/docker-compose-ui | scripts/bridge.py | 1 | 1639 | """
bridge to docker-compose
"""
import logging
from compose.container import Container
from compose.cli.command import get_project as compose_get_project, get_config_path_from_options
from compose.config.config import get_default_config_files
from compose.config.environment import Environment
def ps_(project):
"""
containers status
"""
logging.debug('ps ' + project.name)
containers = project.containers(stopped=True)
items = [{
'name': container.name,
'name_without_project': container.name_without_project,
'command': container.human_readable_command,
'state': container.human_readable_state,
'labels': container.labels,
'ports': container.ports,
'volumes': get_volumes(get_container_from_id(project.client, container.id)),
'is_running': container.is_running} for container in containers]
return items
def get_container_from_id(client, container_id):
"""
return the docker container from a given id
"""
return Container.from_id(client, container_id)
def get_volumes(container):
"""
retrieve container volumes details
"""
return container.get('Config.Volumes')
def get_yml_path(path):
"""
get path of docker-compose.yml file
"""
return get_default_config_files(path)[0]
def get_project(path):
"""
get docker project given file path
"""
logging.debug('get project ' + path)
environment = Environment.from_env_file(path)
config_path = get_config_path_from_options(path, dict(), environment)
project = compose_get_project(path, config_path)
return project
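# Illustrative usage (added example, not part of the original script):
def example_container_status(path):
    """Load the docker-compose project found in `path` and return its containers' status."""
    project = get_project(path)
    return ps_(project)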
| mit | -2,677,457,798,376,228,000 | 27.258621 | 96 | 0.682123 | false |
segfaulthunter/asynchia | asynchia/forthcoming.py | 1 | 7047 | # -*- coding: us-ascii -*-
# asynchia - asynchronous networking library
# Copyright (C) 2009 Florian Mayer <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Facilities to refer to data that is not yet available.
Example:
# Usually acquired by a call that results in network I/O.
# Global variable for demostration purposes.
a = DataNotifier()
def bar():
# Request result of network I/O.
blub = yield a
yield Coroutine.return_(blub)
def foo():
# Wait for completion of new coroutine which - in turn - waits
# for I/O.
blub = yield Coroutine.call_itr(bar())
print "yay %s" % blub
c = Coroutine(foo())
c.call()
# Network I/O complete.
a.submit('blub')
"""
import threading
import asynchia
from asynchia.util import b
_NULL = object()
class PauseContext(object):
""" Collection of Coroutines which are currently paused but not waiting
for any data. They are paused to prevent too much time to be spent in
them, preventing possibly important I/O from being done. """
def __init__(self):
self.paused = []
def unpause(self):
""" Continue all paused coroutines. """
for coroutine in self.paused:
coroutine.call()
self.paused[:] = []
def pause(self, coroutine):
""" Add coroutine to the list of paused coroutines. """
self.paused.append(coroutine)
class Coroutine(object):
""" Create coroutine from given iterator. Yielding None will pause the co-
routine until continuation by the given PauseContext. Yielding a
DataNotifier will send the requested value to the coroutine once
available. Yielding an instance of Coroutine.return_ will end execution
of the coroutine and send the return value to any coroutines that may be
waiting for it or calls any callbacks associated with it. """
class return_:
""" Yield an instance of this to signal that the coroutine finished
with the given return value (defaults to None). """
def __init__(self, obj=None):
self.obj = obj
def __init__(self, itr, pcontext=None, datanotifier=None):
self.itr = itr
if datanotifier is None:
datanotifier = DataNotifier()
self.datanotifier = datanotifier
self.pcontext = pcontext
def send(self, data):
""" Send requested data to coroutine. """
try:
self.handle_result(self.itr.send(data))
except StopIteration:
self.datanotifier.submit(None)
def call(self):
""" Start (or resume) execution of the coroutine. """
try:
self.handle_result(self.itr.next())
except StopIteration:
self.datanotifier.submit(None)
def handle_result(self, result):
""" Internal. """
if result is None:
if self.pcontext is not None:
self.pcontext.pause(self)
else:
raise ValueError("No PauseContext.")
elif isinstance(result, Coroutine.return_):
self.datanotifier.submit(result.obj)
else:
result.add_coroutine(self)
@classmethod
def call_itr(cls, itr):
""" Create a coroutine from the given iterator, start it
and return the DataNotifier. """
coroutine = cls(itr)
coroutine.call()
return coroutine.datanotifier
class DataNotifier(object):
""" Call registered callbacks and send data to registered coroutines
at submission of data. """
def __init__(self, socket_map):
self.dcallbacks = []
self.rcallbacks = []
self.coroutines = []
self.finished = False
self.data = _NULL
self.event = threading.Event()
self.socket_map = socket_map
def add_coroutine(self, coroutine):
""" Add coroutine that waits for the submission of this data. """
if self.data is _NULL:
self.coroutines.append(coroutine)
else:
coroutine.send(self.data)
def add_databack(self, callback):
""" Add databack (function that receives the the data-notifier data
upon submission as arguments). """
if self.data is _NULL:
self.dcallbacks.append(callback)
else:
callback(self.data)
def add_callback(self, callback):
""" Add callback (function that only receives the data upon
submission as an argument). """
if self.data is _NULL:
self.rcallbacks.append(callback)
else:
callback(self, self.data)
def poll(self):
""" Poll whether result has already been submitted. """
return self.finished
def submit(self, data):
""" Submit data; send it to any coroutines that may be registered and
call any data- and callbacks that may be registered. """
self.data = data
for callback in self.dcallbacks:
callback(data)
for callback in self.rcallbacks:
callback(self, data)
for coroutine in self.coroutines:
coroutine.send(data)
self.coroutines[:] = []
self.rcallbacks[:] = []
self.dcallbacks[:] = []
# Wake up threads waiting for the data.
self.event.set()
self.finished = True
def inject(self, data):
""" Submit data and ensure their callbacks are called in the main
thread. """
self.socket_map.call_synchronized(lambda: self.submit(data))
def wait(self, timeout=None):
""" Block execution of current thread until the data is available.
Return requested data. """
self.event.wait(timeout)
return self.data
@staticmethod
def _coroutine(datanotifier, fun, args, kwargs):
""" Implementation detail. """
datanotifier.inject(fun(*args, **kwargs))
@classmethod
def threaded_coroutine(cls, socket_map, fun, *args, **kwargs):
""" Run fun(*args, **kwargs) in a thread and return a DataNotifier
notifying upon availability of the return value of the function. """
datanot = cls(socket_map)
threading.Thread(
target=cls._coroutine, args=(datanot, fun, args, kwargs)
).start()
return datanot
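# Illustrative usage (added example, not part of the original module): run a blocking
# call in a worker thread and receive its return value back in the main loop. The
# socket_map argument is assumed to be the application's asynchia socket-map.
def _threaded_coroutine_example(socket_map):
    """ Sketch only: slow_add runs in a thread; the databack receives 42 once the
    result has been injected back through the socket-map. """
    def slow_add(first, second):
        return first + second
    notifier = DataNotifier.threaded_coroutine(socket_map, slow_add, 20, 22)
    notifier.add_databack(lambda value: None)
    return notifier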
| gpl-3.0 | -4,675,045,574,142,878,000 | 33.043478 | 78 | 0.618277 | false |
patrickdw123/ParanoiDF | PDFUtils.py | 1 | 15029 | # ParanoiDF. A combination of several PDF analysis/manipulation tools to
# produce one of the most technically useful PDF analysis tools.
#
# Idea proposed by Julio Hernandez-Castro, University of Kent, UK.
# By Patrick Wragg
# University of Kent
# 21/07/2014
#
# With thanks to:
# Julio Hernandez-Castro, my supervisor.
# Jose Miguel Esparza for writing PeePDF (the basis of this tool).
# Didier Stevens for his "make-PDF" tools.
# Blake Hartstein for Jsunpack-n.
# Yusuke Shinyama for Pdf2txt.py (PDFMiner)
# Nacho Barrientos Arias for Pdfcrack.
# Kovid Goyal for Calibre (DRM removal).
# Jay Berkenbilt for QPDF.
#
# Copyright (C) 2014-2018 Patrick Wragg
#
# This file is part of ParanoiDF.
#
# ParanoiDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ParanoiDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ParanoiDF. If not, see <http://www.gnu.org/licenses/>.
#
# This was written by Jose Miguel Esparza for the tool PeePDF.
'''
Module with some misc functions
'''
import os, re, htmlentitydefs, json, urllib, urllib2
def clearScreen():
'''
Simple method to clear the screen depending on the OS
'''
if os.name == 'nt':
os.system('cls')
elif os.name == 'posix':
os.system('reset')
elif os.name == 'mac':
os.system('clear')
def countArrayElements(array):
'''
Simple method to count the repetitions of elements in an array
@param array: An array of elements
@return: A tuple (elements,counters), where elements is a list with the distinct elements and counters is the list with the number of times they appear in the array
'''
elements = []
counters = []
for element in array:
if element in elements:
indx = elements.index(element)
counters[indx] += 1
else:
elements.append(element)
counters.append(1)
return elements,counters
def countNonPrintableChars(string):
'''
Simple method to return the non printable characters found in an string
@param string: A string
@return: Number of non printable characters in the string
'''
counter = 0
for i in range(len(string)):
if ord(string[i]) <= 31 or ord(string[i]) > 127:
counter += 1
return counter
def decodeName(name):
'''
Decode the given PDF name
@param name: A PDFName string to decode
@return: A tuple (status,statusContent), where statusContent is the decoded PDF name in case status = 0 or an error in case status = -1
'''
decodedName = name
hexNumbers = re.findall('#([0-9a-f]{2})', name, re.DOTALL | re.IGNORECASE)
for hexNumber in hexNumbers:
try:
decodedName = decodedName.replace('#'+hexNumber,chr(int(hexNumber,16)))
except:
return (-1,'Error decoding name')
return (0,decodedName)
def decodeString(string):
'''
Decode the given PDF string
@param string: A PDFString to decode
@return A tuple (status,statusContent), where statusContent is the decoded PDF string in case status = 0 or an error in case status = -1
'''
decodedString = string
    octalNumbers = re.findall('\\\\([0-7]{1,3})', decodedString, re.DOTALL)
for octal in octalNumbers:
try:
            decodedString = decodedString.replace('\\'+octal,chr(int(octal,8)))
except:
return (-1,'Error decoding string')
return (0,decodedString)
def encodeName(name):
'''
Encode the given PDF name
@param name: A PDFName string to encode
@return: A tuple (status,statusContent), where statusContent is the encoded PDF name in case status = 0 or an error in case status = -1
'''
encodedName = ''
if name[0] == '/':
name = name[1:]
for char in name:
if char == '\0':
encodedName += char
else:
try:
hex = '%x' % ord(char)
encodedName += '#'+hex
except:
return (-1,'Error encoding name')
return (0,'/'+encodedName)
def encodeString(string):
'''
Encode the given PDF string
@param string: A PDFString to encode
@return: A tuple (status,statusContent), where statusContent is the encoded PDF string in case status = 0 or an error in case status = -1
'''
encodedString = ''
try:
for char in string:
octal = '%o' % ord(char)
encodedString += '\\'+(3-len(octal))*'0'+octal
except:
return (-1,'Error encoding string')
return (0,encodedString)
def escapeRegExpString(string):
'''
Escape the given string to include it as a regular expression
@param string: A regular expression to be escaped
@return: Escaped string
'''
toEscapeChars = ['\\','(',')','.','|','^','$','*','+','?','[',']']
escapedValue = ''
for i in range(len(string)):
if string[i] in toEscapeChars:
escapedValue += '\\'+string[i]
else:
escapedValue += string[i]
return escapedValue
def escapeString(string):
'''
Escape the given string
@param string: A string to be escaped
@return: Escaped string
'''
toEscapeChars = ['\\','(',')']
escapedValue = ''
for i in range(len(string)):
if string[i] in toEscapeChars and (i == 0 or string[i-1] != '\\'):
if string[i] == '\\':
if len(string) > i+1 and re.match('[0-7]',string[i+1]):
escapedValue += string[i]
else:
escapedValue += '\\'+string[i]
else:
escapedValue += '\\'+string[i]
elif string[i] == '\r':
escapedValue += '\\r'
elif string[i] == '\n':
escapedValue += '\\n'
elif string[i] == '\t':
escapedValue += '\\t'
elif string[i] == '\b':
escapedValue += '\\b'
elif string[i] == '\f':
escapedValue += '\\f'
else:
escapedValue += string[i]
return escapedValue
def getBitsFromNum(num, bitsPerComponent = 8):
'''
Makes the conversion between number and bits
@param num: Number to be converted
@param bitsPerComponent: Number of bits needed to represent a component
@return: A tuple (status,statusContent), where statusContent is the string containing the resulting bits in case status = 0 or an error in case status = -1
'''
if not isinstance(num,int):
return (-1,'num must be an integer')
if not isinstance(bitsPerComponent,int):
return (-1,'bitsPerComponent must be an integer')
try:
bitsRepresentation = bin(num)
bitsRepresentation = bitsRepresentation.replace('0b','')
mod = len(bitsRepresentation) % 8
if mod != 0:
bitsRepresentation = '0'*(8-mod) + bitsRepresentation
bitsRepresentation = bitsRepresentation[-1*bitsPerComponent:]
except:
return (-1,'Error in conversion from number to bits')
return (0,bitsRepresentation)
def getNumsFromBytes(bytes, bitsPerComponent = 8):
'''
Makes the conversion between bytes and numbers, depending on the number of bits used per component.
@param bytes: String representing the bytes to be converted
@param bitsPerComponent: Number of bits needed to represent a component
@return: A tuple (status,statusContent), where statusContent is a list of numbers in case status = 0 or an error in case status = -1
'''
if not isinstance(bytes,str):
return (-1,'bytes must be a string')
if not isinstance(bitsPerComponent,int):
return (-1,'bitsPerComponent must be an integer')
outputComponents = []
bitsStream = ''
for byte in bytes:
try:
bitsRepresentation = bin(ord(byte))
bitsRepresentation = bitsRepresentation.replace('0b','')
bitsRepresentation = '0'*(8-len(bitsRepresentation)) + bitsRepresentation
bitsStream += bitsRepresentation
except:
return (-1,'Error in conversion from bytes to bits')
try:
for i in range(0,len(bitsStream),bitsPerComponent):
bytes = ''
bits = bitsStream[i:i+bitsPerComponent]
num = int(bits,2)
outputComponents.append(num)
except:
return (-1,'Error in conversion from bits to bytes')
return (0,outputComponents)
def getBytesFromBits(bitsStream):
'''
Makes the conversion between bits and bytes.
@param bitsStream: String representing a chain of bits
@return: A tuple (status,statusContent), where statusContent is the string containing the resulting bytes in case status = 0 or an error in case status = -1
'''
if not isinstance(bitsStream,str):
return (-1,'The bitsStream must be a string')
bytes = ''
if re.match('[01]*$',bitsStream):
try:
for i in range(0,len(bitsStream),8):
bits = bitsStream[i:i+8]
byte = chr(int(bits,2))
bytes += byte
except:
return (-1,'Error in conversion from bits to bytes')
return (0,bytes)
else:
return (-1,'The format of the bit stream is not correct')
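# Illustrative round trips (added example, not part of the original module) for the
# bit/byte/number helpers above, using the default of 8 bits per component.
def _bitsConversionExample():
    '''
    Sketch only: each call returns a (status, value) tuple with status 0 on success.
    '''
    retNums = getNumsFromBytes('AB')                  # (0, [65, 66])
    retBits = getBitsFromNum(65)                      # (0, '01000001')
    retBytes = getBytesFromBits('0100000101000010')   # (0, 'AB')
    return (retNums, retBits, retBytes)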
def getBytesFromFile(filename, offset, numBytes):
'''
Returns the number of bytes specified from a file, starting from the offset specified
@param filename: Name of the file
@param offset: Bytes offset
@param numBytes: Number of bytes to retrieve
@return: A tuple (status,statusContent), where statusContent is the bytes read in case status = 0 or an error in case status = -1
'''
if not isinstance(offset,int) or not isinstance(numBytes,int):
return (-1,'The offset and the number of bytes must be integers')
if os.path.exists(filename):
fileSize = os.path.getsize(filename)
bytesFile = open(filename,'rb')
bytesFile.seek(offset)
if offset+numBytes > fileSize:
bytes = bytesFile.read()
else:
bytes = bytesFile.read(numBytes)
bytesFile.close()
return (0,bytes)
else:
return (-1,'File does not exist')
def hexToString(hexString):
'''
Simple method to convert an hexadecimal string to ascii string
@param hexString: A string in hexadecimal format
@return: A tuple (status,statusContent), where statusContent is an ascii string in case status = 0 or an error in case status = -1
'''
string = ''
if len(hexString) % 2 != 0:
hexString = '0'+hexString
try:
for i in range(0,len(hexString),2):
string += chr(int(hexString[i]+hexString[i+1],16))
except:
return (-1,'Error in hexadecimal conversion')
return (0,string)
def numToHex(num, numBytes):
'''
Given a number returns its hexadecimal format with the specified length, adding '\0' if necessary
@param num: A number (int)
@param numBytes: Length of the output (int)
@return: A tuple (status,statusContent), where statusContent is a number in hexadecimal format in case status = 0 or an error in case status = -1
'''
hexString = ''
if not isinstance(num,int):
return (-1,'Bad number')
try:
hexNumber = hex(num)[2:]
if len(hexNumber) % 2 != 0:
hexNumber = '0'+hexNumber
for i in range(0,len(hexNumber)-1,2):
hexString += chr(int(hexNumber[i]+hexNumber[i+1],16))
hexString = '\0'*(numBytes-len(hexString))+hexString
except:
return (-1,'Error in hexadecimal conversion')
return (0,hexString)
def numToString(num, numDigits):
'''
Given a number returns its string format with the specified length, adding '0' if necessary
@param num: A number (int)
@param numDigits: Length of the output string (int)
@return: A tuple (status,statusContent), where statusContent is a number in string format in case status = 0 or an error in case status = -1
'''
if not isinstance(num,int):
return (-1,'Bad number')
strNum = str(num)
if numDigits < len(strNum):
return (-1,'Bad digit number')
for i in range(numDigits-len(strNum)):
strNum = '0' + strNum
return (0,strNum)
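# Illustrative usage (added example, not part of the original module): fixed-width
# renderings of numbers as raw bytes and as zero-padded strings.
def _numFormattingExample():
    '''
    Sketch only: both helpers return a (status, value) tuple with status 0 on success.
    '''
    retHex = numToHex(65, 2)        # (0, '\x00A')
    retString = numToString(7, 3)   # (0, '007')
    return (retHex, retString)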
def unescapeHTMLEntities(text):
'''
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Author: Fredrik Lundh
Source: http://effbot.org/zone/re-sub.htm#unescape-html
'''
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def unescapeString(string):
'''
Unescape the given string
@param string: An escaped string
@return: Unescaped string
'''
toUnescapeChars = ['\\','(',')']
unescapedValue = ''
i = 0
while i < len(string):
if string[i] == '\\' and i != len(string)-1:
if string[i+1] in toUnescapeChars:
if string[i+1] == '\\':
unescapedValue += '\\'
i += 1
else:
pass
elif string[i+1] == 'r':
i += 1
unescapedValue += '\r'
elif string[i+1] == 'n':
i += 1
unescapedValue += '\n'
elif string[i+1] == 't':
i += 1
unescapedValue += '\t'
elif string[i+1] == 'b':
i += 1
unescapedValue += '\b'
elif string[i+1] == 'f':
i += 1
unescapedValue += '\f'
else:
unescapedValue += string[i]
else:
unescapedValue += string[i]
i += 1
return unescapedValue
def vtcheck(md5, vtKey):
'''
Function to check a hash on VirusTotal and get the report summary
@param md5: The MD5 to check (hexdigest)
@param vtKey: The VirusTotal API key needed to perform the request
@return: A dictionary with the result of the request
'''
vtUrl = 'https://www.virustotal.com/vtapi/v2/file/report'
parameters = {'resource':md5,'apikey':vtKey}
try:
data = urllib.urlencode(parameters)
req = urllib2.Request(vtUrl, data)
response = urllib2.urlopen(req)
jsonResponse = response.read()
except:
return (-1, 'The request to VirusTotal has not been successful')
try:
jsonDict = json.loads(jsonResponse)
except:
return (-1, 'An error has occurred while parsing the JSON response from VirusTotal')
return (0, jsonDict)
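# Illustrative usage (added example, not part of the original module): summarise a
# VirusTotal report. The fields read here ('response_code', 'positives', 'total')
# follow the public VirusTotal API v2 file/report response format.
def _vtcheckExample(md5Hash, vtKey):
    '''
    Sketch only: requires a valid VirusTotal API key and network access.
    '''
    ret = vtcheck(md5Hash, vtKey)
    if ret[0] == 0 and ret[1].get('response_code') == 1:
        return '%s/%s engines flagged the sample' % (ret[1].get('positives'), ret[1].get('total'))
    return None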
| gpl-3.0 | -899,530,424,950,584,600 | 32.103524 | 166 | 0.623461 | false |