the-stack_0_25694
|
import os
import tempfile
import tensorflow as tf
import zipfile
import cloudpickle
import numpy as np
import baselines.common.tf_util as U
from baselines.common.tf_util import load_variables, save_variables
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.common import set_global_seeds
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.tf_util import get_session
from baselines.deepq.models import build_q_func
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
self.initial_state = None
@staticmethod
def load_act(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act = deepq.build_act(**act_params)
sess = tf.Session()
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
load_variables(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def step(self, observation, **kwargs):
# DQN doesn't use RNNs so we ignore states and masks
kwargs.pop('S', None)
kwargs.pop('M', None)
return self._act([observation], **kwargs), None, None, None
def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
def save(self, path):
save_variables(path)
def load_act(path):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load_act(path)
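# Illustrative usage sketch (not part of the original module): load a policy that was
# saved with ActWrapper.save_act() and run it greedily for one episode. The pickle path
# and the Gym environment id are assumptions for the example.
def _example_load_act(path="model.pkl", env_id="CartPole-v0"):
    import gym
    env = gym.make(env_id)
    act = load_act(path)  # returns an ActWrapper
    obs, done, episode_reward = env.reset(), False, 0.0
    while not done:
        # the act function expects a batch of observations and returns a batch of actions
        action = act(obs[None])[0]
        obs, rew, done, _ = env.step(action)
        episode_reward += rew
    return episode_reward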
def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
exploration_scheduler=None,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
prng seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
exploration_scheduler: Schedule or None
optional schedule overriding the default linear annealing of the exploration rate;
if None, a LinearSchedule built from exploration_fraction and exploration_final_eps is used
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from replay buffer for training
print_freq: int
how often to print out training progress.
Set to None to disable printing.
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, it defaults to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If the callback returns true, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = exploration_scheduler or LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
num_episodes = len(episode_rewards)
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) if \
num_episodes > 1 else 0.0
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act
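# Illustrative training sketch (not part of the original module): a minimal call to
# learn() on a toy environment, assuming a Gym environment such as "CartPole-v0" and the
# "mlp" network registered in baselines.common.models.
def _example_learn(env_id="CartPole-v0"):
    import gym
    env = gym.make(env_id)
    act = learn(
        env,
        network='mlp',
        lr=1e-3,
        total_timesteps=10000,
        buffer_size=10000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
    )
    act.save_act("cartpole_model.pkl")  # pickle the act function for later reuse
    return act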
|
the-stack_0_25695
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTCBAM too low! (Should be %s BTCBAM)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTCBAM too high! (Should be %s BTCBAM)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
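# Illustrative usage sketch (not part of the original module): asserting that an RPC
# fails with a specific error code and message substring. The node object and the
# out-of-range block height are assumptions for the example; -8 corresponds to
# RPC_INVALID_PARAMETER in src/rpc/protocol.h.
def _example_assert_raises_rpc_error(node):
    assert_raises_rpc_error(-8, "Block height out of range",
                            node.getblockhash, 1000000000)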
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTCBAM values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = inspect.getsourcelines(predicate)
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
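# Illustrative usage sketch (not part of the original module): block until a node's
# mempool reaches the expected size, raising after 30 seconds instead of the default 60.
# The node object and target size are assumptions for the example.
def _example_wait_until(node, expected_size=1):
    wait_until(lambda: node.getmempoolinfo()['size'] == expected_size, timeout=30)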
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
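# Illustrative sketch (not part of the original module) of the port allocation scheme
# above: with PortSeed.n = 1, node 0 is assigned p2p port 11008 and rpc port 16008.
def _example_ports():
    PortSeed.n = 1  # each test process must set a unique seed before using these helpers
    return p2p_port(0), rpc_port(0)  # -> (11008, 16008) with this seed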
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bitcoinbam.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoinbam.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoinbam.conf")):
with open(os.path.join(datadir, "bitcoinbam.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
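# Illustrative sketch (not part of the original module): the string built above encodes
# 128 outputs, each made of an 8-byte value, a 3-byte length prefix and a 516-byte
# OP_RETURN script, i.e. 2 + 128 * (16 + 6 + 1032) = 134914 hex characters, roughly
# 67 kB of raw transaction data, which is where the "66k transaction" figure used by
# mine_large_block() below comes from.
def _example_txouts_size():
    txouts = gen_return_txouts()
    return len(txouts) // 2  # approximate serialized size in bytes (~67457)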
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
|
the-stack_0_25696
|
# -*- coding: utf-8 -*-
import warnings
class Deprecation(object):
"""Decorator factory for deprecating functions or classes.
This class represent deprecations of functions or classes and is designed
to be used with the ``warnings`` library.
Parameters
----------
last_supported_version : str, optional
Version string, e.g. ``'0.2.1'``.
will_be_missing_in : str, optional
Version string, e.g. ``'0.3.0'``.
use_instead : object or str, optional
Function or class to use instead or descriptive string.
issue : str, optional
issues_url : callback, optional
Converts issue to url, e.g. ``lambda s: 'https://github.com/user/repo/issues/%s/' % s.lstrip('gh-')``.
warning: DeprecationWarning, optional
Any subclass of DeprecationWarning, tip: you may invoke:
``warnings.simplefilter('once', MyWarning)`` at module init.
Examples
--------
>>> import warnings
>>> warnings.simplefilter("error", DeprecationWarning)
>>> @Deprecation()
... def f():
... return 1
...
>>> f() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
DeprecationWarning: f is deprecated.
>>> @Deprecation(last_supported_version='0.4.0')
... def some_old_function(x):
... return x*x - x
...
>>> Deprecation.inspect(some_old_function).last_supported_version
'0.4.0'
>>> @Deprecation(will_be_missing_in='1.0')
... class ClumsyClass(object):
... pass
...
>>> Deprecation.inspect(ClumsyClass).will_be_missing_in
'1.0'
>>> warnings.resetwarnings()
Notes
-----
:class:`DeprecationWarning` is ignored by default. Use custom warning
and filter appropriately. Alternatively, run python with ``-W`` flag or set
the appropriate environment variable:
::
$ python -c 'import warnings as w; w.warn("X", DeprecationWarning)'
$ python -Wd -c 'import warnings as w; w.warn("X", DeprecationWarning)'
-c:1: DeprecationWarning: X
$ export PYTHONWARNINGS=d
$ python -c 'import warnings as w; w.warn("X", DeprecationWarning)'
-c:1: DeprecationWarning: X
"""
_deprecations = {}
def __init__(
self,
last_supported_version=None,
will_be_missing_in=None,
use_instead=None,
issue=None,
issues_url=None,
warning=DeprecationWarning,
):
if (
last_supported_version is not None
and not isinstance(last_supported_version, (str, tuple, list))
and callable(last_supported_version)
):
raise ValueError("last_supported_version not str, tuple or list")
self.last_supported_version = last_supported_version
self.will_be_missing_in = will_be_missing_in
self.use_instead = use_instead
self.issue = issue
self.issues_url = issues_url
self.warning = warning
self.warning_message = self._warning_message_template()
@classmethod
def inspect(cls, obj):
"""Get the :class:`Deprecation` instance of a deprecated function."""
return cls._deprecations[obj]
def _warning_message_template(self):
msg = "%(func_name)s is deprecated"
if self.last_supported_version is not None:
msg += " since (not including) % s" % self.last_supported_version
if self.will_be_missing_in is not None:
msg += ", it will be missing in %s" % self.will_be_missing_in
if self.issue is not None:
if self.issues_url is not None:
msg += self.issues_url(self.issue)
else:
msg += " (see issue %s)" % self.issue
if self.use_instead is not None:
try:
msg += ". Use %s instead" % self.use_instead.__name__
except AttributeError:
msg += ". Use %s instead" % self.use_instead
return msg + "."
def __call__(self, wrapped):
"""Decorates function to be deprecated"""
msg = self.warning_message % {"func_name": wrapped.__name__}
wrapped_doc = wrapped.__doc__ or ""
if hasattr(wrapped, "__mro__"): # wrapped is a class
class _Wrapper(wrapped):
__doc__ = msg + "\n\n" + wrapped_doc
def __init__(_self, *args, **kwargs):
warnings.warn(msg, self.warning, stacklevel=2)
wrapped.__init__(_self, *args, **kwargs)
else: # wrapped is a function
def _Wrapper(*args, **kwargs):
warnings.warn(msg, self.warning, stacklevel=2)
return wrapped(*args, **kwargs)
_Wrapper.__doc__ = msg + "\n\n" + wrapped_doc
self._deprecations[_Wrapper] = self
_Wrapper.__name__ = wrapped.__name__
_Wrapper.__module__ = wrapped.__module__
return _Wrapper
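# Illustrative usage sketch (not part of the original module): deprecating a helper in
# favour of a replacement with a custom warning class, as the Notes section above
# suggests. The function names and version strings are assumptions for the example.
class _ExampleDeprecationWarning(DeprecationWarning):
    pass

def _new_helper(x):
    return 2 * x

@Deprecation(
    last_supported_version='1.2.0',
    will_be_missing_in='2.0.0',
    use_instead=_new_helper,
    warning=_ExampleDeprecationWarning,
)
def _old_helper(x):
    # calling this warns: "_old_helper is deprecated since (not including) 1.2.0,
    # it will be missing in 2.0.0. Use _new_helper instead."
    return x + x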
|
the-stack_0_25697
|
import setuptools
packages = setuptools.find_packages()
package_name = packages[0]
project_name = package_name.replace('_', '-')
setuptools.setup(
name=project_name,
license='MIT',
author='thewizardplusplus',
author_email='[email protected]',
url='https://github.com/thewizardplusplus/wizard-diary',
packages=packages,
install_requires=[
'gitpython >=2.1.3, <3.0',
'parsedatetime >=2.3, <3.0',
'tzlocal >=1.4, <2.0',
'xerox >=0.4.1, <1.0',
'termcolor >=1.1.0, <2.0',
],
python_requires='>=3.5, <4.0',
entry_points={'console_scripts': [
'{} = {}:main'.format(project_name, package_name),
]},
test_suite='{}.tests'.format(package_name),
)
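# Illustrative note (not part of the original setup script): assuming the single package
# found above is "wizard_diary" (implied by the repository URL), the derived project name
# becomes "wizard-diary" and the generated console-script entry point expands to
# "wizard-diary = wizard_diary:main".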
|
the-stack_0_25701
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot physiological signal vs. time and return acquisition time for each repetition.
Created on Tue Jul 4 17:45:43 2017
@author: slevy
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
import argparse
import os
from datetime import datetime
class Param:
def __init__(self):
self.out_fname = 'physio'
self.sampling_period = 20 # ms
self.physiolog_type = 'cmrr' # type of physiolog, either "slr" (as coded by Simon Levy-Rosetti) or "cmrr" (as coded in CMRR sequences)
def main(log_fname, out_fname):
"""Main."""
# different processing depending on the physiolog type
if param_default.physiolog_type == 'slr':
# extract physio signal
time, physio_values, epi_acqtime, epi_event, acq_window = read_physiolog(log_fname)
# sort event times
reps_table, slices_table = sort_event_times(epi_acqtime, epi_event)
# plot physio signal
plot_physio(time, physio_values, epi_acqtime, reps_table, acq_window, out_fname)
# write acquisition time of each measurement
pickle.dump([time, epi_acqtime, reps_table, physio_values, acq_window], open(out_fname+"_trigtimes.pickle", "wb"))
elif param_default.physiolog_type == 'cmrr':
# extract physio signal
time, trigger_start_time, trigger_end_time, physio_values, acq_window, acq_start_time = read_physiolog_cmrr(log_fname)
# plot physio signal along with trigger start and end time
plot_physio_cmrr(time, trigger_start_time, trigger_end_time, physio_values, acq_window, out_fname)
# write acquisition time of each measurement
pickle.dump([time, trigger_start_time, trigger_end_time, physio_values, acq_window], open(out_fname + "_trigtimes.pickle", "wb"))
print('****Done.****')
def read_physiolog(path, sampling_period=20):
"""
Read physio logfile and parse it.
:param path: path to the physio logfile
:param sampling_period: sampling period in ms
:return: time axis, physio values, EPI acquisition times, EPI events, acquisition window
"""
file = open(path, 'r')
text = file.readlines()
physio_sig = np.array(text[2].strip().split(' '), dtype=str)
# get stats in file footer
acq_window_idx_line = [text.index(line) for line in text if "AcqWin" in line]
acq_window = float(text[acq_window_idx_line[0]].strip().split(' ')[-1])
# get time axis, time of trigger start and time of trigger end
time, epi_acqtime, epi_event, physio_values = [], [], [], []
sampling_count = 0
for i_meas in range(len(physio_sig)):
if (physio_sig[i_meas] not in ['5000', '6000', '5002', '6002']) and ("[" not in physio_sig[i_meas]):
time.append(sampling_count*sampling_period)
physio_values.append(int(physio_sig[i_meas]))
sampling_count += 1
elif ("[" in physio_sig[i_meas]) and ("]" in physio_sig[i_meas]):
epi_acqtime.append((sampling_count-1)*sampling_period)
epi_event.append(physio_sig[i_meas])
return np.array(time), np.array(physio_values), np.array(epi_acqtime), np.array(epi_event), acq_window
def read_physiolog_cmrr(path, sampling_period=20):
"""
Read physio logfile and parse it.
:param path: path to the CMRR physio logfile
:param sampling_period: sampling period in ms
:return: time axis, trigger start times, trigger end times, physio values, acquisition window, acquisition start time (datetime)
"""
file = open(path, 'r')
text = file.readlines()
physio_sig = np.array(text[0].strip().split(' '), dtype=str)
# get useful data in file footer
# Acquisition window
acq_window_idx_line = [text.index(line) for line in text if "AcqWin" in line]
acq_window = float(text[acq_window_idx_line[0]].strip().split(' ')[-1])
# Start time of acquisition
acqStartTime_idx_line = [text.index(line) for line in text if "LogStartMDHTime:" in line]
# acqStartTime = datetime.strptime(acqStartTime_date+'-'+str(int(acqStartTime_seconds/1000))+'.'+str(acqStartTime_seconds)[-5:], "%Y%m%d-%S.%f")
# acqStartTime = datetime.timedelta(milliseconds=float(text[acqStartTime_idx_line[0]].strip().split('LogStartMDHTime:')[-1]))
acqStartTime_time = datetime.utcfromtimestamp(float(text[acqStartTime_idx_line[0]].strip().split('LogStartMDHTime:')[-1]) / 1000.0)
acqStartTime_date = datetime.strptime(os.path.basename(path).split('_')[2], "%Y%m%d")
acqStartTime = datetime.combine(acqStartTime_date.date(), acqStartTime_time.time())
# # remove first (6002=end of info added by sequence, which was at the opening of the logfile) and last (last 5002=section added to indicate acquisition end) elements
# idx_start, idx_end = np.min(np.where(physio_sig == '6002')), np.max(np.where(physio_sig == '5002'))
# physio_sig = physio_sig[idx_start+1:idx_end]
# get time axis, time of trigger start, time of trigger end and physiological values
time, trigger_start_times, trigger_end_times, physio_values = [], [], [], []
sampling_count = 0
logInfo_ON = False
for i_meas in range(len(physio_sig)):
if (physio_sig[i_meas] not in ['5000', '6000', '5002', '6002']) and ("[" not in physio_sig[i_meas]) and (not logInfo_ON):
time.append(sampling_count*sampling_period)
physio_values.append(int(physio_sig[i_meas]))
sampling_count += 1
elif physio_sig[i_meas] == '5000':
trigger_start_times.append(sampling_count*sampling_period)
elif physio_sig[i_meas] == '6000':
trigger_end_times.append(sampling_count*sampling_period)
elif physio_sig[i_meas] == '5002':
logInfo_ON = True
elif physio_sig[i_meas] == '6002':
logInfo_ON = False
return np.array(time), np.array(trigger_start_times), np.array(trigger_end_times), np.array(physio_values), acq_window, acqStartTime
def sort_event_times(epi_acqtimes, epi_events):
"""
:param epi_acqtimes: acquisition times (ms) of the EPI events
:param epi_events: EPI event strings containing Rep# and Slice# fields
:return: reps_table (rep number, dummy/real flag), slices_table (acquisition time of each slice per rep)
"""
if len(epi_acqtimes) != len(epi_events):
os.error("ERROR: Number of times and events are different.")
# extract rep and slice numbers of each scan event
reps, slices = [], []
for i_event in range(len(epi_acqtimes)):
reps.append(int(epi_events[i_event].strip().split('Rep#=')[1].split(',')[0]))
slices.append(int(epi_events[i_event].strip().split('Slice#=')[1].split(',')[0]))
reps = np.array(reps)
slices = np.array(slices)
# get repetitions numbers, acquisition time for each slice of each rep, real or dummy scan
n_slices = max(slices)+1
n_reps = int(epi_events.size/n_slices)
# get acquisition time of each slice for each rep
slices_table = np.zeros((int(n_reps), n_slices))
for i_slice in range(n_slices):
slices_table[:, i_slice] = epi_acqtimes[np.where(slices == i_slice)[0]]
# get rep number and dummy or real scan
reps_table = np.zeros((int(n_reps), 2)) # rep number, dummy/real scan (0: dummy scan, 1: real scan)
idx_new_rep = np.where(slices == min(slices))[0]
reps_table[:, 0] = reps[idx_new_rep] # get rep number
# define for each rep if it is a dummy scan or not
for i_new_rep in range(0, len(idx_new_rep)):
if reps_table[i_new_rep, 0] == 1 and reps_table[i_new_rep-1, 0] == 0:
# if we start the second real scan, the previous one was also a real scan
reps_table[i_new_rep-1, 1] = 1
reps_table[i_new_rep, 1] = 1
elif reps_table[i_new_rep, 0] > 1:
# if the rep number is more than 1, it was also a real scan
reps_table[i_new_rep, 1] = 1
return reps_table, slices_table
def plot_physio(time, physio_sig, epi_acqtime, reps_table, acq_window, out_fname):
fig = plt.figure(figsize=(20, 9.5))
plt.title('Saved to: '+out_fname+'_plot.pdf')
plt.plot(time/1000., physio_sig, '+-', label='physio signal', color='b')
# add vertical rectangle for each repetition period
legend_label_counter = [0, 0]
nSlices = int(len(epi_acqtime)/reps_table.shape[0])
for i_rep in range(reps_table.shape[0]):
if reps_table[i_rep, 1] == 1:
plt.axvspan(epi_acqtime[i_rep*nSlices]/1000., (epi_acqtime[i_rep*nSlices] + acq_window)/1000., facecolor='orange', alpha=0.15, label='acquisition window' if sum(legend_label_counter) == 0 else "_nolegend_")
plt.axvspan(epi_acqtime[i_rep*nSlices]/1000., epi_acqtime[i_rep*nSlices+nSlices-1]/1000., facecolor='r', alpha=0.25, label='repetitions' if legend_label_counter[0] == 0 else '_nolegend_')
legend_label_counter[0] += 1
else:
plt.axvspan(epi_acqtime[i_rep*nSlices]/1000., (epi_acqtime[i_rep*nSlices] + acq_window)/1000., facecolor='orange', alpha=0.15, label='acquisition window' if sum(legend_label_counter) == 0 else "_nolegend_")
plt.axvspan(epi_acqtime[i_rep*nSlices]/1000., epi_acqtime[i_rep*nSlices+nSlices-1]/1000., facecolor='gray', alpha=0.25, label='dummy scans' if legend_label_counter[1] == 0 else '_nolegend_')
legend_label_counter[1] += 1
# add vertical lines for each slice (each epi event actually)
for xc in epi_acqtime:
plt.axvline(x=xc/1000., color='g', label='slices' if np.where(epi_acqtime==xc)[0][0] == 0 else "_nolegend_")
plt.legend()
plt.xlabel('Time (s)')
plt.ylabel('Physio signal')
plt.show(block=True)
fig.savefig(out_fname+'_plot.pdf')
plt.close()
def extract_acqTimes_cmrr(triggerStartTime, acqTime_firstImg, acqStartTime, triggerEndTime):
"""
:param triggerStartTime: in milliseconds
:param acqTime_firstImg: datetime object
:param acqStartTime: datetime object
:param triggerEndTime: in milliseconds
:return: acquisition times in milliseconds
"""
# remove all triggers start time without paired trigger end time
idxTrigToKeep = []
for i_trig in range(len(triggerStartTime)):
if (i_trig == len(triggerStartTime)-1) and (triggerEndTime > triggerStartTime[i_trig]).any():
idxTrigToKeep.append(i_trig)
elif ((triggerEndTime > triggerStartTime[i_trig]) & (triggerEndTime < triggerStartTime[i_trig+1])).any():
idxTrigToKeep.append(i_trig)
triggerStartTime_notStopped = triggerStartTime[idxTrigToKeep]
# get the duration of dummy or auto-calibration scans in microseconds
seqInitDuration = acqTime_firstImg - acqStartTime
# only keep trigger times after the sequence initialization period
triggerFirstImg_idx = np.abs(triggerStartTime_notStopped - seqInitDuration.total_seconds()*1000).argmin()
acqTimes = triggerStartTime_notStopped[triggerFirstImg_idx:]
return acqTimes
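# Illustrative usage sketch (not part of the original script): combining
# read_physiolog_cmrr() and extract_acqTimes_cmrr() to obtain the trigger times of the
# actual image acquisitions. The logfile path and the acquisition time of the first
# image (normally taken from the DICOM header) are assumptions for the example.
def _example_extract_acq_times(log_fname, acq_time_first_img):
    time_axis, trig_start, trig_end, physio, acq_window, acq_start_time = \
        read_physiolog_cmrr(log_fname)
    return extract_acqTimes_cmrr(trig_start, acq_time_first_img,
                                 acq_start_time, trig_end)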
def plot_physio_cmrr(time, trigger_start_times, trigger_end_times, physio_sig, acq_window, out_fname):
if trigger_start_times.shape[0] != trigger_end_times.shape[0]:
os.error("ERROR: Number of start and end times are different.")
fig = plt.figure("CMRR physiolog signal", figsize=(30, 20))
plt.plot(time, physio_sig, '+-', label='physio signal', color='b')
# add vertical rectangle for each trigger signal and add acquisition window
for i_trig in range(trigger_start_times.shape[0]):
plt.axvspan(trigger_start_times[i_trig], trigger_start_times[i_trig] + acq_window, facecolor='orange', alpha=0.15, label='acquisition window' if i_trig == 0 else "_nolegend_")
plt.axvspan(trigger_start_times[i_trig], trigger_end_times[i_trig], facecolor='g', alpha=0.25, label='trigger signal' if i_trig == 0 else "_nolegend_")
# # add vertical lines for trigger start and end
# for t_start, t_end in zip(trigger_start_times, trigger_end_times):
# plt.axvline(x=t_start, color='g')
# plt.axvline(x=t_end, color='r')
# plt.text(t_start, 1000, 'Trigger period = '+str(t_end-t_start)+'ms', rotation=90)
plt.legend()
plt.xlabel('Time (ms)')
plt.ylabel('Physio signal')
plt.show(block=False)
fig.savefig(out_fname+'_plot.pdf')
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(description='Plot physiological signal vs. time and return acquisition time for each repetition.')
optionalArgs = parser._action_groups.pop()
requiredArgs = parser.add_argument_group('required arguments')
requiredArgs.add_argument('-i', dest='ifname', help='Path to physio log file.', type=str, required=True)
requiredArgs.add_argument('-o', dest='ofname', help='Output file name for plot and file storing acquisition times.', type=str, required=True)
parser._action_groups.append(optionalArgs)
args = parser.parse_args()
# params
param_default = Param()
# run main
main(log_fname=args.ifname, out_fname=args.ofname)
|
the-stack_0_25702
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2020 Snowflake Computing Inc. All right reserved.
#
import itertools
import random
import time
from datetime import datetime
from decimal import Decimal
import pytest
try:
from snowflake.connector.options import installed_pandas, pandas # NOQA
except ImportError:
installed_pandas = False
pandas = None
try:
import pyarrow # NOQA
except ImportError:
pass
try:
from snowflake.connector.arrow_iterator import PyArrowIterator # NOQA
no_arrow_iterator_ext = False
except ImportError:
no_arrow_iterator_ext = True
SQL_ENABLE_ARROW = "alter session set python_connector_query_result_format='ARROW';"
EPSILON = 1e-8
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_num_one(conn_cnx):
print('Test fetching one single dataframe')
row_count = 50000
col_count = 2
random_seed = get_random_seed()
sql_exec = ("select seq4() as c1, uniform(1, 10, random({})) as c2 from ".format(random_seed) +
"table(generator(rowcount=>{})) order by c1, c2".format(row_count))
fetch_pandas(conn_cnx, sql_exec, row_count, col_count, 'one')
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_scaled_tinyint(conn_cnx):
cases = ["NULL", 0.11, -0.11, "NULL", 1.27, -1.28, "NULL"]
table = "test_arrow_tiny_int"
column = "(a number(5,2))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one')
finish(conn_cnx, table)
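# Illustrative sketch (not part of the original tests): how the "values" string passed to
# init() is built from a case list. Each case becomes an "(index, value)" row; the index
# is what the queries order by (the column referred to as "s").
def _example_values_string():
    cases = ["NULL", 0.11, -0.11]
    values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
    assert values == "(0, NULL),(1, 0.11),(2, -0.11)"
    return values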
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_scaled_smallint(conn_cnx):
cases = ["NULL", 0, 0.11, -0.11, "NULL", 32.767, -32.768, "NULL"]
table = "test_arrow_small_int"
column = "(a number(5,3))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_scaled_int(conn_cnx):
cases = ["NULL", 0, "NULL", 0.123456789, -0.123456789, 2.147483647, -2.147483648, "NULL"]
table = "test_arrow_int"
column = "(a number(10,9))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is not installed.")
def test_scaled_bigint(conn_cnx):
cases = ["NULL", 0, "NULL",
"1.23456789E-10", "-1.23456789E-10",
"2.147483647E-9", "-2.147483647E-9",
"-1e-9", "1e-9",
"1e-8", "-1e-8",
"NULL"]
table = "test_arrow_big_int"
column = "(a number(38,18))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', epsilon=EPSILON)
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_decimal(conn_cnx):
cases = ["NULL", 0, "NULL",
"10000000000000000000000000000000000000",
"12345678901234567890123456789012345678",
"99999999999999999999999999999999999999",
"-1000000000000000000000000000000000000",
"-2345678901234567890123456789012345678",
"-9999999999999999999999999999999999999",
"NULL"]
table = "test_arrow_decimal"
column = "(a number(38,0))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='decimal')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is not installed.")
def test_scaled_decimal(conn_cnx):
cases = ["NULL", 0, "NULL",
"1.0000000000000000000000000000000000000",
"1.2345678901234567890123456789012345678",
"9.9999999999999999999999999999999999999",
"-1.000000000000000000000000000000000000",
"-2.345678901234567890123456789012345678",
"-9.999999999999999999999999999999999999",
"NULL"]
table = "test_arrow_decimal"
column = "(a number(38,37))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='decimal')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is not installed.")
def test_scaled_decimal_SNOW_133561(conn_cnx):
cases = ["NULL", 0, "NULL",
"1.2345",
"2.1001",
"2.2001",
"2.3001",
"2.3456",
"-9.999",
"-1.000",
"-3.4567",
"3.4567",
"4.5678",
"5.6789",
"-0.0012",
"NULL"]
table = "test_scaled_decimal_SNOW_133561"
column = "(a number(38,10))"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='float')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_boolean(conn_cnx):
cases = ["NULL", True, "NULL", False, True, True, "NULL", True, False, "NULL"]
table = "test_arrow_boolean"
column = "(a boolean)"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_double(conn_cnx):
cases = ["NULL",
# SNOW-31249
"-86.6426540296895",
"3.14159265359",
# SNOW-76269
"1.7976931348623157E308",
"1.7E308",
"1.7976931348623151E308",
"-1.7976931348623151E308",
"-1.7E308",
"-1.7976931348623157E308",
"NULL"]
table = "test_arrow_double"
column = "(a double)"
values = "(" + "),(".join(["{}, {}".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_semi_struct(conn_cnx):
sql_text = """
select array_construct(10, 20, 30),
array_construct(null, 'hello', 3::double, 4, 5),
array_construct(),
object_construct('a',1,'b','BBBB', 'c',null),
object_construct('Key_One', parse_json('NULL'), 'Key_Two', null, 'Key_Three', 'null'),
to_variant(3.2),
parse_json('{ "a": null}'),
100::variant;
"""
res = [
"[\n" +
" 10,\n" +
" 20,\n" +
" 30\n" +
"]",
"[\n" +
" undefined,\n" +
" \"hello\",\n" +
" 3.000000000000000e+00,\n" +
" 4,\n" +
" 5\n" +
"]",
"[]",
"{\n" +
" \"a\": 1,\n" +
" \"b\": \"BBBB\"\n" +
"}",
"{\n" +
" \"Key_One\": null,\n" +
" \"Key_Three\": \"null\"\n" +
"}",
"3.2",
"{\n" +
" \"a\": null\n" +
"}",
"100"
]
with conn_cnx() as cnx_table:
# fetch dataframe with new arrow support
cursor_table = cnx_table.cursor()
cursor_table.execute(SQL_ENABLE_ARROW)
cursor_table.execute(sql_text)
df_new = cursor_table.fetch_pandas_all()
col_new = df_new.iloc[0]
for j, c_new in enumerate(col_new):
assert res[j] == c_new, '{} column: original value is {}, new value is {}, ' \
'values are not equal'.format(j, res[j], c_new)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_date(conn_cnx):
cases = ["NULL",
"2017-01-01",
"2014-01-02",
"2014-01-02",
"1970-01-01",
"1970-01-01",
"NULL",
"1969-12-31",
"0200-02-27",
"NULL",
"0200-02-28",
# "0200-02-29", # day is out of range
# "0000-01-01", # year 0 is out of range
"0001-12-31",
"NULL"]
table = "test_arrow_date"
column = "(a date)"
values = "(" + "),(".join(["{}, {}".format(i, c) if c == "NULL" else "{}, '{}'".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='date')
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
@pytest.mark.parametrize("scale",
[i for i in range(10)])
def test_time(conn_cnx, scale):
cases = ["NULL",
"00:00:51",
"01:09:03.100000",
"02:23:23.120000",
"03:56:23.123000",
"04:56:53.123400",
"09:01:23.123450",
"11:03:29.123456",
# note: Python's max time precision is microsecond, rest of them will lose precision
# "15:31:23.1234567",
# "19:01:43.12345678",
# "23:59:59.99999999",
"NULL"]
table = "test_arrow_time"
column = "(a time({}))".format(scale)
values = "(" + "),(".join(["{}, {}".format(i, c) if c == "NULL" else "{}, '{}'".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='time', scale=scale)
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
@pytest.mark.parametrize("scale", [i for i in range(10)])
def test_timestampntz(conn_cnx, scale):
cases = [
"NULL",
"1970-01-01 00:00:00",
"1970-01-01 00:00:01",
"1970-01-01 00:00:10",
"2014-01-02 16:00:00",
"2014-01-02 12:34:56",
"2017-01-01 12:00:00.123456789",
"2014-01-02 16:00:00.000000001",
"NULL",
"2014-01-02 12:34:57.1",
"1969-12-31 23:59:59.000000001",
"1970-01-01 00:00:00.123412423",
"1970-01-01 00:00:01.000001",
"1969-12-31 11:59:59.001",
# "0001-12-31 11:59:59.11",
# pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime:
# Out of bounds nanosecond timestamp: 1-12-31 11:59:59
"NULL"
]
table = "test_arrow_timestamp"
column = "(a timestampntz({}))".format(scale)
values = "(" + "),(".join(["{}, {}".format(i, c) if c == "NULL" else "{}, '{}'".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='timestamp', scale=scale)
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
@pytest.mark.parametrize("scale, timezone",
itertools.product(
[i for i in range(10)],
["UTC",
"America/New_York",
"Australia/Sydney"]))
def test_timestamptz(conn_cnx, scale, timezone):
cases = [
"NULL",
"1971-01-01 00:00:00",
"1971-01-11 00:00:01",
"1971-01-01 00:00:10",
"2014-01-02 16:00:00",
"2014-01-02 12:34:56",
"2017-01-01 12:00:00.123456789",
"2014-01-02 16:00:00.000000001",
"NULL",
"2014-01-02 12:34:57.1",
"1969-12-31 23:59:59.000000001",
"1970-01-01 00:00:00.123412423",
"1970-01-01 00:00:01.000001",
"1969-12-31 11:59:59.001",
# "0001-12-31 11:59:59.11",
# pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime:
# Out of bounds nanosecond timestamp: 1-12-31 11:59:59
"NULL"
]
table = "test_arrow_timestamp"
column = "(a timestamptz({}))".format(scale)
values = "(" + "),(".join(["{}, {}".format(i, c) if c == "NULL" else "{}, '{}'".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values, timezone=timezone)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='timestamptz', scale=scale, timezone=timezone)
finish(conn_cnx, table)
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
@pytest.mark.parametrize("scale, timezone",
itertools.product(
[i for i in range(10)],
["UTC",
"America/New_York",
"Australia/Sydney"
]))
def test_timestampltz(conn_cnx, scale, timezone):
cases = [
"NULL",
"1970-01-01 00:00:00",
"1970-01-01 00:00:01",
"1970-01-01 00:00:10",
"2014-01-02 16:00:00",
"2014-01-02 12:34:56",
"2017-01-01 12:00:00.123456789",
"2014-01-02 16:00:00.000000001",
"NULL",
"2014-01-02 12:34:57.1",
"1969-12-31 23:59:59.000000001",
"1970-01-01 00:00:00.123412423",
"1970-01-01 00:00:01.000001",
"1969-12-31 11:59:59.001",
# "0001-12-31 11:59:59.11",
# pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime:
# Out of bounds nanosecond timestamp: 1-12-31 11:59:59
"NULL"
]
table = "test_arrow_timestamp"
column = "(a timestampltz({}))".format(scale)
values = "(" + "),(".join(["{}, {}".format(i, c) if c == "NULL" else "{}, '{}'".format(i, c) for i, c in enumerate(cases)]) + ")"
init(conn_cnx, table, column, values, timezone=timezone)
sql_text = "select a from {} order by s".format(table)
validate_pandas(conn_cnx, sql_text, cases, 1, 'one', data_type='timestamp', scale=scale, timezone=timezone)
finish(conn_cnx, table)
def validate_pandas(conn_cnx, sql, cases, col_count, method='one', data_type='float', epsilon=None, scale=0, timezone=None):
"""Tests that parameters can be customized.
Args:
conn_cnx: Connection object.
sql: SQL command for execution.
cases: Test cases.
col_count: Number of columns in dataframe.
method: If method is 'batch', we fetch dataframes in batch. If method is 'one', we fetch a single dataframe
containing all data (Default value = 'one').
data_type: Defines how to compare values (Default value = 'float').
epsilon: For comparing double values (Default value = None).
scale: For comparing time values with scale (Default value = 0).
timezone: For comparing timestamp ltz (Default value = None).
"""
row_count = len(cases)
assert col_count != 0, '# of columns should be larger than 0'
with conn_cnx() as cnx_table:
# fetch dataframe with new arrow support
cursor_table = cnx_table.cursor()
cursor_table.execute(SQL_ENABLE_ARROW)
cursor_table.execute(sql)
# build dataframe
total_rows, total_batches = 0, 0
start_time = time.time()
if method == 'one':
df_new = cursor_table.fetch_pandas_all()
total_rows = df_new.shape[0]
else:
for df_new in cursor_table.fetch_pandas_batches():
total_rows += df_new.shape[0]
total_batches += 1
end_time = time.time()
print('new way (fetching {}) took {}s'.format(method, end_time - start_time))
if method == 'batch':
print('new way has # of batches : {}'.format(total_batches))
cursor_table.close()
assert total_rows == row_count, 'there should be {} rows, but {} rows'.format(row_count, total_rows)
# verify the correctness
# only do it when fetch one dataframe
if method == 'one':
assert (row_count, col_count) == df_new.shape, 'the shape of old dataframe is {}, ' \
'the shape of new dataframe is {}, ' \
'shapes are not equal'.format((row_count, col_count),
df_new.shape)
for i in range(row_count):
for j in range(col_count):
c_new = df_new.iat[i, j]
if cases[i] == "NULL":
assert c_new is None or pandas.isnull(c_new), '{} row, {} column: original value is NULL, ' \
'new value is {}, values are not equal'.format(
i, j, c_new)
else:
if data_type == 'float':
c_case = float(cases[i])
elif data_type == 'decimal':
c_case = Decimal(cases[i])
elif data_type == 'date':
c_case = datetime.strptime(cases[i], '%Y-%m-%d').date()
elif data_type == 'time':
time_str_len = 8 if scale == 0 else 9 + scale
c_case = cases[i].strip()[:time_str_len]
c_new = str(c_new).strip()[:time_str_len]
assert c_case == c_new, '{} row, {} column: original value is {}, ' \
'new value is {}, ' \
'values are not equal'.format(i, j, cases[i],
c_new)
break
elif data_type.startswith('timestamp'):
time_str_len = 19 if scale == 0 else 20 + scale
if timezone:
c_case = pandas.Timestamp(cases[i][:time_str_len], tz=timezone)
if data_type == 'timestamptz':
c_case = c_case.tz_convert('UTC')
c_case = c_case.tz_localize(None)
else:
c_case = pandas.Timestamp(cases[i][:time_str_len])
assert c_case == c_new, '{} row, {} column: original value is {}, new value is {}, ' \
'values are not equal'.format(i, j, cases[i], c_new)
break
else:
c_case = cases[i]
if epsilon is None:
assert c_case == c_new, '{} row, {} column: original value is {}, new value is {}, ' \
'values are not equal'.format(i, j, cases[i], c_new)
else:
assert abs(c_case - c_new) < epsilon, '{} row, {} column: original value is {}, ' \
'new value is {}, epsilon is {} \
values are not equal'.format(i, j, cases[i], c_new, epsilon)
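# Sketch of the timestamp normalization performed above (illustrative, assumes
# pandas is importable; the literal values are examples only):
#   expected = pandas.Timestamp('2014-01-02 16:00:00', tz='America/New_York')
#   expected = expected.tz_convert('UTC').tz_localize(None)   # timestamptz only
# i.e. TIMESTAMP_TZ expectations are compared as naive UTC timestamps, while
# TIMESTAMP_LTZ expectations stay localized to the session timezone.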
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_num_batch(conn_cnx):
print('Test fetching dataframes in batch')
row_count = 1000000
col_count = 2
random_seed = get_random_seed()
sql_exec = ("select seq4() as c1, uniform(1, 10, random({})) as c2 from ".format(random_seed) +
"table(generator(rowcount=>{})) order by c1, c2".format(row_count))
fetch_pandas(conn_cnx, sql_exec, row_count, col_count, 'batch')
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_empty(conn_cnx):
print('Test fetch empty dataframe')
with conn_cnx() as cnx:
cursor = cnx.cursor()
cursor.execute(SQL_ENABLE_ARROW)
cursor.execute("select seq4() as foo, seq4() as bar from table(generator(rowcount=>1)) limit 0")
result = cursor.fetch_pandas_all()
assert result.empty
assert len(list(result)) == 2
assert list(result)[0] == 'FOO'
assert list(result)[1] == 'BAR'
cursor.execute("select seq4() as foo from table(generator(rowcount=>1)) limit 0")
df_count = 0
for _ in cursor.fetch_pandas_batches():
df_count += 1
assert df_count == 0
def get_random_seed():
random.seed(datetime.now())
return random.randint(0, 10000)
def fetch_pandas(conn_cnx, sql, row_count, col_count, method='one'):
"""Tests that parameters can be customized.
Args:
conn_cnx: Connection object.
sql: SQL command for execution.
row_count: Number of total rows combining all dataframes.
col_count: Number of columns in dataframe.
method: If method is 'batch', we fetch dataframes in batch. If method is 'one', we fetch a single dataframe
containing all data (Default value = 'one').
"""
assert row_count != 0, '# of rows should be larger than 0'
assert col_count != 0, '# of columns should be larger than 0'
with conn_cnx() as cnx_row:
with conn_cnx() as cnx_table:
# fetch dataframe by fetching row by row
cursor_row = cnx_row.cursor()
cursor_row.execute(SQL_ENABLE_ARROW)
cursor_row.execute(sql)
# build dataframe
            # note: the execution time here will differ from `pandas.read_sql()` via
            # sqlalchemy, which is what most people use; a proper perf comparison can
            # be done separately
start_time = time.time()
rows = 0
if method == 'one':
df_old = pandas.DataFrame(cursor_row.fetchall(), columns=['c{}'.format(i) for i in range(col_count)])
else:
print("use fetchmany")
while True:
dat = cursor_row.fetchmany(10000)
if not dat:
break
else:
df_old = pandas.DataFrame(dat, columns=['c{}'.format(i) for i in range(col_count)])
rows += df_old.shape[0]
end_time = time.time()
print('The original way took {}s'.format(end_time - start_time))
cursor_row.close()
# fetch dataframe with new arrow support
cursor_table = cnx_table.cursor()
cursor_table.execute(SQL_ENABLE_ARROW)
cursor_table.execute(sql)
# build dataframe
total_rows, total_batches = 0, 0
start_time = time.time()
if method == 'one':
df_new = cursor_table.fetch_pandas_all()
total_rows = df_new.shape[0]
else:
for df_new in cursor_table.fetch_pandas_batches():
total_rows += df_new.shape[0]
total_batches += 1
end_time = time.time()
print('new way (fetching {}) took {}s'.format(method, end_time - start_time))
if method == 'batch':
print('new way has # of batches : {}'.format(total_batches))
cursor_table.close()
assert total_rows == row_count, 'there should be {} rows, but {} rows'.format(row_count, total_rows)
# verify the correctness
# only do it when fetch one dataframe
if method == 'one':
assert df_old.shape == df_new.shape, 'the shape of old dataframe is {}, the shape of new dataframe is {}, \
shapes are not equal'.format(df_old.shape, df_new.shape)
for i in range(row_count):
col_old = df_old.iloc[i]
col_new = df_new.iloc[i]
for j, (c_old, c_new) in enumerate(zip(col_old, col_new)):
assert c_old == c_new, '{} row, {} column: old value is {}, new value is {}, \
values are not equal'.format(i, j, c_old, c_new)
else:
assert rows == total_rows, 'the number of rows are not equal {} vs {}'.format(rows, total_rows)
def init(conn_cnx, table, column, values, timezone=None):
with conn_cnx() as json_cnx:
cursor_json = json_cnx.cursor()
if timezone is not None:
cursor_json.execute("ALTER SESSION SET TIMEZONE = '{}'".format(timezone))
column_with_seq = column[0] + 's number, ' + column[1:]
cursor_json.execute("create or replace table {} {}".format(table, column_with_seq))
cursor_json.execute("insert into {} values {}".format(table, values))
def finish(conn_cnx, table):
with conn_cnx() as json_cnx:
cursor_json = json_cnx.cursor()
cursor_json.execute("drop table if exists {};".format(table))
@pytest.mark.skipif(
not installed_pandas or no_arrow_iterator_ext,
reason="arrow_iterator extension is not built, or pandas is missing.")
def test_arrow_fetch_result_scan(conn_cnx):
with conn_cnx() as cnx:
cur = cnx.cursor()
cur.execute("alter session set query_result_format='ARROW_FORCE'")
cur.execute("alter session set python_connector_query_result_format='ARROW_FORCE'")
res = cur.execute("select 1, 2, 3").fetch_pandas_all()
assert tuple(res) == ('1', '2', '3')
result_scan_res = cur.execute("select * from table(result_scan('{}'));".format(cur.sfqid)).fetch_pandas_all()
assert tuple(result_scan_res) == ('1', '2', '3')
@pytest.mark.parametrize('query_format', ('JSON', 'ARROW'))
@pytest.mark.parametrize('resultscan_format', ('JSON', 'ARROW'))
def test_query_resultscan_combos(conn_cnx, query_format, resultscan_format):
if query_format == 'JSON' and resultscan_format == 'ARROW':
pytest.xfail("fix not yet released to test deployment")
with conn_cnx() as cnx:
sfqid = None
results = None
scanned_results = None
with cnx.cursor() as query_cur:
query_cur.execute("alter session set python_connector_query_result_format='{}'".format(query_format))
query_cur.execute("select seq8(), randstr(1000,random()) from table(generator(rowcount=>100))")
sfqid = query_cur.sfqid
assert query_cur._query_result_format.upper() == query_format
if query_format == 'JSON':
results = query_cur.fetchall()
else:
results = query_cur.fetch_pandas_all()
with cnx.cursor() as resultscan_cur:
resultscan_cur.execute("alter session set python_connector_query_result_format='{}'".format(resultscan_format))
resultscan_cur.execute("select * from table(result_scan('{}'))".format(sfqid))
if resultscan_format == 'JSON':
scanned_results = resultscan_cur.fetchall()
else:
scanned_results = resultscan_cur.fetch_pandas_all()
assert resultscan_cur._query_result_format.upper() == resultscan_format
if isinstance(results, pandas.DataFrame):
results = [tuple(e) for e in results.values.tolist()]
if isinstance(scanned_results, pandas.DataFrame):
scanned_results = [tuple(e) for e in scanned_results.values.tolist()]
assert results == scanned_results
|
the-stack_0_25704
|
import datetime
from collections import defaultdict
from dagster import PartitionSetDefinition, ScheduleExecutionContext
from dagster.core.storage.pipeline_run import PipelineRunStatus, PipelineRunsFilter
from dagster.utils.partitions import date_partition_range
def _fetch_runs_by_partition(instance, partition_set_def, status_filters=None):
# query runs db for this partition set
filters = PipelineRunsFilter(tags={'dagster/partition_set': partition_set_def.name})
partition_set_runs = instance.get_runs(filters)
runs_by_partition = defaultdict(list)
for run in partition_set_runs:
if not status_filters or run.status in status_filters:
runs_by_partition[run.tags['dagster/partition']].append(run)
return runs_by_partition
def backfilling_partition_selector(
context: ScheduleExecutionContext, partition_set_def: PartitionSetDefinition, retry_failed=False
):
status_filters = [PipelineRunStatus.SUCCESS] if retry_failed else None
runs_by_partition = _fetch_runs_by_partition(
context.instance, partition_set_def, status_filters
)
selected = None
for partition in partition_set_def.get_partitions():
runs = runs_by_partition[partition.name]
selected = partition
# break when we find the first empty partition
if len(runs) == 0:
break
# may return an already satisfied final partition - bank on should_execute to prevent firing in schedule
return selected
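# Illustrative walk-through (assumption, not part of the original module): with
# partitions [p1, p2, p3] and existing runs only for p1, the loop above keeps
# advancing `selected` and stops at p2, the first partition without runs; if
# every partition already has runs, the final partition is returned and
# should_execute is relied upon to skip the tick.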
def backfill_should_execute(context, partition_set_def, retry_failed=False):
status_filters = (
[PipelineRunStatus.STARTED, PipelineRunStatus.SUCCESS] if retry_failed else None
)
runs_by_partition = _fetch_runs_by_partition(
context.instance, partition_set_def, status_filters
)
for runs in runs_by_partition.values():
for run in runs:
# if any active runs - don't start a new one
if run.status == PipelineRunStatus.STARTED:
return False # would be nice to return a reason here
available_partitions = set([partition.name for partition in partition_set_def.get_partitions()])
satisfied_partitions = set(runs_by_partition.keys())
is_remaining_partitions = bool(available_partitions.difference(satisfied_partitions))
return is_remaining_partitions
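# Minimal sketch of the remaining-partition check above (names illustrative):
#   available = {'2020-01-05', '2020-01-12', '2020-01-19'}
#   satisfied = {'2020-01-05'}
#   bool(available.difference(satisfied))  # -> True, so the schedule may fire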
def backfill_test_schedule():
schedule_name = 'backfill_unreliable_weekly'
# create weekly partition set
partition_set = PartitionSetDefinition(
name='unreliable_weekly',
pipeline_name='unreliable_pipeline',
partition_fn=date_partition_range(
# first sunday of the year
start=datetime.datetime(2020, 1, 5),
delta=datetime.timedelta(weeks=1),
),
run_config_fn_for_partition=lambda _: {'storage': {'filesystem': {}}},
)
def _should_execute(context):
return backfill_should_execute(context, partition_set)
return partition_set.create_schedule_definition(
schedule_name=schedule_name,
cron_schedule="* * * * *", # tick every minute
partition_selector=backfilling_partition_selector,
should_execute=_should_execute,
)
def materialization_schedule():
# create weekly partition set
schedule_name = 'many_events_partitioned'
partition_set = PartitionSetDefinition(
name='many_events_minutely',
pipeline_name='many_events',
partition_fn=date_partition_range(start=datetime.datetime(2020, 1, 1)),
run_config_fn_for_partition=lambda _: {'storage': {'filesystem': {}}},
)
def _should_execute(context):
return backfill_should_execute(context, partition_set)
return partition_set.create_schedule_definition(
schedule_name=schedule_name,
cron_schedule="* * * * *", # tick every minute
partition_selector=backfilling_partition_selector,
should_execute=_should_execute,
)
def longitudinal_schedule():
from .toys.longitudinal import longitudinal_config
schedule_name = 'longitudinal_demo'
partition_set = PartitionSetDefinition(
name='ingest_and_train',
pipeline_name='longitudinal_pipeline',
partition_fn=date_partition_range(start=datetime.datetime(2020, 1, 1)),
run_config_fn_for_partition=longitudinal_config,
)
def _should_execute(context):
return backfill_should_execute(context, partition_set, retry_failed=True)
def _partition_selector(context, partition_set):
return backfilling_partition_selector(context, partition_set, retry_failed=True)
return partition_set.create_schedule_definition(
schedule_name=schedule_name,
cron_schedule="*/5 * * * *", # tick every 5 minutes
partition_selector=_partition_selector,
should_execute=_should_execute,
)
def get_bay_bikes_schedules():
from dagster_examples.bay_bikes.schedules import (
daily_weather_ingest_schedule,
daily_weather_schedule,
monthly_trip_ingest_schedule,
)
return [daily_weather_ingest_schedule, daily_weather_schedule, monthly_trip_ingest_schedule]
def get_toys_schedules():
from dagster import ScheduleDefinition, file_relative_path
return [
backfill_test_schedule(),
longitudinal_schedule(),
materialization_schedule(),
ScheduleDefinition(
name="many_events_every_min",
cron_schedule="* * * * *",
pipeline_name='many_events',
run_config_fn=lambda _: {"storage": {"filesystem": {}}},
),
ScheduleDefinition(
name="pandas_hello_world_hourly",
cron_schedule="0 * * * *",
pipeline_name="pandas_hello_world_pipeline",
run_config_fn=lambda _: {
'solids': {
'mult_solid': {
'inputs': {
'num_df': {
'csv': {
'path': file_relative_path(
__file__, "pandas_hello_world/data/num.csv"
)
}
}
}
},
'sum_solid': {
'inputs': {
'num_df': {
'csv': {
'path': file_relative_path(
__file__, "pandas_hello_world/data/num.csv"
)
}
}
}
},
},
"storage": {"filesystem": {}},
},
),
]
|
the-stack_0_25705
|
from collections import namedtuple
from django.db.models import Sum
from django.template.response import TemplateResponse
from django.urls import reverse_lazy as reverse
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from ...forms.reports.events import EventPaymentsForm, EventPaymentsStatusForm, EventStatsForm
from ...models.agegroup import AgeGroup
from ...models.citizenship import Citizenship
from ...models.events import Event, EventRegistration
from ...models.roles import Participant
from ...models.subjects import SubjectPayment, SubjectRegistrationParticipant, SubjectType
from ...views.generic import FormView
class ReportEventPaymentsView(FormView):
form_class = EventPaymentsForm
template_name = "leprikon/reports/event_payments.html"
title = _("Event payments")
submit_label = _("Show")
back_url = reverse("leprikon:report_list")
def form_valid(self, form):
context = form.cleaned_data
context["form"] = form
context["received_payments"] = SubjectPayment.objects.filter(
target_registration__subject__subject_type__subject_type=SubjectType.EVENT,
accounted__gte=context["date_start"],
accounted__lte=context["date_end"],
)
context["returned_payments"] = SubjectPayment.objects.filter(
source_registration__subject__subject_type__subject_type=SubjectType.EVENT,
accounted__gte=context["date_start"],
accounted__lte=context["date_end"],
)
context["received_payments_sum"] = context["received_payments"].aggregate(sum=Sum("amount"))["sum"] or 0
context["returned_payments_sum"] = context["returned_payments"].aggregate(sum=Sum("amount"))["sum"] or 0
context["sum"] = context["received_payments_sum"] - context["returned_payments_sum"]
return TemplateResponse(self.request, self.template_name, self.get_context_data(**context))
class ReportEventPaymentsStatusView(FormView):
form_class = EventPaymentsStatusForm
template_name = "leprikon/reports/event_payments_status.html"
title = _("Event payments status")
submit_label = _("Show")
back_url = reverse("leprikon:report_list")
EventPaymentsStatusSums = namedtuple("EventPaymentsStatusSums", ("registrations", "status"))
def form_valid(self, form):
context = form.cleaned_data
context["form"] = form
context["reports"] = [
self.Report(event, context["date"]) for event in Event.objects.filter(school_year=self.request.school_year)
]
context["sum"] = self.EventPaymentsStatusSums(
registrations=sum(len(r.registration_statuses) for r in context["reports"]),
status=sum(r.status for r in context["reports"]),
)
return TemplateResponse(self.request, self.template_name, self.get_context_data(**context))
class Report:
def __init__(self, event, d):
self.event = event
self.date = d
RegPaymentStatus = namedtuple("RegPaymentStatus", ("registration", "status"))
@cached_property
def registration_statuses(self):
return [
registration_status
for registration_status in (
self.RegPaymentStatus(
registration=registration,
status=registration.get_payment_status(self.date),
)
for registration in EventRegistration.objects.filter(
subject=self.event,
approved__date__lte=self.date,
)
)
if registration_status.status.receivable
]
@cached_property
def status(self):
return sum(rs.status for rs in self.registration_statuses)
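        # Note (assumption, added for clarity): sum(rs.status ...) relies on the
        # payment-status objects supporting addition; only registrations whose
        # status is receivable (see registration_statuses above) contribute.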
class ReportEventStatsView(FormView):
form_class = EventStatsForm
template_name = "leprikon/reports/event_stats.html"
title = _("Event statistics")
submit_label = _("Show")
back_url = reverse("leprikon:report_list")
ReportItem = namedtuple("ReportItem", ("age_group", "all", "boys", "girls", "citizenships"))
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["school_year"] = self.request.school_year
return kwargs
def form_valid(self, form):
d = form.cleaned_data["date"]
paid_only = form.cleaned_data["paid_only"]
context = form.cleaned_data
context["form"] = form
participants = (
SubjectRegistrationParticipant.objects.filter(
registration__subject__in=form.cleaned_data["events"],
registration__approved__date__lte=d,
)
.exclude(registration__canceled__date__lte=d)
.select_related("registration", "age_group")
)
if paid_only:
participants = [
participant
for participant in participants
if participant.registration.eventregistration.get_payment_status(d).balance >= 0
]
else:
participants = list(participants)
context["events_count"] = len(set(participant.registration.subject_id for participant in participants))
citizenships = list(Citizenship.objects.all())
context["citizenships"] = citizenships
context["participants_counts"] = self.ReportItem(
age_group=None,
all=len(participants),
boys=len([p for p in participants if p.gender == Participant.MALE]),
girls=len([p for p in participants if p.gender == Participant.FEMALE]),
citizenships=[
len([p for p in participants if p.citizenship_id == citizenship.id]) for citizenship in citizenships
],
)
context["participants_counts_by_age_groups"] = []
for age_group in AgeGroup.objects.all():
parts = [p for p in participants if p.age_group == age_group]
context["participants_counts_by_age_groups"].append(
self.ReportItem(
age_group=age_group,
all=len(parts),
boys=len([p for p in parts if p.gender == Participant.MALE]),
girls=len([p for p in parts if p.gender == Participant.FEMALE]),
citizenships=[
len([p for p in parts if p.citizenship_id == citizenship.id]) for citizenship in citizenships
],
)
)
return TemplateResponse(self.request, self.template_name, self.get_context_data(**context))
|
the-stack_0_25707
|
"""HTML utilities suitable for global use."""
import html
import json
import re
from html.parser import HTMLParser
from urllib.parse import (
parse_qsl,
quote,
unquote,
urlencode,
urlsplit,
urlunsplit,
)
from django.utils.encoding import punycode
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, SafeString, mark_safe
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION_CHARS = ".,:;!"
WRAPPING_PUNCTUATION = [("(", ")"), ("[", "]")]
# List of possible strings used for bullets in bulleted lists.
DOTS = ["&middot;", "*", "\u2022", "&#149;", "&bull;", "&#8226;"]
word_split_re = _lazy_re_compile(r"""([\s<>"']+)""")
simple_url_re = _lazy_re_compile(r"^https?://\[?\w", re.IGNORECASE)
simple_url_2_re = _lazy_re_compile(
r"^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$", re.IGNORECASE
)
@keep_lazy(str, SafeString)
def escape(text):
"""
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
Always escape input, even if it's already escaped and marked as such.
This may result in double-escaping. If this is a concern, use
conditional_escape() instead.
"""
return mark_safe(html.escape(str(text)))
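# Usage note (illustrative, not part of the original module): escape() always
# re-escapes, so escape(escape("<b>")) yields "&amp;lt;b&amp;gt;"; use
# conditional_escape() below when double-escaping must be avoided.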
_js_escapes = {
ord("\\"): "\\u005C",
ord("'"): "\\u0027",
ord('"'): "\\u0022",
ord(">"): "\\u003E",
ord("<"): "\\u003C",
ord("&"): "\\u0026",
ord("="): "\\u003D",
ord("-"): "\\u002D",
ord(";"): "\\u003B",
ord("`"): "\\u0060",
ord("\u2028"): "\\u2028",
ord("\u2029"): "\\u2029",
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord("%c" % z), "\\u%04X" % z) for z in range(32))
@keep_lazy(str, SafeString)
def escapejs(value):
"""Hex encode characters for use in JavaScript strings."""
return mark_safe(str(value).translate(_js_escapes))
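# Example (illustrative): escapejs("it's <script>") returns
# "it\u0027s \u003Cscript\u003E", which is safe inside a quoted JS string.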
_json_script_escapes = {
ord(">"): "\\u003E",
ord("<"): "\\u003C",
ord("&"): "\\u0026",
}
def json_script(value, element_id):
"""
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
"""
from django.core.serializers.json import DjangoJSONEncoder
json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)
return format_html(
'<script id="{}" type="application/json">{}</script>',
element_id,
mark_safe(json_str),
)
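# Illustrative output (whitespace aside, the values are examples only):
#   json_script({"msg": "<b>hi</b>"}, "payload")
# renders as
#   <script id="payload" type="application/json">{"msg": "\u003Cb\u003Ehi\u003C/b\u003E"}</script>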
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if isinstance(text, Promise):
text = str(text)
if hasattr(text, "__html__"):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()}
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
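# Doctest-style example (illustrative):
#   format_html('<a href="{}">{}</a>', 'http://example.com/?a=1&b=2', 'Click & go')
# returns the safe string
#   <a href="http://example.com/?a=1&amp;b=2">Click &amp; go</a>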
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(
conditional_escape(sep).join(
format_html(format_string, *args) for args in args_generator
)
)
@keep_lazy_text
def linebreaks(value, autoescape=False):
"""Convert newlines into <p> and <br>s."""
value = normalize_newlines(value)
paras = re.split("\n{2,}", str(value))
if autoescape:
paras = ["<p>%s</p>" % escape(p).replace("\n", "<br>") for p in paras]
else:
paras = ["<p>%s</p>" % p.replace("\n", "<br>") for p in paras]
return "\n\n".join(paras)
class MLStripper(HTMLParser):
def __init__(self):
super().__init__(convert_charrefs=False)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append("&%s;" % name)
def handle_charref(self, name):
self.fed.append("&#%s;" % name)
def get_data(self):
return "".join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(value)
s.close()
return s.get_data()
@keep_lazy_text
def strip_tags(value):
"""Return the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
value = str(value)
while "<" in value and ">" in value:
new_value = _strip_once(value)
if value.count("<") == new_value.count("<"):
# _strip_once wasn't able to detect more tags.
break
value = new_value
return value
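# Example (illustrative): strip_tags('<p>Hello <b>world</b>!</p>') == 'Hello world!'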
@keep_lazy_text
def strip_spaces_between_tags(value):
"""Return the given HTML with spaces between tags removed."""
return re.sub(r">\s+<", "><", str(value))
def smart_urlquote(url):
"""Quote a URL if it isn't already quoted."""
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC3986 Unreserved Characters
# https://tools.ietf.org/html/rfc3986#section-2.3
# See also https://bugs.python.org/issue16285
return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + "~")
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = punycode(netloc) # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [
(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)
]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Convert any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, truncate the URLs in the link text longer
than this limit to trim_url_limit - 1 characters and append an ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return "%s…" % x[: max(0, limit - 1)]
def trim_punctuation(lead, middle, trail):
"""
Trim trailing and wrapping punctuation from `middle`. Return the items
of the new state.
"""
# Continue trimming until middle remains unchanged.
trimmed_something = True
while trimmed_something:
trimmed_something = False
# Trim wrapping punctuation.
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening) :]
lead += opening
trimmed_something = True
# Keep parentheses at the end only if they're balanced.
if (
middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1
):
middle = middle[: -len(closing)]
trail = closing + trail
trimmed_something = True
# Trim trailing punctuation (after trimming wrapping punctuation,
# as encoded entities contain ';'). Unescape entities to avoid
# breaking them by removing ';'.
middle_unescaped = html.unescape(middle)
stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS)
if middle_unescaped != stripped:
punctuation_count = len(middle_unescaped) - len(stripped)
trail = middle[-punctuation_count:] + trail
middle = middle[:-punctuation_count]
trimmed_something = True
return lead, middle, trail
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if "@" not in value or value.startswith("@") or value.endswith("@"):
return False
try:
p1, p2 = value.split("@")
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if "." not in p2 or p2.startswith("."):
return False
return True
words = word_split_re.split(str(text))
for i, word in enumerate(words):
if "." in word or "@" in word or ":" in word:
# lead: Current punctuation trimmed from the beginning of the word.
# middle: Current state of the word.
# trail: Current punctuation trimmed from the end of the word.
lead, middle, trail = "", word, ""
# Deal with punctuation.
lead, middle, trail = trim_punctuation(lead, middle, trail)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ""
if simple_url_re.match(middle):
url = smart_urlquote(html.unescape(middle))
elif simple_url_2_re.match(middle):
url = smart_urlquote("http://%s" % html.unescape(middle))
elif ":" not in middle and is_email_simple(middle):
local, domain = middle.rsplit("@", 1)
try:
domain = punycode(domain)
except UnicodeError:
continue
url = "mailto:%s@%s" % (local, domain)
nofollow_attr = ""
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
words[i] = mark_safe("%s%s%s" % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return "".join(words)
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeString.
"""
if "__html__" in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if "__str__" not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
|
the-stack_0_25708
|
from player.player_shelter import PlayerShelter
from logic.game_state import GameState
from logic.city_card import CityCard
from tests.common import zombie, fast_zombie, big_zombie, dumper_factory
import tests.common
from enums.supply import Supply
def test_sanity_check():
assert fast_zombie
assert zombie
assert big_zombie
def test_player_shelter_class():
shelter = PlayerShelter()
assert shelter.name == ''
assert not shelter.defeated
assert len(shelter.zombies) == 0
assert len(shelter.supplies) == 0
assert len(shelter.obstacles) == 0
assert len(shelter.survivors) == 0
assert type(shelter.zombies) is list
assert type(shelter.supplies) is list
assert type(shelter.obstacles) is list
assert type(shelter.survivors) is list
def test_gui_default(fast_zombie, zombie, big_zombie):
gs = GameState()
gs.players = [PlayerShelter('Name_1'), PlayerShelter('Name_2'), PlayerShelter('Name_3')]
gs.players[1].zombies = [zombie, fast_zombie, fast_zombie, big_zombie, zombie]
gs.players[1].obstacles = [Supply.BARRICADES, Supply.ALARM]
gs.players[1].survivors = [CityCard(), CityCard()]
gs.players[2].zombies = [big_zombie, big_zombie]
gs.players[2].obstacles = [Supply.MINE_FILED]
gs.players[2].survivors = [CityCard(), CityCard(), CityCard()]
gs.active_player = gs.players[0]
shelter = gs.active_player
shelter.print = dumper_factory()
shelter.zombies = [zombie]
shelter.supplies = [Supply.AXE, Supply.MINE_FILED, Supply.LURE_OUT]
shelter.obstacles = [Supply.MINE_FILED, Supply.ALARM]
shelter.survivors = [CityCard(), CityCard(), CityCard(), CityCard()]
shelter.gui_default(gs)
assert len(tests.common.outputs) == 1
assert len(tests.common.outputs[0]) == 684
gs.city_deck = [zombie, CityCard()]
shelter.gui_default(gs)
assert len(tests.common.outputs) == 2
assert len(tests.common.outputs[1]) == 706
|
the-stack_0_25709
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from openstackclient.tests.functional.network.v2 import common
class NetworkTests(common.NetworkTagTests):
"""Functional tests for network"""
base_command = 'network'
def setUp(self):
super(NetworkTests, self).setUp()
# Nothing in this class works with Nova Network
if not self.haz_network:
self.skipTest("No Network service present")
def test_network_create_compute(self):
"""Test Nova-net create options, delete"""
if self.haz_network:
self.skipTest("Skip Nova-net test")
# Network create with minimum options
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--subnet 1.2.3.4/28 ' +
name1
))
self.addCleanup(self.openstack, 'network delete ' + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name1,
cmd_output["label"],
)
self.assertEqual(
'1.2.3.0/28',
cmd_output["cidr"],
)
# Network create with more options
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--subnet 1.2.4.4/28 ' +
'--share ' +
name2
))
self.addCleanup(self.openstack, 'network delete ' + name2)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name2,
cmd_output["label"],
)
self.assertEqual(
'1.2.4.0/28',
cmd_output["cidr"],
)
self.assertTrue(
cmd_output["share_address"],
)
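    # Note (added for clarity, not part of the original test): the Nova-net JSON
    # output uses "label", "cidr" and "share_address", whereas the Neutron tests
    # below assert on "description", "admin_state_up", "shared" and
    # "router:external" instead.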
def test_network_create_network(self):
"""Test Neutron create options, delete"""
if not self.haz_network:
self.skipTest("No Network service present")
# Get project IDs
cmd_output = json.loads(self.openstack('token issue -f json '))
auth_project_id = cmd_output['project_id']
cmd_output = json.loads(self.openstack('project list -f json '))
admin_project_id = None
demo_project_id = None
for p in cmd_output:
if p['Name'] == 'admin':
admin_project_id = p['ID']
if p['Name'] == 'demo':
demo_project_id = p['ID']
# Verify assumptions:
# * admin and demo projects are present
# * demo and admin are distinct projects
# * tests run as admin
self.assertIsNotNone(admin_project_id)
self.assertIsNotNone(demo_project_id)
self.assertNotEqual(admin_project_id, demo_project_id)
self.assertEqual(admin_project_id, auth_project_id)
# Network create with no options
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
name1
))
self.addCleanup(self.openstack, 'network delete ' + name1)
self.assertIsNotNone(cmd_output["id"])
# Check the default values
self.assertEqual(
admin_project_id,
cmd_output["project_id"],
)
self.assertEqual(
'',
cmd_output["description"],
)
self.assertEqual(
'UP',
cmd_output["admin_state_up"],
)
self.assertFalse(
cmd_output["shared"],
)
self.assertEqual(
'Internal',
cmd_output["router:external"],
)
# Network create with options
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--project demo ' +
name2
))
self.addCleanup(self.openstack, 'network delete ' + name2)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
demo_project_id,
cmd_output["project_id"],
)
self.assertEqual(
'',
cmd_output["description"],
)
def test_network_delete_compute(self):
"""Test create, delete multiple"""
if self.haz_network:
self.skipTest("Skip Nova-net test")
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--subnet 9.8.7.6/28 ' +
name1
))
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name1,
cmd_output["label"],
)
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--subnet 8.7.6.5/28 ' +
name2
))
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name2,
cmd_output["label"],
)
def test_network_delete_network(self):
"""Test create, delete multiple"""
if not self.haz_network:
self.skipTest("No Network service present")
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--description aaaa ' +
name1
))
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
'aaaa',
cmd_output["description"],
)
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json ' +
'--description bbbb ' +
name2
))
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
'bbbb',
cmd_output["description"],
)
del_output = self.openstack('network delete %s %s' % (name1, name2))
self.assertOutput('', del_output)
def test_network_list(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
if self.haz_network:
network_options = '--description aaaa --no-default '
else:
network_options = '--subnet 3.4.5.6/28 '
cmd_output = json.loads(self.openstack(
'network create -f json ' +
network_options +
name1
))
self.addCleanup(self.openstack, 'network delete %s' % name1)
self.assertIsNotNone(cmd_output["id"])
if self.haz_network:
self.assertEqual(
'aaaa',
cmd_output["description"],
)
# Check the default values
self.assertEqual(
'UP',
cmd_output["admin_state_up"],
)
self.assertFalse(cmd_output["shared"])
self.assertEqual(
'Internal',
cmd_output["router:external"],
)
self.assertFalse(cmd_output["is_default"])
self.assertTrue(
cmd_output["port_security_enabled"]
)
else:
self.assertEqual(
'3.4.5.0/28',
cmd_output["cidr"],
)
name2 = uuid.uuid4().hex
if self.haz_network:
network_options = '--description bbbb --disable '
else:
network_options = '--subnet 4.5.6.7/28 '
cmd_output = json.loads(self.openstack(
'network create -f json --share %s%s' %
(network_options, name2)
))
self.addCleanup(self.openstack, 'network delete ' + name2)
self.assertIsNotNone(cmd_output["id"])
if self.haz_network:
self.assertEqual(
'bbbb',
cmd_output["description"],
)
self.assertEqual(
'DOWN',
cmd_output["admin_state_up"],
)
self.assertTrue(cmd_output["shared"])
self.assertFalse(cmd_output["is_default"])
self.assertTrue(cmd_output["port_security_enabled"])
else:
self.assertEqual(
'4.5.6.0/28',
cmd_output["cidr"],
)
self.assertTrue(cmd_output["share_address"])
# Test list
cmd_output = json.loads(self.openstack(
"network list -f json "
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --long
if self.haz_network:
cmd_output = json.loads(self.openstack(
"network list -f json --long"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --long --enable
if self.haz_network:
cmd_output = json.loads(self.openstack(
"network list -f json --enable --long"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --long --disable
if self.haz_network:
cmd_output = json.loads(self.openstack(
"network list -f json --disable --long"
))
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --share
if self.haz_network:
cmd_output = json.loads(self.openstack(
"network list -f json --share "
))
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --no-share
if self.haz_network:
cmd_output = json.loads(self.openstack(
"network list -f json --no-share "
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
def test_network_dhcp_agent(self):
if not self.haz_network:
self.skipTest("No Network service present")
name1 = uuid.uuid4().hex
cmd_output1 = json.loads(self.openstack(
'network create -f json --description aaaa %s' % name1
))
self.addCleanup(self.openstack, 'network delete %s' % name1)
# Get network ID
network_id = cmd_output1['id']
# Get DHCP Agent ID
cmd_output2 = json.loads(self.openstack(
'network agent list -f json --agent-type dhcp'
))
agent_id = cmd_output2[0]['ID']
# Add Agent to Network
self.openstack(
'network agent add network --dhcp %s %s' % (agent_id, network_id)
)
# Test network list --agent
cmd_output3 = json.loads(self.openstack(
'network list -f json --agent %s' % agent_id
))
# Cleanup
# Remove Agent from Network
self.openstack(
'network agent remove network --dhcp %s %s' %
(agent_id, network_id)
)
# Assert
col_name = [x["ID"] for x in cmd_output3]
self.assertIn(
network_id, col_name
)
def test_network_set(self):
"""Tests create options, set, show, delete"""
if not self.haz_network:
self.skipTest("No Network service present")
name = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'network create -f json '
'--description aaaa '
'--enable '
'--no-share '
'--internal '
'--no-default '
'--enable-port-security %s' %
name
))
self.addCleanup(self.openstack, 'network delete %s' % name)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
'aaaa',
cmd_output["description"],
)
self.assertEqual(
'UP',
cmd_output["admin_state_up"],
)
self.assertFalse(cmd_output["shared"])
self.assertEqual(
'Internal',
cmd_output["router:external"],
)
self.assertFalse(cmd_output["is_default"])
self.assertTrue(
cmd_output["port_security_enabled"]
)
raw_output = self.openstack(
'network set '
'--description cccc '
'--disable '
'--share '
'--external '
'--disable-port-security %s' %
name
)
self.assertOutput('', raw_output)
cmd_output = json.loads(self.openstack(
'network show -f json ' + name
))
self.assertEqual(
'cccc',
cmd_output["description"],
)
self.assertEqual(
'DOWN',
cmd_output["admin_state_up"],
)
self.assertTrue(cmd_output["shared"])
self.assertEqual(
'External',
cmd_output["router:external"],
)
self.assertFalse(cmd_output["is_default"])
self.assertFalse(
cmd_output["port_security_enabled"]
)
|
the-stack_0_25711
|
# -*- coding:UTF-8 -*-
'''
Read characters from the keyboard and write them to a disk file
one by one, until a '#' is entered.
'''
if __name__ == '__main__':
from sys import stdout
    filename = raw_input('Enter the file name:\n')
fp = open(filename, "w")
    ch = raw_input('Enter a string:\n')
while ch != '#':
fp.write(ch)
stdout.write(ch)
ch = raw_input('')
fp.close()
|
the-stack_0_25715
|
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
WRITE_DISABLED = "Write Disabled"
UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30
class VMAXProvision(object):
"""Provisioning Class for Dell EMC VMAX volume drivers.
It supports VMAX arrays.
"""
def __init__(self, rest):
self.utils = utils.VMAXUtils()
self.rest = rest
def create_storage_group(
self, array, storagegroup_name, srp, slo, workload,
extra_specs, do_disable_compression=False):
"""Create a new storage group.
:param array: the array serial number
:param storagegroup_name: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extra_specs: additional info
:param do_disable_compression: disable compression flag
:returns: storagegroup - storage group object
"""
start_time = time.time()
@coordination.synchronized("emc-sg-{storage_group}")
def do_create_storage_group(storage_group):
# Check if storage group has been recently created
storagegroup = self.rest.get_storage_group(
array, storagegroup_name)
if storagegroup is None:
storagegroup = self.rest.create_storage_group(
array, storage_group, srp, slo, workload, extra_specs,
do_disable_compression)
LOG.debug("Create storage group took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
LOG.info("Storage group %(sg)s created successfully.",
{'sg': storagegroup_name})
else:
LOG.info("Storage group %(sg)s already exists.",
{'sg': storagegroup_name})
return storagegroup
return do_create_storage_group(storagegroup_name)
def create_volume_from_sg(self, array, volume_name, storagegroup_name,
volume_size, extra_specs):
"""Create a new volume in the given storage group.
:param array: the array serial number
:param volume_name: the volume name (String)
:param storagegroup_name: the storage group name
:param volume_size: volume size (String)
:param extra_specs: the extra specifications
:returns: dict -- volume_dict - the volume dict
"""
@coordination.synchronized("emc-sg-{storage_group}")
def do_create_volume_from_sg(storage_group):
start_time = time.time()
volume_dict = self.rest.create_volume_from_sg(
array, volume_name, storage_group,
volume_size, extra_specs)
LOG.debug("Create volume from storage group "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
return volume_dict
return do_create_volume_from_sg(storagegroup_name)
def delete_volume_from_srp(self, array, device_id, volume_name):
"""Delete a volume from the srp.
:param array: the array serial number
:param device_id: the volume device id
:param volume_name: the volume name
"""
start_time = time.time()
LOG.debug("Delete volume %(volume_name)s from srp.",
{'volume_name': volume_name})
self.rest.delete_volume(array, device_id)
LOG.debug("Delete volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(
start_time, time.time())})
def create_volume_snapvx(self, array, source_device_id,
snap_name, extra_specs):
"""Create a snapVx of a volume.
:param array: the array serial number
:param source_device_id: source volume device id
:param snap_name: the snapshot name
:param extra_specs: the extra specifications
"""
start_time = time.time()
LOG.debug("Create Snap Vx snapshot of: %(source)s.",
{'source': source_device_id})
self.rest.create_volume_snap(
array, snap_name, source_device_id, extra_specs)
LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def create_volume_replica(
self, array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=False):
"""Create a snap vx of a source and copy to a target.
:param array: the array serial number
:param source_device_id: source volume device id
:param target_device_id: target volume device id
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
:param create_snap: Flag for create snapvx
"""
start_time = time.time()
if create_snap:
self.create_volume_snapvx(array, source_device_id,
snap_name, extra_specs)
# Link source to target
self.rest.modify_volume_snap(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
LOG.debug("Create element replica took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def break_replication_relationship(
self, array, target_device_id, source_device_id, snap_name,
extra_specs):
"""Unlink a snapshot from its target volume.
:param array: the array serial number
:param source_device_id: source volume device id
:param target_device_id: target volume device id
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
"""
LOG.debug("Break snap vx link relationship between: %(src)s "
"and: %(tgt)s.",
{'src': source_device_id, 'tgt': target_device_id})
self._unlink_volume(array, source_device_id, target_device_id,
snap_name, extra_specs)
def _unlink_volume(
self, array, source_device_id, target_device_id, snap_name,
extra_specs, list_volume_pairs=None):
"""Unlink a target volume from its source volume.
:param array: the array serial number
:param source_device_id: the source device id
:param target_device_id: the target device id
:param snap_name: the snap name
:param extra_specs: extra specifications
:param list_volume_pairs: list of volume pairs, optional
:return: return code
"""
def _unlink_vol():
"""Called at an interval until the synchronization is finished.
:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['modify_vol_success']:
self.rest.modify_volume_snap(
array, source_device_id, target_device_id, snap_name,
extra_specs, unlink=True,
list_volume_pairs=list_volume_pairs)
kwargs['modify_vol_success'] = True
except exception.VolumeBackendAPIException:
pass
if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("_unlink_volume failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['modify_vol_success']:
raise loopingcall.LoopingCallDone()
kwargs = {'retries': 0,
'modify_vol_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
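    # Sketch of the retry pattern above (descriptive only): a
    # FixedIntervalLoopingCall re-invokes _unlink_vol every UNLINK_INTERVAL
    # seconds; the closure flips kwargs['modify_vol_success'] once the REST call
    # stops raising, and raises LoopingCallDone on success or after
    # UNLINK_RETRIES attempts, whose retvalue becomes the rc returned here.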
def delete_volume_snap(self, array, snap_name,
source_device_id, restored=False):
"""Delete a snapVx snapshot of a volume.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
:param restored: Flag to indicate if restored session is being deleted
"""
LOG.debug("Delete SnapVx: %(snap_name)s for volume %(vol)s.",
{'vol': source_device_id, 'snap_name': snap_name})
self.rest.delete_volume_snap(
array, snap_name, source_device_id, restored)
def is_restore_complete(self, array, source_device_id,
snap_name, extra_specs):
"""Check and wait for a restore to complete
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: snapshot name
:param extra_specs: extra specification
:returns: bool
"""
def _wait_for_restore():
"""Called at an interval until the restore is finished.
:raises: loopingcall.LoopingCallDone
:raises: VolumeBackendAPIException
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['wait_for_restore_called']:
if self._is_restore_complete(
array, source_device_id, snap_name):
kwargs['wait_for_restore_called'] = True
except Exception:
exception_message = (_("Issue encountered waiting for "
"restore."))
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
if kwargs['wait_for_restore_called']:
raise loopingcall.LoopingCallDone()
if kwargs['retries'] > int(extra_specs[utils.RETRIES]):
LOG.error("_wait_for_restore failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(
retvalue=int(extra_specs[utils.RETRIES]))
kwargs = {'retries': 0,
'wait_for_restore_called': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_restore)
rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
return rc
def _is_restore_complete(self, array, source_device_id, snap_name):
"""Helper function to check if restore is complete.
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: the snapshot name
:returns: restored -- bool
"""
restored = False
snap_details = self.rest.get_volume_snap(
array, source_device_id, snap_name)
if snap_details:
linked_devices = snap_details.get("linkedDevices", [])
for linked_device in linked_devices:
if ('targetDevice' in linked_device and
source_device_id == linked_device['targetDevice']):
if ('state' in linked_device and
linked_device['state'] == "Restored"):
restored = True
return restored
def delete_temp_volume_snap(self, array, snap_name, source_device_id):
"""Delete the temporary snapshot created for clone operations.
There can be instances where the source and target both attempt to
delete a temp snapshot simultaneously, so we must lock the snap and
then double check it is on the array.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
"""
@coordination.synchronized("emc-snapvx-{snapvx_name}")
def do_delete_temp_snap(snapvx_name):
# Ensure snap has not been recently deleted
if self.rest.get_volume_snap(
array, source_device_id, snapvx_name):
self.delete_volume_snap(array, snapvx_name, source_device_id)
do_delete_temp_snap(snap_name)
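    # Illustrative sketch (not part of the original driver): how a templated
    # coordination lock serialises concurrent callers, as in
    # do_delete_temp_snap above. The decorator argument is a format string
    # filled from the decorated function's arguments, so two callers passing
    # the same snapshot name contend for the same lock. The helper functions
    # below are hypothetical.
    #
    #     @coordination.synchronized("emc-snapvx-{snap_name}")
    #     def _delete_if_present(snap_name):
    #         # Re-check under the lock: another caller may have deleted the
    #         # snapshot while we were waiting.
    #         if snapshot_exists(snap_name):    # hypothetical helper
    #             delete_snapshot(snap_name)    # hypothetical helper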
def delete_volume_snap_check_for_links(self, array, snap_name,
source_devices, extra_specs):
"""Check if a snap has any links before deletion.
If a snapshot has any links, break the replication relationship
before deletion.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_devices: the source device ids
:param extra_specs: the extra specifications
"""
list_device_pairs = []
if not isinstance(source_devices, list):
source_devices = [source_devices]
for source_device in source_devices:
LOG.debug("Check for linked devices to SnapVx: %(snap_name)s "
"for volume %(vol)s.",
{'vol': source_device, 'snap_name': snap_name})
linked_list = self.rest.get_snap_linked_device_list(
array, source_device, snap_name)
if len(linked_list) == 1:
target_device = linked_list[0]['targetDevice']
list_device_pairs.append((source_device, target_device))
else:
for link in linked_list:
# If a single source volume has multiple targets,
# we must unlink each target individually
target_device = link['targetDevice']
self._unlink_volume(array, source_device, target_device,
snap_name, extra_specs)
if list_device_pairs:
self._unlink_volume(array, "", "", snap_name, extra_specs,
list_volume_pairs=list_device_pairs)
self.delete_volume_snap(array, snap_name, source_devices)
def extend_volume(self, array, device_id, new_size, extra_specs,
rdf_group=None):
"""Extend a volume.
:param array: the array serial number
:param device_id: the volume device id
:param new_size: the new size (GB)
:param extra_specs: the extra specifications
:param rdf_group: the rdf group number, if required
:returns: status_code
"""
start_time = time.time()
if rdf_group:
@coordination.synchronized('emc-rg-{rdf_group}')
def _extend_replicated_volume(rdf_group):
self.rest.extend_volume(array, device_id,
new_size, extra_specs)
_extend_replicated_volume(rdf_group)
else:
self.rest.extend_volume(array, device_id, new_size, extra_specs)
LOG.debug("Extend VMAX volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def get_srp_pool_stats(self, array, array_info):
"""Get the srp capacity stats.
:param array: the array serial number
:param array_info: the array dict
:returns: total_capacity_gb
:returns: remaining_capacity_gb
:returns: subscribed_capacity_gb
:returns: array_reserve_percent
"""
total_capacity_gb = 0
remaining_capacity_gb = 0
subscribed_capacity_gb = 0
array_reserve_percent = 0
srp = array_info['srpName']
LOG.debug(
"Retrieving capacity for srp %(srpName)s on array %(array)s.",
{'srpName': srp, 'array': array})
srp_details = self.rest.get_srp_by_name(array, srp)
if not srp_details:
LOG.error("Unable to retrieve srp instance of %(srpName)s on "
"array %(array)s.",
{'srpName': srp, 'array': array})
            return 0, 0, 0, 0
try:
total_capacity_gb = srp_details['total_usable_cap_gb']
try:
used_capacity_gb = srp_details['total_used_cap_gb']
remaining_capacity_gb = float(
total_capacity_gb - used_capacity_gb)
except KeyError:
remaining_capacity_gb = srp_details['fba_free_capacity']
subscribed_capacity_gb = srp_details['total_subscribed_cap_gb']
array_reserve_percent = srp_details['reserved_cap_percent']
except KeyError:
pass
return (total_capacity_gb, remaining_capacity_gb,
subscribed_capacity_gb, array_reserve_percent)
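    # Illustrative sketch (not part of the original driver): the capacity
    # arithmetic above applied to a hypothetical srp_details payload. The keys
    # mirror the ones read by get_srp_pool_stats; the figures are made up.
    #
    #     srp_details = {'total_usable_cap_gb': 1000.0,
    #                    'total_used_cap_gb': 250.0,
    #                    'total_subscribed_cap_gb': 1200.0,
    #                    'reserved_cap_percent': 10}
    #     remaining_gb = float(srp_details['total_usable_cap_gb'] -
    #                          srp_details['total_used_cap_gb'])  # 750.0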
def verify_slo_workload(self, array, slo, workload, srp):
"""Check if SLO and workload values are valid.
:param array: the array serial number
:param slo: Service Level Object e.g bronze
:param workload: workload e.g DSS
:param srp: the storage resource pool name
:returns: boolean
"""
is_valid_slo, is_valid_workload = False, False
if workload and workload.lower() == 'none':
workload = None
if not workload:
is_valid_workload = True
if slo and slo.lower() == 'none':
slo = None
valid_slos = self.rest.get_slo_list(array, srp)
valid_workloads = self.rest.get_workload_settings(array)
for valid_slo in valid_slos:
if slo == valid_slo:
is_valid_slo = True
break
for valid_workload in valid_workloads:
if workload == valid_workload:
is_valid_workload = True
break
if not slo:
is_valid_slo = True
if workload:
is_valid_workload = False
if not is_valid_slo:
LOG.error(
"SLO: %(slo)s is not valid. Valid values are: "
"%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos})
if not is_valid_workload:
LOG.error(
"Workload: %(workload)s is not valid. Valid values are "
"%(valid_workloads)s. Note you cannot "
"set a workload without an SLO.",
{'workload': workload, 'valid_workloads': valid_workloads})
return is_valid_slo, is_valid_workload
def get_slo_workload_settings_from_storage_group(
self, array, sg_name):
"""Get slo and workload settings from a storage group.
:param array: the array serial number
:param sg_name: the storage group name
:returns: storage group slo settings
"""
slo = 'NONE'
workload = 'NONE'
storage_group = self.rest.get_storage_group(array, sg_name)
if storage_group:
try:
slo = storage_group['slo']
workload = storage_group['workload']
except KeyError:
pass
else:
exception_message = (_(
"Could not retrieve storage group %(sg_name)s. ") %
{'sg_name': sg_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
@coordination.synchronized('emc-rg-{rdf_group}')
def break_rdf_relationship(self, array, device_id, target_device,
rdf_group, rep_extra_specs, state):
"""Break the rdf relationship between a pair of devices.
:param array: the array serial number
:param device_id: the source device id
:param target_device: target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: replication extra specs
:param state: the state of the rdf pair
"""
LOG.info("Suspending rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if state.lower() == utils.RDF_SYNCINPROG_STATE:
self.rest.wait_for_rdf_consistent_state(
array, device_id, target_device,
rep_extra_specs, state)
if state.lower() == utils.RDF_SUSPENDED_STATE:
LOG.info("RDF pair is already suspended")
else:
self.rest.modify_rdf_device_pair(
array, device_id, rdf_group, rep_extra_specs, suspend=True)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def break_metro_rdf_pair(self, array, device_id, target_device,
rdf_group, rep_extra_specs, metro_grp):
"""Delete replication for a Metro device pair.
Need to suspend the entire group before we can delete a single pair.
:param array: the array serial number
:param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: the replication extra specifications
:param metro_grp: the metro storage group name
"""
# Suspend I/O on the RDF links...
LOG.info("Suspending I/O for all volumes in the RDF group: %(rdfg)s",
{'rdfg': rdf_group})
self.disable_group_replication(
array, metro_grp, rdf_group, rep_extra_specs)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def delete_rdf_pair(
self, array, device_id, rdf_group, target_device, extra_specs):
"""Delete an rdf pairing.
If the replication mode is synchronous, only one attempt is required
to delete the pair. Otherwise, we need to wait until all the tracks
are cleared before the delete will be successful. As there is
currently no way to track this information, we keep attempting the
operation until it is successful.
:param array: the array serial number
:param device_id: source volume device id
:param rdf_group: the rdf group number
:param target_device: the target device
:param extra_specs: extra specifications
"""
LOG.info("Deleting rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if (extra_specs.get(utils.REP_MODE) and
extra_specs.get(utils.REP_MODE) == utils.REP_SYNC):
return self.rest.delete_rdf_pair(array, device_id, rdf_group)
def _delete_pair():
"""Delete a rdf volume pair.
Called at an interval until all the tracks are cleared
and the operation is successful.
:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['delete_pair_success']:
self.rest.delete_rdf_pair(
array, device_id, rdf_group)
kwargs['delete_pair_success'] = True
except exception.VolumeBackendAPIException:
pass
if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("Delete volume pair failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['delete_pair_success']:
raise loopingcall.LoopingCallDone()
kwargs = {'retries': 0,
'delete_pair_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_delete_pair)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
def get_or_create_volume_group(self, array, group, extra_specs):
"""Get or create a volume group.
Sometimes it may be necessary to recreate a volume group on the
backend - for example, when the last member volume has been removed
from the group, but the cinder group object has not been deleted.
:param array: the array serial number
:param group: the group object
:param extra_specs: the extra specifications
:return: group name
"""
vol_grp_name = self.utils.update_volume_group_name(group)
return self.get_or_create_group(array, vol_grp_name, extra_specs)
def get_or_create_group(self, array, group_name, extra_specs):
"""Get or create a generic volume group.
:param array: the array serial number
:param group_name: the group name
:param extra_specs: the extra specifications
:return: group name
"""
storage_group = self.rest.get_storage_group(array, group_name)
if not storage_group:
self.create_volume_group(array, group_name, extra_specs)
return group_name
def create_volume_group(self, array, group_name, extra_specs):
"""Create a generic volume group.
:param array: the array serial number
:param group_name: the name of the group
:param extra_specs: the extra specifications
:returns: volume_group
"""
return self.create_storage_group(array, group_name,
None, None, None, extra_specs)
def create_group_replica(
self, array, source_group, snap_name, extra_specs):
"""Create a replica (snapVx) of a volume group.
:param array: the array serial number
:param source_group: the source group name
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
"""
LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.",
{'srcGroup': source_group})
# Create snapshot
self.rest.create_storagegroup_snap(
array, source_group, snap_name, extra_specs)
def delete_group_replica(self, array, snap_name, source_group_name,
src_dev_ids, extra_specs):
"""Delete the snapshot.
:param array: the array serial number
:param snap_name: the name for the snap shot
:param source_group_name: the source group name
:param src_dev_ids: the list of source device ids
:param extra_specs: extra specifications
"""
# Delete snapvx snapshot
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
{'srcGroup': source_group_name, 'snap_name': snap_name})
self.delete_volume_snap_check_for_links(
array, snap_name, src_dev_ids, extra_specs)
def link_and_break_replica(self, array, source_group_name,
target_group_name, snap_name, extra_specs,
list_volume_pairs, delete_snapshot=False):
"""Links a group snap and breaks the relationship.
:param array: the array serial
:param source_group_name: the source group name
:param target_group_name: the target group name
:param snap_name: the snapshot name
:param extra_specs: extra specifications
:param list_volume_pairs: the list of volume pairs
:param delete_snapshot: delete snapshot flag
"""
LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'srcGroup': source_group_name,
'tgtGroup': target_group_name})
# Link the snapshot
self.rest.modify_volume_snap(
array, None, None, snap_name, extra_specs, link=True,
list_volume_pairs=list_volume_pairs)
# Unlink the snapshot
LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'srcGroup': source_group_name,
'tgtGroup': target_group_name})
self._unlink_volume(array, None, None, snap_name, extra_specs,
list_volume_pairs=list_volume_pairs)
# Delete the snapshot if necessary
if delete_snapshot:
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
{'srcGroup': source_group_name,
'snap_name': snap_name})
source_devices = [a for a, b in list_volume_pairs]
self.delete_volume_snap(array, snap_name, source_devices)
def enable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs, establish=False):
"""Resume rdf replication on a storage group.
Replication is enabled by default. This allows resuming
replication on a suspended group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param establish: flag to indicate 'establish' instead of 'resume'
"""
action = "Establish" if establish is True else "Resume"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def disable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Suspend rdf replication on a storage group.
This does not delete the rdf pairs, that can only be done
by deleting the group. This method suspends all i/o activity
on the rdf links.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Suspend"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def failover_group(self, array, storagegroup_name,
rdf_group_num, extra_specs, failover=True):
"""Failover or failback replication on a storage group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param failover: flag to indicate failover/ failback
"""
action = "Failover" if failover else "Failback"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def delete_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Split replication for a group and delete the pairs.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
group_details = self.rest.get_storage_group_rep(
array, storagegroup_name)
if (group_details and group_details.get('rdf')
and group_details['rdf'] is True):
action = "Split"
LOG.debug("Splitting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
LOG.debug("Deleting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.delete_storagegroup_rdf(
array, storagegroup_name, rdf_group_num)
def revert_volume_snapshot(self, array, source_device_id,
snap_name, extra_specs):
"""Revert a volume snapshot
:param array: the array serial number
:param source_device_id: device id of the source
:param snap_name: snapvx snapshot name
:param extra_specs: the extra specifications
"""
start_time = time.time()
self.rest.modify_volume_snap(
array, source_device_id, "", snap_name, extra_specs, restore=True)
LOG.debug("Restore volume snapshot took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
|
the-stack_0_25720
|
import string
def as_behave_table(data):
""" nosetests --with-doctest --with-coverage report_table.py
>>> from report_table import as_behave_table
>>> data = [('what', 'how', 'who'),
... ('lorem', 'that is a long value', 3.1415),
... ('ipsum', 89798, 0.2)]
    >>> print(as_behave_table(data))
    | what  | how                  | who    |
    | lorem | that is a long value | 3.1415 |
    | ipsum |                89798 |    0.2 |
"""
table = []
# max size of each column
    sizes = list(map(max, zip(*[[len(str(elt)) for elt in member]
                                for member in data])))
num_elts = len(sizes)
start_of_line = '| '
vertical_separator = ' | '
end_of_line = ' |'
meta_template = vertical_separator.join(['{{{{{0}:{{{0}}}}}}}'.format(i)
for i in range(num_elts)])
template = '{0}{1}{2}'.format(start_of_line,
meta_template.format(*sizes),
end_of_line)
for d in data:
table.append(template.format(*d))
return '\n'.join(table)
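# Quick usage sketch (added for illustration, not in the original module): for
# the three-column data in the doctest, the nested-brace meta_template expands
# first to per-column placeholders and then, once the measured widths are
# substituted, to an ordinary format string such as '| {0:5} | {1:20} | {2:6} |'.
if __name__ == "__main__":
    example = [('what', 'how', 'who'),
               ('lorem', 'that is a long value', 3.1415),
               ('ipsum', 89798, 0.2)]
    print(as_behave_table(example))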
|
the-stack_0_25721
|
li = [1,2,3,4,5,6,7,8,9,10]
even_numbers = []
for x in li:
if x % 2 == 0:
even_numbers.append(x)
print(even_numbers)
#The alternate system using list comprehensions would be
even_numbers1 = [x for x in range(1, 11) if x % 2 == 0]
print(even_numbers1)
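# A further variant (illustrative): the same filtering written as a generator
# expression, which avoids building an intermediate list when only an
# aggregate is needed.
even_sum = sum(x for x in range(1, 11) if x % 2 == 0)
print(even_sum)  # 30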
|
the-stack_0_25722
|
from __future__ import print_function
import sys
import time
import socket
from datetime import datetime
import happybase
from thriftpy.transport import TTransportException
from ..common.conf_reader import ConfReader
from ..common.error import full_trace_error
from ..common import update_prefix
# Variables exposed in configuration, default settings are those used before transition:
UPDATE_INFOCF = "info"
UPDATE_LISTSHA1CNAME = "list_sha1s"
IMG_INFOCF = "info"
IMG_BUFFCF = "info"
IMG_BUFFCNAME = "img_buffer"
IMG_URLCNAME = "s3_url"
EXTR_CF = "ext"
DEFAULT_HBASEINDEXER_PREFIX = "HBI_"
SKIP_FAILED = False
# Maximum number of retries before actually raising error
#MAX_ERRORS = 3
MAX_ERRORS = 2
# Reading a lot of data from HBase at once can be unstable
READ_BATCH_SIZE = 100
# Maximum number of rows when scanning (could be renamed to be more explicit)
MAX_ROWS = 500
# Maximum size of one row to be saved to HBase (could be dependent on HBase setup)
MAX_ROW_SIZE = 2097152
#UPDATE_BATCH_SIZE = 2048
UPDATE_BATCH_SIZE = 1000
HBASE_TIMEOUT = 60000
# Not yet exposed in configuration
EXTR_STR_PROCESSED = "processed"
EXTR_STR_FAILED = "failed"
UPDATE_STR_PROCESSED = "processed"
UPDATE_STR_STARTED = "started"
UPDATE_STR_CREATED = "created"
UPDATE_STR_COMPLETED = "completed"
IMG_URLBACKUPCNAME = "location"
IMG_PATHCNAME = "img_path"
# After transition
# Put everything in "data" column family and img:img for buffer but we SHOULD NOT write to it
# data vs. info, double check the consequences downstream.
# should we have an "info_column_family" and an "image_info_column_family"
# Uncharted format data:location for s3_url but we should not overwrite...
# we could use it as backup for getting s3_url?
# Could all these be specified dynamically, e.g. in conf file?
# img_info_column_family = "data"
# img_buffer_column_family = "img"
# extraction_column_family = "data"
# update_info_column_family = "info" # does not really matter
# update_completed_column = update_info_column_family+":"+update_str_completed
# img_buffer_column = img_buffer_column_family+":img"
# img_URL_column = img_info_column_family+":s3_url"
# img_backup_URL_column = img_info_column_family+":location"
# img_path_column = img_info_column_family+":img_path"
# default_prefix = "HBI_"
# MAX_ROWS = 500
class HBaseIndexerMinimal(ConfReader):
"""Indexing image and update information using HBase as backend.
"""
def __init__(self, global_conf_in, prefix=DEFAULT_HBASEINDEXER_PREFIX):
"""HBaseIndexerMinimal constructor.
:param global_conf_in: configuration file or dictionary
:type global_conf_in: str, dict
:param prefix: prefix in configuration
:type prefix: str
"""
self.last_refresh = datetime.now()
self.transport_type = 'buffered' # this is happybase default
# To store count of batches of updates pushed
self.dict_up = dict()
self.pool = None
self.timeout = 30
self.batch_update_size = UPDATE_BATCH_SIZE
# Column families and names
self.extrcf = None
self.imginfocf = None
self.imgbuffcf = None
self.imgbuffcname = None
self.updateinfocf = None
self.updatelistsha1scname = None
self.skipfailed = None
super(HBaseIndexerMinimal, self).__init__(global_conf_in, prefix)
self.set_pp(pp="HBaseIndexerMinimal")
print('[{}: log] verbose level is: {}'.format(self.pp, self.verbose))
self.refresh_hbase_conn("__init__")
# Expose all column names so they can be defined in configuration.
def get_dictcf_sha1_table(self):
"""Get dictionary of column families for images table.
:return: dictionary of column families
:rtype: dict
"""
return {self.imginfocf: dict(), self.extrcf: dict(), self.imgbuffcf: dict()}
def get_dictcf_update_table(self):
"""Get dictionary of column families for updates table.
:return: dictionary of column families
:rtype: dict
"""
return {self.updateinfocf: dict()}
def get_col_upproc(self):
"""Get full column (i.e. ``column_family:column_name``) for storing update processing end date.
:return: column update processed
:rtype: string
"""
return self.updateinfocf + ":" + UPDATE_STR_PROCESSED
def get_col_upstart(self):
"""Get full column (i.e. ``column_family:column_name``) for storing update processing start date.
:return: column update started
:rtype: string
"""
return self.updateinfocf + ":" + UPDATE_STR_STARTED
def get_col_upcomp(self):
"""Get full column (i.e. ``column_family:column_name``) for storing update completion status.
:return: column update completed
:rtype: string
"""
return self.updateinfocf + ":" + UPDATE_STR_COMPLETED
def get_col_upcreate(self):
"""Get full column (i.e. ``column_family:column_name``) for storing update creation date.
:return: column update completed
:rtype: string
"""
return self.updateinfocf + ":" + UPDATE_STR_CREATED
def get_col_imgurl(self):
"""Get full column (i.e. ``column_family:column_name``) for storing image URL.
:return: column image URL
:rtype: string
"""
return self.imginfocf + ":" + self.imgurlcname
def get_col_imgurlbak(self):
"""Get full column (i.e. ``column_family:column_name``) for storing image URL (backup).
:return: column image URL (backup)
:rtype: string
"""
return self.imginfocf + ":" + IMG_URLBACKUPCNAME
def get_col_imgpath(self):
"""Get full column (i.e. ``column_family:column_name``) for storing image path.
:return: column image path
:rtype: string
"""
return self.imginfocf + ":" + IMG_PATHCNAME
def get_col_imgbuff(self):
"""Get full column (i.e. ``column_family:column_name``) for storing image buffer.
:return: column image buffer
:rtype: string
"""
return self.imgbuffcf + ":" + self.imgbuffcname
def get_col_listsha1s(self):
"""Get full column (i.e. ``column_family:column_name``) for storing update images sha1 list.
:return: column sha1 list
:rtype: string
"""
return self.updateinfocf + ":" + self.updatelistsha1scname
def read_conf(self):
"""Reads configuration parameters from self.global_conf.
Required parameters are:
- ``host``
- ``table_sha1infos``
Optional parameters are:
- ``table_updateinfos``
- ``pool_thread``
- ``batch_update_size``
- ``column_list_sha1s``
- ``extr_family_column``
- ``image_info_column_family``
- ``image_buffer_column_family``
- ``image_buffer_column_name``
- ``update_info_column_family``
"""
super(HBaseIndexerMinimal, self).read_conf()
# HBase conf
self.hbase_host = str(self.get_required_param('host'))
# Get table of images and updates
self.table_sha1infos_name = str(self.get_required_param('table_sha1infos'))
# Optional for "IN" indexer for example
self.table_updateinfos_name = str(self.get_param('table_updateinfos'))
if self.verbose > 0:
msg = "[{}.read_conf: info] HBase tables name: {} (sha1infos), {} (updateinfos)"
print(msg.format(self.pp, self.table_sha1infos_name, self.table_updateinfos_name))
self.nb_threads = int(self.get_param('pool_thread', default=1))
self.batch_update_size = int(self.get_param('batch_update_size', default=UPDATE_BATCH_SIZE))
self.skipfailed = self.get_param("skip_failed", default=SKIP_FAILED)
# Can all columns be set similarly? And is this change effective everywhere?
self.updatelistsha1scname = self.get_param("column_list_sha1s", default=UPDATE_LISTSHA1CNAME)
self.extrcf = self.get_param("extr_column_family", default=EXTR_CF)
self.imginfocf = self.get_param("image_info_column_family", default=IMG_INFOCF)
self.imgbuffcf = self.get_param("image_buffer_column_family", default=IMG_BUFFCF)
self.imgurlcname = self.get_param("image_url_column_name", default=IMG_URLCNAME)
self.imgbuffcname = self.get_param("image_buffer_column_name", default=IMG_BUFFCNAME)
self.updateinfocf = self.get_param("update_info_column_family", default=UPDATE_INFOCF)
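  # Illustrative sketch (not part of the original class): a minimal
  # configuration dictionary for HBaseIndexerMinimal, assuming ConfReader
  # resolves each parameter by prepending the indexer prefix (the default
  # "HBI_" here). Host and table names are placeholders.
  #
  #     sample_conf = {
  #         "HBI_host": "10.0.0.1",
  #         "HBI_table_sha1infos": "my_images_sha1_infos",
  #         "HBI_table_updateinfos": "my_images_updates",
  #         "HBI_batch_update_size": 1000,
  #     }
  #     indexer = HBaseIndexerMinimal(sample_conf)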
def refresh_hbase_conn(self, calling_function, sleep_time=0):
"""Refresh connection to HBase.
:param calling_function: name of function calling
:type calling_function: str
:param sleep_time: time to sleep before refreshing.
:type sleep_time: int
"""
# NB: This can take up to 4 seconds sometimes...
try:
start_refresh = time.time()
if self.verbose > 2:
dt_iso = datetime.utcnow().isoformat()
msg = "[{}.{}: {}] Trying to refresh connection pool."
print(msg.format(self.pp, calling_function, dt_iso))
sys.stdout.flush()
time.sleep(sleep_time)
self.pool = happybase.ConnectionPool(timeout=HBASE_TIMEOUT, size=self.nb_threads,
host=self.hbase_host, transport=self.transport_type)
if self.verbose > 1:
msg = "[{}.refresh_hbase_conn: log] Refreshed connection pool in {}s."
print(msg.format(self.pp, time.time() - start_refresh))
sys.stdout.flush()
except TTransportException as inst:
msg = "[{}.read_conf: error] Could not initialize connection to HBase ({})"
print(msg.format(self.pp, inst))
sys.stdout.flush()
def check_errors(self, nb_err, function_name, inst=None):
"""Check if function "function_name" has reached MAX_ERRORS errors.
Raise error if that is the case.
:param nb_err: number of errors caught in function "function_name"
:type nb_err: int
:param function_name: name of the function for which we want to check the error count.
:type function_name: str
:param inst: error instance.
:type inst: Exception
:raises Exception: if nb_err >= MAX_ERRORS
"""
if nb_err >= MAX_ERRORS:
msg = "[{}: error] function {} reached maximum number of error {}. Error {} was: {}"
raise Exception(msg.format(self.pp, function_name, MAX_ERRORS, type(inst), inst))
def get_create_table(self, table_name, conn=None, families=None):
"""Get HBase table "table_name", creating it if it does not exist yet.
:param table_name: name of the table to create.
:type table_name: string
:param conn: happybase connection
:type conn: :class:`happybase.Connection`
:param families: dictionary of column families (see ``get_dictcf_sha1_table`` and ``get_dictcf_update_table``)
:type families: dict
:return: table
:rtype: :class:`happybase.Table`
"""
# try:
if conn is None:
from happybase.connection import Connection
conn = Connection(self.hbase_host)
try:
# as no exception would be raised if table does not exist...
table = conn.table(table_name)
# ...try to access families to get error if table does not exist yet
_ = table.families()
# table exist, return it
return table
except Exception as inst:
# act differently based on error type (connection issue or actually table missing)
if type(inst) == TTransportException:
raise inst
else:
# we need to create the table
msg = "[{}.get_create_table: info] table {} does not exist (yet): {}{}"
print(msg.format(self.pp, table_name, type(inst), inst))
# but we need to know which column families it should contain
if families is None:
msg = "[{}.get_create_table: ERROR] table {} does not exist and 'families' not provided"
raise ValueError(msg.format(self.pp, table_name))
# Create table...
conn.create_table(table_name, families)
table = conn.table(table_name)
msg = "[{}.get_create_table: info] created table {}"
print(msg.format(self.pp, table_name))
# ... and return it
return table
# except Exception as inst:
# # May fail if families in dictionary do not match those of an existing table,
# # or because of connection issues. We want to raise up these error.
# raise inst
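  # Illustrative sketch (not part of the original class): the 'families'
  # argument expected above is a happybase column-families dictionary mapping
  # family name to an options dict, as produced by get_dictcf_sha1_table() and
  # get_dictcf_update_table(). Host, table and family names are placeholders.
  #
  #     import happybase
  #     families = {"info": dict(), "ext": dict()}
  #     conn = happybase.Connection("10.0.0.1")
  #     conn.create_table("my_table", families)  # raises if it already exists
  #     table = conn.table("my_table")
  #     print(table.families())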
def scan_from_row(self, table_name, row_start=None, columns=None, maxrows=10, perr=0, inst=None):
"""Scan table "table_name" starting a row "row_start" and retrieving columns "columns".
:param table_name: name of table to scan
:type table_name: string
:param row_start: starting row
:type row_start: string
:param columns: columns to retrieve
:type columns: list
:param maxrows: maximum number of rows to return
:type maxrows: int
:param perr: number of errors caught so far
:type perr: int
:param inst: error instance caught
:type inst: Exception
:return: rows list
:rtype: list
"""
self.check_errors(perr, "scan_from_row", inst)
try:
with self.pool.connection(timeout=self.timeout) as connection:
hbase_table = connection.table(table_name)
# scan table from row_start, accumulate in rows the information of needed columns
rows = []
for one_row in hbase_table.scan(row_start=row_start, columns=columns, batch_size=maxrows):
rows.extend((one_row,))
if len(rows) >= maxrows:
return rows
if self.verbose > 7:
print("[{}.scan_from_row: log] got {} rows.".format(self.pp, len(rows)))
sys.stdout.flush()
return rows
except Exception as err_inst:
#print("[{}.scan_from_row: error] {}".format(self.pp, err_inst))
# try to force longer sleep time...
self.refresh_hbase_conn("scan_from_row", sleep_time=4 * perr)
return self.scan_from_row(table_name, row_start=row_start, columns=columns, maxrows=maxrows,
perr=perr + 1, inst=err_inst)
def get_updates_from_date(self, start_date, extr_type="", maxrows=MAX_ROWS, perr=0, inst=None):
"""Get updates of ``extr_type`` from ``self.table_updateinfos_name`` starting from first update
after row key build using ``extr_type`` and ``start_date`` as:
- ``update_prefix + extr_type + "_" + start_date``
:param start_date: date (formatted as YYYY-MM-DD) from which updates should be retrieved
:type start_date: string
:param extr_type: extraction type
:type extr_type: string
:param maxrows: maximum number of rows to return
:type maxrows: int
:param perr: number of errors caught so far
:type perr: int
:param inst: error instance caught
:type inst: Exception
    :yield: list of rows of updates.
"""
# start_date should be in format YYYY-MM-DD(_XX)
rows = None
# build row_start as index_update_YYYY-MM-DD
row_start = update_prefix + extr_type + "_" + start_date
continue_scan = True
err_inst = inst
while continue_scan:
if perr >= MAX_ERRORS:
self.check_errors(perr, "get_updates_from_date", err_inst)
try:
rows = self.scan_from_row(self.table_updateinfos_name, row_start=row_start, maxrows=maxrows)
if rows:
if extr_type:
# Filter out updates of other extractions type.
out_rows = []
for row in rows:
if extr_type in row[0]:
out_rows.append((row[0], row[1]))
else:
out_rows = rows
# NB: maxrows is not fully enforced here
if out_rows:
yield out_rows
# add '~' to exclude last row from next batch
row_start = rows[-1][0]+'~'
else:
#print "[{}.get_updates_from_date: log] 'rows' was None.".format(self.pp)
continue_scan = False
except Exception as err_inst: # try to catch any exception
print("[{}.get_updates_from_date: error] {}".format(self.pp, err_inst))
self.refresh_hbase_conn("get_updates_from_date", sleep_time=4 * perr)
perr += 1
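  # Illustrative note (not part of the original class): appending '~' to the
  # last row key seen makes the next scan start strictly after that row,
  # because '~' (0x7E) sorts above every character used in these keys.
  #
  #     row_start = "index_update_sbpycaffe_2018-01-01_000" + "~"
  #     # lexicographically: "..._000" < "..._000~" < "..._001"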
def get_unprocessed_updates_from_date(self, start_date, extr_type="", maxrows=MAX_ROWS, perr=0,
inst=None):
"""Get unprocessed updates of type "extr_type" from "start_date".
:param start_date: start date (format YYYY-MM-DD[_XX])
:type start_date: str
:param extr_type: extraction type
:type extr_type: str
:param maxrows: maximum number of rows
:type maxrows: int
:param perr: previous errors count
:type perr: int
:param inst: last error instance
:type inst: Exception
:yield: list of rows
"""
fname = "get_unprocessed_updates_from_date"
rows = None
continue_scan = True
nb_rows_scanned = 0
update_suffix = extr_type + "_" + start_date
while continue_scan:
if perr >= MAX_ERRORS:
self.check_errors(perr, fname, inst)
try:
# build row_start as index_update_YYYY-MM-DD
row_start = update_prefix + update_suffix
if self.verbose > 3:
msg = "[{}.{}: log] row_start is: {}"
print(msg.format(self.pp, fname, row_start))
tmp_rows = self.scan_from_row(self.table_updateinfos_name, row_start=row_start,
columns=None, maxrows=maxrows, perr=0, inst=None)
if tmp_rows:
nb_rows_scanned += len(tmp_rows)
if self.verbose > 2:
msg = "[{}.{}: log] Scanned {} rows so far."
print(msg.format(self.pp, fname, nb_rows_scanned))
for row_id, row_val in tmp_rows:
last_row = row_id
# This fails for update from spark...
#start_date = '_'.join(last_row.split('_')[-2:])
# Does this work for any type of update?
update_suffix = '_'.join(last_row.split('_')[2:])+'~'
# changed to: self.column_update_processed
#if info_column_family + ":" + update_str_processed not in row_val:
#if self.column_update_processed not in row_val:
if self.get_col_upproc() not in row_val:
if extr_type and extr_type not in row_id:
continue
if rows is None:
rows = [(row_id, row_val)]
else:
rows.append((row_id, row_val))
else:
continue_scan = False
if rows:
if tmp_rows is None or len(tmp_rows) < maxrows:
# Looks like we reach the end of updates list
continue_scan = False
yield rows
except Exception as err_inst: # try to catch any exception
full_trace_error("[{}.{}: error] {}".format(self.pp, fname, err_inst))
inst = err_inst
self.refresh_hbase_conn(fname, sleep_time=4 * perr)
perr += 1
def get_missing_extr_updates_from_date(self, start_date, extr_type="", maxrows=MAX_ROWS,
perr=0, inst=None):
"""Get updates with missing extraction from "start_date".
:param start_date: start date (format YYYY-MM-DD[_XX])
:type start_date: str
:param extr_type: extraction type
:type extr_type: str
:param maxrows: maximum number of rows
:type maxrows: int
:param perr: previous errors count
:type perr: int
:param inst: last error instance
:type inst: Exception
:yield: list of rows
"""
fname = "get_missing_extr_updates_from_date"
    # NB: this means we keep reprocessing images that cannot be downloaded/processed
    # every time we check for updates...
if not extr_type:
msg = "[{}.{}: warning] extr_type was not specified."
print(msg.format(self.pp, fname))
return
# start_date should be in format YYYY-MM-DD(_XX)
# build row_start as index_update_YYYY-MM-DD
update_suffix = extr_type + "_" + start_date
continue_scan = True
while continue_scan:
if perr >= MAX_ERRORS:
self.check_errors(perr, fname, inst)
try:
# build row_start as index_update_YYYY-MM-DD
row_start = update_prefix + update_suffix
if self.verbose > 3:
msg = "[{}.{}: log] row_start is: {}"
print(msg.format(self.pp, fname, row_start))
rows = self.scan_from_row(self.table_updateinfos_name, row_start=row_start, maxrows=maxrows)
if rows:
# Filter out updates of other extractions type
for row in rows:
out_rows = []
update_suffix = '_'.join(row[0].split('_')[2:]) + '~'
if extr_type in row[0]:
if self.get_col_upcomp() in row[1]:
# Update has been marked as all extractions being performed
continue
# TODO: should we store in a set all checked updated for missing extractions
# so we only process them once in the life of the indexer?
if self.verbose > 4:
msg = "[{}.{}: log] checking update {} for missing extractions"
print(msg.format(self.pp, fname, row[0]))
if self.get_col_listsha1s() in row[1]:
tmp_list_sha1s = row[1][self.get_col_listsha1s()].split(',')
missing_extr_sha1s = self.get_missing_extr_sha1s(tmp_list_sha1s, extr_type,
skip_failed=self.skipfailed)
if missing_extr_sha1s:
if self.verbose > 5:
msg = "[{}.{}: log] Update {} has missing extractions"
print(msg.format(self.pp, fname, row[0]))
out_row_val = dict()
out_row_val[self.get_col_listsha1s()] = ','.join(missing_extr_sha1s)
if out_rows:
out_rows.append((row[0], out_row_val))
else:
out_rows = [(row[0], out_row_val)]
else:
if self.verbose > 4:
msg = "[{}.{}: log] Update {} has no missing extractions"
print(msg.format(self.pp, fname, row[0]))
# We should mark as completed here (actually even if self.skipfailed?)
if not self.skipfailed:
update_completed_dict = {row[0]: {self.get_col_upcomp(): str(1)}}
self.push_dict_rows(dict_rows=update_completed_dict,
table_name=self.table_updateinfos_name)
else:
msg = "[{}.{}: warning] update {} has no images list"
print(msg.format(self.pp, fname, row[0]))
if out_rows:
yield out_rows
else:
# We have reached the end of the scan
continue_scan = False
except Exception as err_inst: # try to catch any exception
print("[{}.{}: error] {}".format(self.pp, fname, err_inst))
self.refresh_hbase_conn(fname)
perr += 1
# TODO: move to common
def get_today_string(self):
"""Get today date formatted as '%Y-%m-%d'
:return: today string
:rtype: str
"""
return datetime.today().strftime('%Y-%m-%d')
def get_next_update_id(self, today=None, extr_type=""):
"""Get next valid update id for "extr_type".
:param today: today string
:type today: str
:param extr_type: extraction type
:type extr_type: str
:return: update_id, today string
:rtype: tuple
"""
# get today's date as in format YYYY-MM-DD
if today is None:
today = self.get_today_string()
if today not in self.dict_up:
self.dict_up = dict()
self.dict_up[today] = 0
else:
self.dict_up[today] += 1
# add the extraction type, as different extraction may build different batches depending
# when they started to process the images
#update_id = update_prefix + extr_type + "_" + today + "_" + str(self.dict_up[today])
update_id = update_prefix + extr_type + "_" + today + "_" + str(self.dict_up[today]).zfill(3)
return update_id, today
def push_dict_rows(self, dict_rows, table_name, families=None, perr=0, inst=None):
"""Push a dictionary to the HBase ``table_name`` assuming keys are the row keys and
each entry is a valid dictionary containing the column names and values.
:param dict_rows: input dictionary to be pushed
:type dict_rows: dict
:param table_name: name of the HBase table where to push the data
:type table_name: str
:param families: all families of the table (if we need to create the table)
:type families: dict
:param perr: previous errors count
:type perr: int
:param inst: last error instance
:type inst: Exception
:return: True (success), None (failure)
"""
# Should we give an example of properly formed 'dict_rows' in doc?
self.check_errors(perr, "push_dict_rows", inst)
batch_size = 10
if perr > 0:
batch_size = 1
try:
# Use connection pool. Seems to fail when pool was initialized a long time ago...
with self.pool.connection(timeout=self.timeout) as connection:
if families:
hbase_table = self.get_create_table(table_name, families=families, conn=connection)
else:
hbase_table = self.get_create_table(table_name, conn=connection)
if hbase_table is None:
raise ValueError("Could not initialize hbase_table")
batch = hbase_table.batch(batch_size=batch_size)
# Assume dict_rows[k] is a dictionary ready to be pushed to HBase...
for row_key in dict_rows:
if perr > 1:
tmp_dict_row = dict_rows[row_key]
row_size = sys.getsizeof(tmp_dict_row)
for key in tmp_dict_row:
row_size += sys.getsizeof(tmp_dict_row[key])
if row_size > MAX_ROW_SIZE: # print warning if size is bigger than 2MB?
msg = "[{}: warning] Row {} size seems to be: {}. Keys are: {}"
print(msg.format(self.pp, row_key, row_size, tmp_dict_row.keys()))
sys.stdout.flush()
# Try to discard buffer to avoid 'KeyValue size too large' error
if self.get_col_imgbuff() in tmp_dict_row:
del tmp_dict_row[self.get_col_imgbuff()]
batch.put(row_key, tmp_dict_row)
else:
batch.put(row_key, dict_rows[row_key])
batch.send()
return True
except Exception as err_inst: # try to catch any exception
# For debugging
if perr + 1 == MAX_ERRORS:
msg = "[push_dict_rows: log] dict_rows keys: {}"
print(msg.format(dict_rows.keys()))
self.refresh_hbase_conn("push_dict_rows", sleep_time=4 * perr)
return self.push_dict_rows(dict_rows, table_name, families=families, perr=perr+1,
inst=err_inst)
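  # Illustrative sketch (not part of the original class): a properly formed
  # 'dict_rows' argument, i.e. row key -> {"family:qualifier": value}, using
  # the default column names of this class. Row keys and values are
  # placeholders.
  #
  #     dict_rows = {
  #         "index_update_sbpycaffe_2018-01-01_000": {
  #             "info:list_sha1s": "SHA1A,SHA1B",
  #             "info:created": "2018-01-01 00:00:00",
  #         },
  #     }
  #     indexer.push_dict_rows(dict_rows, indexer.table_updateinfos_name)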
def get_rows_by_batch(self, list_queries, table_name, rbs=READ_BATCH_SIZE, families=None,
columns=None, perr=0, inst=None):
"""Get rows with keys ``list_queries`` from ``table_name``
:param list_queries: list of row keys
:type list_queries: list
:param table_name: table name
:type table_name: str
:param rbs: read batch size (default: READ_BATCH_SIZE)
:type rbs: int
:param families: column families (in case we need to create the table)
:type families: dict
:param columns: columns to retrieve
:type columns: list
:param perr: previous errors count
:type perr: int
:param inst: last error instance
:type inst: Exception
:return: list of rows (only with ``columns`` values if specified)
:rtype: list
"""
self.check_errors(perr, "get_rows_by_batch", inst)
try:
with self.pool.connection(timeout=self.timeout) as connection:
#hbase_table = connection.table(table_name)
if families:
hbase_table = self.get_create_table(table_name, families=families, conn=connection)
else:
hbase_table = self.get_create_table(table_name, conn=connection)
if hbase_table:
# slice list_queries in batches of batch_size to query
rows = []
nb_batch = 0
for batch_start in range(0, len(list_queries), rbs):
batch_end = min(batch_start+rbs, len(list_queries))
batch_list_queries = list_queries[batch_start:batch_end]
try:
rows.extend(hbase_table.rows(batch_list_queries, columns=columns))
except socket.timeout:
if self.verbose > 2:
msg = "[{}.get_rows_by_batch: warning] timed out when requesting rows: {}"
print(msg.format(self.pp, batch_list_queries))
sys.stdout.flush()
# how to catch "Hbase_thrift.IOError" ?
nb_batch += 1
if self.verbose > 5:
msg = "[{}.get_rows_by_batch: log] got {}/{} rows using {} batches."
print(msg.format(self.pp, len(rows), len(list_queries), nb_batch))
return rows
else:
msg = "[{}.get_rows_by_batch: error] could not get table: {} (families: {})"
raise ValueError(msg.format(self.pp, table_name, families))
except Exception as err_inst:
if type(err_inst) == ValueError:
raise err_inst
# try to force longer sleep time if error repeats...
self.refresh_hbase_conn("get_rows_by_batch", sleep_time=perr)
lower_rbs = max(int(rbs/2),1)
return self.get_rows_by_batch(list_queries, table_name, rbs=lower_rbs, families=families,
columns=columns, perr=perr+1, inst=err_inst)
def get_columns_from_sha1_rows(self, list_sha1s, columns, rbs=READ_BATCH_SIZE, families=None,
perr=0, inst=None):
"""Get columns ``columns`` for images in ``list_sha1s``
:param list_sha1s: list of images sha1
:type list_sha1s: list
:param columns: columns to retrieve
:type columns: list
:param rbs: read batch size (default: READ_BATCH_SIZE)
:type rbs: int
:param families: column families (in case we need to create the table)
:type families: dict
:param perr: previous errors count
:type perr: int
:param inst: last error instance
    :type inst: Exception
    :return: list of rows (only with ``columns`` values)
    :rtype: list
"""
rows = None
self.check_errors(perr, "get_columns_from_sha1_rows", inst)
if list_sha1s:
try:
rows = self.get_rows_by_batch(list_sha1s, self.table_sha1infos_name, rbs=rbs,
families=families, columns=columns)
except Exception as err_inst: # try to catch any exception
print(err_inst)
self.refresh_hbase_conn("get_columns_from_sha1_rows")
lower_rbs = rbs
if perr > 0:
lower_rbs = max(int(rbs / 4), 1)
return self.get_columns_from_sha1_rows(list_sha1s, columns, rbs=lower_rbs,
families=families, perr=perr + 1, inst=err_inst)
return rows
def get_features_from_sha1s(self, list_sha1s, extr_type, feat_type_decode=None):
""" Get features of "extr_type" for images in "list_sha1s"
:param list_sha1s: list of images sha1
:type list_sha1s: list
:param extr_type: extraction type
:type extr_type: str
:param feat_type_decode: type of feature (to know how to decode in featB64decode)
:type feat_type_decode: str
:return: (samples_id, feats) tuple of list of sample ids and corresponding features
:rtype: tuple
"""
from ..featurizer.featsio import featB64decode
# Cannot use column filters here...
has_detection = False
if "_".join(extr_type.split("_")[-2:]) != "full_image":
has_detection = True
# sbpycaffe is saved as np.float32 while dlib face features are np.float64
if feat_type_decode is None:
feat_type_decode = extr_type.split("_")[0]
# We need to get all extractions and parse them for matches with extr_type...
# We could read image infos if we need to filter things out based on format and/or image size
rows = self.get_columns_from_sha1_rows(list_sha1s, columns=[self.extrcf])
samples_id = []
feats = []
for row in rows:
for key in row[1]:
notinfocol = not key.endswith("_updateid") and \
not key.endswith(EXTR_STR_PROCESSED) and not key.endswith(EXTR_STR_FAILED)
if key.startswith(self.extrcf + ":" + extr_type) and notinfocol:
# Get sample id
if not has_detection:
sid = str(row[0])
else:
# parse to get id, sha1 + detection_box
sid = str(row[0])+"_"+"_".join(key.split("_")[4:8])
# Get feature
try:
feat = featB64decode(row[1][key], feat_type_decode)
# Add sample id and feature
samples_id.append(sid)
feats.append(feat)
except Exception as inst:
msg = "[{}.get_features_from_sha1s: log] Failed to get feature for image {}"
print(msg.format(self.pp, row[0]))
#full_trace_error(msg.format(self.pp, row[0]))
#raise inst
if self.verbose > 0:
msg = "[{}: info] Got {}/{} rows and {} features."
print(msg.format(self.pp, len(rows), len(list_sha1s), len(samples_id)))
return samples_id, feats
def get_missing_extr_sha1s(self, list_sha1s, extr_type, skip_failed=False):
"""Get list of images sha1 for which "extr_type" was not computed.
:param list_sha1s: list of images sha1
:type list_sha1s: list
:param extr_type: extraction type
:type extr_type: str
:return: list of sha1 without extraction
:rtype: list
"""
rows = self.get_columns_from_sha1_rows(list_sha1s, columns=[self.extrcf])
sha1s_w_extr = set()
for row in rows:
for key in row[1]:
kstart = key.startswith(self.extrcf + ":" + extr_type)
kfailed = (skip_failed and key.endswith(EXTR_STR_FAILED) and row[1][key]==str(1))
kend = key.endswith(EXTR_STR_PROCESSED) or kfailed
if kstart and kend:
sha1s_w_extr.add(str(row[0]))
return list(set(list_sha1s) - sha1s_w_extr)
# DEPRECATED
# # use http://happybase.readthedocs.io/en/latest/api.html?highlight=scan#happybase.Table.scan?
# def scan_with_prefix(self, table_name, row_prefix=None, columns=None, maxrows=10, perr=0,
# inst=None):
# self.check_errors(perr, "scan_with_prefix", inst)
# try:
# with self.pool.connection(timeout=self.timeout) as connection:
# hbase_table = connection.table(table_name)
# # scan table for rows with row_prefix, accumulate in rows information of requested columns
# rows = []
# for one_row in hbase_table.scan(row_prefix=row_prefix, columns=columns, batch_size=10):
# # print "one_row:",one_row
# rows.extend((one_row,))
# if len(rows) >= maxrows:
# return rows
# if self.verbose:
# print("[{}.scan_with_prefix: log] got {} rows.".format(self.pp, len(rows)))
# sys.stdout.flush()
# return rows
# except Exception as inst:
# print("[{}.scan_with_prefix: error] {}".format(self.pp, inst))
# # try to force longer sleep time...
# self.refresh_hbase_conn("scan_with_prefix", sleep_time=4 * perr)
# return self.scan_with_prefix(table_name, row_prefix=row_prefix, columns=columns,
# maxrows=maxrows, previous_err=perr + 1, inst=inst)
# # DEPRECATED
# def get_batch_update(self, list_sha1s):
# l = len(list_sha1s)
# for ndx in range(0, l, self.batch_update_size):
# yield list_sha1s[ndx:min(ndx + self.batch_update_size, l)]
# # DEPRECATED
# def push_list_updates(self, list_sha1s, previous_err=0, inst=None):
# self.check_errors(previous_err, "push_list_updates", inst)
# today = None
# dict_updates = dict()
# # Build batches of self.batch_update_size of images updates
# # NB: this batching is redundant with what is done in 'full_image_updater_kafka_to_hbase'
# # but ensures batches have the right size even if called from somewhere else...
# for batch_list_sha1s in self.get_batch_update(list_sha1s):
# update_id, today = self.get_next_update_id(today)
# dict_updates[update_id] = {self.column_list_sha1s: ','.join(batch_list_sha1s)}
# # Push them
# self.push_dict_rows(dict_updates, self.table_updateinfos_name)
# # DEPRECATED
# def push_list_updates(self, list_sha1s, update_id):
# """ Push the 'update_id' composed of the images in 'list_sha1s' to 'table_updateinfos_name'.
#
# :param list_sha1s: list of the images SHA1
# :param update_id: update identifier
# """
# # Build update dictionary
# dict_updates = dict()
# dict_updates[update_id] = {self.get_col_listsha1s(): ','.join(list_sha1s)}
# # Push it
# self.push_dict_rows(dict_updates, self.table_updateinfos_name)
|
the-stack_0_25723
|
import discord
from discord.ext import commands
import os
import cogs
from myutils import MyUtils
TOKEN = os.getenv("BOT_TOKEN")
PREFIX = "&"
bot = commands.Bot(command_prefix=PREFIX, description="Bot de l'ASTUS")
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Game(name="Help"))
print(f'{bot.user} has connected to Discord!')
@bot.event
async def on_message(message):
if not message.author.bot:
if message.content.lower() == "ping":
await message.channel.send("pong")
await bot.change_presence(activity=discord.Game(name="Ping-Pong"))
if message.content.lower().replace(" ", "") in ["astusbot", "botastus", ]:
            await message.channel.send("Le bot de l'astus pour te servir, tu as besoin de savoir ce que tu peux "
                                       "me demander ? tape ``" + PREFIX + "help `` pour avoir une liste de ce que "
"je sais faire. \n Sinon ``" + PREFIX +
"help [sujet]`` te permet "
"d'avoir de l'aide sur un sujet en particulier :wink:")
await bot.process_commands(message)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send("Mmmmmmh, j'ai bien l'impression que cette commande n'existe pas :/")
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Il manque un argument.")
if isinstance(error, commands.CheckFailure) or isinstance(error, commands.MissingPermissions):
await ctx.send("Oups tu ne peux pas utiliser cette commande.")
if isinstance(error, discord.Forbidden):
await ctx.send("Oups, je n'ai pas les permissions nécessaires pour faire cette commmande")
@bot.event
async def on_raw_reaction_add(payload):
messageID = payload.message_id
if messageID == 726611125252128768:
await bot.change_presence(activity=discord.Game(name="Give some roles"))
guildID = payload.guild_id
guild = discord.utils.find(lambda g: g.id == guildID, bot.guilds)
if payload.emoji.name == '3️⃣':
# print("3TC")
await payload.member.add_roles(MyUtils(guild).get3TCRole(),
MyUtils(guild).getStudentRole())
elif payload.emoji.name == '4️⃣':
# print("4TC")
await payload.member.add_roles(MyUtils(guild).get4TCRole(),
MyUtils(guild).getStudentRole())
elif payload.emoji.name == '5️⃣':
# print("5TC")
await payload.member.add_roles(MyUtils(guild).get5TCRole(),
MyUtils(guild).getStudentRole())
elif payload.emoji.name == '🇦':
# print("TCA")
await payload.member.add_roles(MyUtils(guild).getTCARole())
elif payload.emoji.name == '👨🏫':
# print("Prof")
await payload.member.add_roles(MyUtils(guild).getProfRole())
elif payload.emoji.name == '🎓':
# print("Diplomes")
await payload.member.add_roles(MyUtils(guild).getDiplomesRole())
elif payload.emoji.name == '🆕':
# print("Futur TC")
await payload.member.add_roles(MyUtils(guild).getFuturTCRole())
elif payload.emoji.name == "💼":
await payload.member.add_roles(MyUtils(guild).getEntrepriseRole())
@bot.event
async def on_member_update(before, after):
if len(before.roles) < len(after.roles):
new_role = next(role for role in after.roles if role not in before.roles)
chan = await after.create_dm()
if new_role.name == "3 TC":
await chan.send("\nSalut, tu as le rôle 3TC sur le serveur de l'astus, ce rôle te permet de voir la "
"catégorie 3TC et de discuter avec tes camarades")
if new_role.name == "4 TC":
await chan.send("\nSalut, tu as le rôle 4TC sur le serveur de l'astus, ce rôle te permet de voir la "
"catégorie 4TC et de discuter avec tes camarades")
if new_role.name == "5 TC":
await chan.send("\nSalut, tu as le rôle 5TC sur le serveur de l'astus, ce rôle te permet de voir la "
"catégorie 5TC et de discuter avec tes camarades")
if new_role.name == "Futur TC":
await chan.send(
"\nSalut, et bienvenue à toi Futur TC. Tu as accès à la categorie Integration du serveur :wave: \n\n "
"Le bureau de l'Astus est prêt à t'accueillir et à te faire passer une intégration que tu n'oublieras "
"pas , crois nous ! ( tout dans le respect des gestes barrières :man_pouting: :left_right_arrow: "
":deaf_person: , le gel hydroalcoolique sera notre meilleur ami ). \n"
":arrow_forward: Point intégration : La rentrée est initialement prévue le 14 septembre 2020, mais une "
"rumeur de Covid complique les choses. Donc pour le moment on se base dessus, et on prévoie de vous "
"organiser une inté à partir du jeudi 10 au soir. \n"
":arrow_forward: Si ce n'est pas déjà le cas, on t'invite à rejoindre le groupe Facebook de la promo, "
"où des informations tourneront aussi par rapport aux activités en journée"
" www.facebook.com/groups/tc2023/ \n "
":arrow_forward: Questions réponses : Ce chanel est dédié à répondre à toutes vos questions sur"
" l'intégration, que ce soit d'un point de vue logistique ou même sur l'organisation globale de "
"celle- ci. N'hésite pas, nous serons nombreux à pouvoir te répondre ! \n")
if new_role.name == "Student":
await chan.send("\n:wave: Bienvenue sur le serveur de l'ASTUS, tu trouveras plusieurs categories sur le "
"serveur. \n \n"
"``Général`` ici des annonces de l'ASTUS seront faites. Vous pouvez voir un channel \n"
"``gestion-music`` qui permet d'utiliser l'enceinte de l'ASTUS \n"
"``Que deviens- tu ? `` Tu as envie de parler de ton expérience à l'issu de ton parcours "
"TC? Des conseils à donner aux futurs diplômés? Cet espace est fait pour toi ! "
" :man_technologist: \n"
"Au contraire, tu es un étudiant concerné par ce que deviennent les anciens diplômés,"
" c'est aussi ici que tu peux t'exprimer ! \n"
"``Section Astus `` ( Accès attribués aux étudiants ): "
"Alors là, vous faites ce que vous voulez, quand vous voulez. Le chanel modélise le local "
"de l'asso, donc on modère uniquement en cas de propos haineux, racistes, ou toute la liste"
" qui suit. C'est votre espace détente donc lâchez vous, ça compte aussi pour les "
"futurs 3TC ! \n"
"``Section intégration `` Le bureau de l'Astus est prêt à t'accueillir et à te faire passer"
" une intégration que tu n'oublieras pas , crois nous ! ( tout dans le respect des gestes "
"barrières :man_pouting: :left_right_arrow: :deaf_person: , le gel hydroalcoolique sera "
"notre meilleur ami ). \n "
":arrow_forward: Point intégration : La rentrée est initialement prévue le 14 septembre"
" 2020, mais une rumeur de Covid complique les choses. Donc pour le moment on se base "
"dessus, et on prévoie de vous organiser une inté à partir du jeudi 10 au soir. \n"
":arrow_forward: Si ce n'est pas déjà le cas, on t'invite à rejoindre le groupe "
"Facebook de la promo, où des informations tourneront aussi par rapport aux activités "
"en journée www.facebook.com/groups/tc2023/ \n "
":arrow_forward: Questions réponses : Ce chanel est dédié à répondre à toutes "
"vos questions sur l'intégration, que ce soit d'un point de vue logistique ou même sur "
"l'organisation globale de celle- ci. N'hésite pas, nous serons nombreux "
"à pouvoir te répondre ! \n"
)
if new_role.name in ["Prof", "Diplômés"]:
await chan.send("\n:wave:Madame, Monsieur, \n"
"Bienvenue sur le serveur de l'ASTUS, vous trouverez plusieurs categories sur le "
"serveur. :speaking_head: \n \n"
":arrow_forward: ``Général`` ici des annonces de l'ASTUS seront faites. \n"
":arrow_forward: ``gestion-music`` qui permet d'utiliser l'enceinte de l'ASTUS \n"
":arrow_forward: ``Un Boulot / Stage`` , permet de mettre en relation des dipômés avec "
"les TC actuels "
"afin de trouver un stage ou un emploi pour les 5TC qui vont avoir leur diplôme \n"
" :arrow_forward: Garder le contact, permet de discuter avec des diplômés de leur"
" parcours\n "
)
if new_role.name == "Admin Groupe de Travail":
await chan.send("\nTu es un admin des groupes de travail mis en place par l'ASTUS, tu peux créer, "
"supprimer des channels dans la categorie groupe de travail afin de les animer "
"au mieux. May the force be with you ! :man_technologist: \n"
)
if new_role.name == "ASTUS":
            await chan.send("\nBienvenue à l'ASTUS ! \n"
                            "Tout d'abord, félicitation à toi pour avoir intégré l'ASTUS :wink: \n"
                            "Tu as maintenant accés à la categorie ASTUS, tu retrouveras un channel général pour "
                            "parler avec tous tes p'tits potes de l'ASTUS. Il y a aussi channel passation pour "
                            "parler avec l'ancien G4 de la gestion de l'ASTUS quand la fameuse heure viendra."
                            " En fonction de ton rôle, tu ne vois pas certains "
"channel, je t'explique tout cela rapidement :wink:\n")
if new_role.name == "G4":
            await chan.send("\nUn grand pouvoir implique de grandes responsabilités. C'est grâce à toi que l'astus "
                            "peut tourner. Tu as accès à quelques commandes de gestion du serveur (plus d'info "
                            "avec ``" + PREFIX + "help`` sur le serveur)\n")
if new_role.name == "Team Event":
await chan.send("\nC'est toi qui va nous régaler avec tout pleins d'Event. Un channel dans la catégorie "
"ASTUS t'es réservé\n")
if new_role.name == "Resp Team Event":
await chan.send("\nYou run the Team Event; for that you have access to a dedicated channel with your team\n")
if new_role.name == "Team Entreprise":
await chan.send("\nYou are the one who will treat us to plenty of meetings with companies. "
"A channel in the ASTUS category is reserved for you\n")
if new_role.name == "Resp Team Entreprise":
await chan.send("\nYou run the Team Entreprise; for that you have access to a dedicated channel with your team\n")
if new_role.name == "Resp Site International":
await chan.send("\nWebsite manager! \n"
"The site evolves thanks to you; ask to be added to the "
"GitHub repo :wink:\n")
if new_role.name == "Resp Comm":
await chan.send("\nCommunications manager! \n"
"ASTUS counts on you for as much outreach as possible. You manage the ASTUS Facebook page. "
"You make the announcements and posters for all the events\n ")
if new_role.name == "Entreprise":
await chan.send("\n:wave:Dear Madam or Sir, \n"
"Welcome to the ASTUS server; you will find several categories on the "
"server. :speaking_head: \n \n"
":arrow_forward: ``Général``: everyone can talk here (teachers, students, companies, "
"graduates). \n"
":arrow_forward: ``Un Boulot / Stage``: connects students with "
"companies. \n"
)
if __name__ == '__main__':
# Remove default help command
bot.remove_command("help")
# cogs
bot.add_cog(cogs.CogPassation(bot, PREFIX))
bot.add_cog(cogs.CogNewyear(bot))
bot.add_cog(cogs.CogHelp(bot))
bot.add_cog(cogs.CogVideoDiplomes(bot))
bot.add_cog(cogs.CogInvitation(bot))
bot.add_cog(cogs.CogIpInfo(bot))
bot.add_cog(cogs.CogLookup(bot))
bot.add_cog(cogs.CogInternational(bot))
bot.add_cog(cogs.CogVendrediChill(bot))
bot.run(TOKEN)
|
the-stack_0_25725
|
class Solution:
def gardenNoAdj(self, N: int, paths):
res = [0] * N
G = [[] for i in range(N)]
for x, y in paths:
G[x - 1].append(y - 1)
G[y - 1].append(x - 1)
for i in range(1, N + 1):
res[i - 1] = ({1, 2, 3, 4} - {res[j] for j in G[i - 1]}).pop()
return res
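# A minimal usage sketch, not part of the original snippet; the garden count and
# paths below are illustrative only.
if __name__ == "__main__":
    solver = Solution()
    # Three gardens connected in a triangle must all get different flower types.
    print(solver.gardenNoAdj(3, [[1, 2], [2, 3], [3, 1]]))  # e.g. [1, 2, 3]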
|
the-stack_0_25726
|
"""
=====================
Demo: demo_avecppo
=====================
"""
from rlberry.agents.torch import AVECPPOAgent
from rlberry.envs.benchmarks.ball_exploration import PBall2D
env = PBall2D()
n_episodes = 400
horizon = 256
agent = AVECPPOAgent(
env, horizon=horizon, gamma=0.99, learning_rate=0.00025, eps_clip=0.2, k_epochs=4
)
agent.fit(budget=n_episodes)
env.enable_rendering()
state = env.reset()
for tt in range(200):
action = agent.policy(state)
next_state, reward, done, _ = env.step(action)
state = next_state
env.render()
|
the-stack_0_25727
|
# -*- coding: utf-8 -*-
"""Download functionality for UMLS."""
import logging
import zipfile
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union
import bs4
import pystow
import pystow.utils
import requests
from pystow.utils import name_from_url
__all__ = [
"download_tgt",
"download_umls",
"download_umls_metathesaurus",
"open_umls",
]
logger = logging.getLogger(__name__)
MODULE = pystow.module("bio", "umls")
TGT_URL = "https://utslogin.nlm.nih.gov/cas/v1/api-key"
def download_tgt(url: str, path: Union[str, Path], *, api_key: Optional[str] = None) -> None:
"""Download a file via the UMLS ticket granting system.
This implementation is based on the instructions listed at
https://documentation.uts.nlm.nih.gov/automating-downloads.html.
:param url: The URL of the file to download, like
``https://download.nlm.nih.gov/umls/kss/2021AB/umls-2021AB-mrconso.zip``
:param path: The local file path where the file should be downloaded
:param api_key: An API key. If not given, is looked up using
:func:`pystow.get_config` with the ``umls`` module and ``api_key`` key.
"""
api_key = pystow.get_config("umls", "api_key", passthrough=api_key, raise_on_missing=True)
# Step 1: get a link to the ticket granting system (TGT)
auth_res = requests.post(TGT_URL, data={"apikey": api_key})
# for some reason, this API returns HTML. This needs to be parsed,
# and there will be a form whose action is the next thing to POST to
soup = bs4.BeautifulSoup(auth_res.text, features="html.parser")
action_url = soup.find("form").attrs["action"]
logger.info("[umls] got TGT url: %s", action_url)
# Step 2: get a service ticket for the file you want to download
# by POSTing to the action URL with the name of the URL you actually
# want to download inside the form data
key_res = requests.post(action_url, data={"service": url})
# luckily this one just returns the text you need
service_ticket = key_res.text
logger.info("[umls] got service ticket: %s", service_ticket)
# Step 3: actually try downloading the file you want, using the
# service ticket issued in the last step as a query parameter
pystow.utils.download(
url=url,
path=path,
backend="requests",
params={"ticket": service_ticket},
)
def _download_umls(
url: str, version: Optional[str] = None, *, api_key: Optional[str] = None, force: bool = False
) -> Path:
if version is None:
import bioversions
version = bioversions.get_version("umls")
path = MODULE.join(version, name=name_from_url(url))
if path.is_file() and not force:
return path
download_tgt(url, path, api_key=api_key)
return path
def download_umls(
version: Optional[str] = None, *, api_key: Optional[str] = None, force: bool = False
) -> Path:
"""Ensure the given version of the UMLS MRCONSO.RRF file.
:param version: The version of UMLS to ensure. If not given, is looked up
with :mod:`bioversions`.
:param api_key: An API key. If not given, is looked up using
:func:`pystow.get_config` with the ``umls`` module and ``api_key`` key.
:param force: Should the file be re-downloaded, even if it already exists?
:return: The path of the file for the given version of UMLS.
"""
url = f"https://download.nlm.nih.gov/umls/kss/{version}/umls-{version}-mrconso.zip"
return _download_umls(url=url, version=version, api_key=api_key, force=force)
def download_umls_metathesaurus(
version: Optional[str] = None, *, api_key: Optional[str] = None, force: bool = False
) -> Path:
"""Ensure the given version of the UMLS metathesaurus zip archive.
:param version: The version of UMLS to ensure. If not given, is looked up
with :mod:`bioversions`.
:param api_key: An API key. If not given, is looked up using
:func:`pystow.get_config` with the ``umls`` module and ``api_key`` key.
:param force: Should the file be re-downloaded, even if it already exists?
:return: The path of the file for the given version of UMLS.
"""
url = f"https://download.nlm.nih.gov/umls/kss/{version}/umls-{version}-metathesaurus.zip"
return _download_umls(url=url, version=version, api_key=api_key, force=force)
@contextmanager
def open_umls(version: Optional[str] = None, *, api_key: Optional[str] = None, force: bool = False):
"""Ensure and open the UMLS MRCONSO.RRF file from the given version.
:param version: The version of UMLS to ensure. If not given, is looked up
with :mod:`bioversions`.
:param api_key: An API key. If not given, is looked up using
:func:`pystow.get_config` with the ``umls`` module and ``api_key`` key.
:param force: Should the file be re-downloaded, even if it already exists?
:yields: The file, which is used in the context manager.
"""
path = download_umls(version=version, api_key=api_key, force=force)
with zipfile.ZipFile(path) as zip_file:
with zip_file.open("MRCONSO.RRF", mode="r") as file:
yield file
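# A minimal usage sketch, not part of the original module; it assumes a valid UMLS
# API key is configured for pystow and that the 2021AB release is downloadable.
if __name__ == "__main__":
    with open_umls(version="2021AB") as mrconso:
        # MRCONSO.RRF is pipe-delimited; show the first few fields of the first row.
        first_line = next(iter(mrconso))
        print(first_line.decode("utf-8").split("|")[:5])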
|
the-stack_0_25728
|
import numpy as np
import pytest
import aesara.tensor as at
from aesara.graph.fg import FunctionGraph
from aesara.tensor.type import matrix
from aesara.tensor.utils import hash_from_ndarray, shape_of_variables
def test_hash_from_ndarray():
hashes = []
rng = np.random.rand(5, 5)
for data in [
-2,
-1,
0,
1,
2,
np.zeros((1, 5)),
np.zeros((1, 6)),
# Data buffer empty but different shapes
np.zeros((1, 0)),
np.zeros((2, 0)),
# Same data buffer and shapes but different strides
np.arange(25).reshape(5, 5),
np.arange(25).reshape(5, 5).T,
# Same data buffer, shapes and strides but different dtypes
np.zeros((5, 5), dtype="uint32"),
np.zeros((5, 5), dtype="int32"),
# Test slice
rng,
rng[1:],
rng[:4],
rng[1:3],
rng[::2],
rng[::-1],
]:
data = np.asarray(data)
hashes.append(hash_from_ndarray(data))
assert len(set(hashes)) == len(hashes)
# test that different type of views and their copy give the same hash
assert hash_from_ndarray(rng[1:]) == hash_from_ndarray(rng[1:].copy())
assert hash_from_ndarray(rng[1:3]) == hash_from_ndarray(rng[1:3].copy())
assert hash_from_ndarray(rng[:4]) == hash_from_ndarray(rng[:4].copy())
assert hash_from_ndarray(rng[::2]) == hash_from_ndarray(rng[::2].copy())
assert hash_from_ndarray(rng[::-1]) == hash_from_ndarray(rng[::-1].copy())
class TestShapeOfVariables:
def test_simple(self):
x = matrix("x")
y = x + x
fgraph = FunctionGraph([x], [y], clone=False)
shapes = shape_of_variables(fgraph, {x: (5, 5)})
assert shapes == {x: (5, 5), y: (5, 5)}
x = matrix("x")
y = at.dot(x, x.T)
fgraph = FunctionGraph([x], [y], clone=False)
shapes = shape_of_variables(fgraph, {x: (5, 1)})
assert shapes[x] == (5, 1)
assert shapes[y] == (5, 5)
def test_subtensor(self):
x = matrix("x")
subx = x[1:]
fgraph = FunctionGraph([x], [subx], clone=False)
shapes = shape_of_variables(fgraph, {x: (10, 10)})
assert shapes[subx] == (9, 10)
def test_err(self):
x = matrix("x")
subx = x[1:]
fgraph = FunctionGraph([x], [subx])
with pytest.raises(ValueError):
shape_of_variables(fgraph, {x: (10, 10)})
|
the-stack_0_25730
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import absolute_import
import numpy as np
from akg.utils import kernel_exec as utils
from gen_random import random_gaussian
from akg.utils.result_analysis import gpu_profiling
from akg.utils.format_transform import to_tvm_nd_array
from test_fused_pattern_grad import relu_grad_np, bn_beta_grad_np, bn_gamma_grad_np
from akg.ops.poly_gpu import fused_relu_grad_bn_update_grad_manual, fused_relu_grad_bn_update_grad_auto
def compute_expect(data_sum, in_bn, head_active, in_active, layout):
out_dtype = data_sum.dtype
relugrad = relu_grad_np(head_active, in_active).astype(out_dtype)
inbn_cast = in_bn.astype(out_dtype)
bn_beta_ad = bn_beta_grad_np(relugrad, layout)
bn_gamma_ad = bn_gamma_grad_np(relugrad, inbn_cast, data_sum, layout)
return [bn_gamma_ad, bn_beta_ad]
def gen_data(shape, out_shape, dtype, out_dtype, layout):
support_list = {"float16": np.float16, "float32": np.float32}
head = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
data_sum = random_gaussian(out_shape, miu=1, sigma=0.1).astype(support_list[out_dtype])
in_bn = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
in_active = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
output = np.full(out_shape, np.nan, out_dtype)
expect = compute_expect(data_sum, in_bn, head, in_active, layout)
return head, data_sum, in_bn, in_active, output, expect
def test_fused_relu_grad_bn_update_grad(
shape,
out_shape,
dtype="float16",
layout="NHWC",
out_dtype="float32",
poly_sch=False):
shape_list = [out_shape, shape, shape, shape]
dtype_list = [out_dtype, dtype, dtype, dtype]
op_attrs = [layout]
if poly_sch:
mod = utils.op_build_test(
fused_relu_grad_bn_update_grad_auto,
shape_list,
dtype_list,
op_attrs=op_attrs,
kernel_name="fused_relu_grad_bn_update_grad_auto",
attrs={
"target": "cuda", "enable_akg_reduce_lib": True})
else:
mod = utils.op_build_test(
fused_relu_grad_bn_update_grad_manual,
shape_list,
dtype_list,
kernel_name="fused_relu_grad_bn_update_grad_manual",
op_attrs=op_attrs)
head, data_sum, in_bn, in_active, output, expect = gen_data(shape, out_shape, dtype, out_dtype, layout)
outputs = [output, output]
inputs = [data_sum, in_bn, head, in_active]
arg_list = inputs + outputs
outputs = utils.mod_launch(mod, arg_list, outputs=tuple(range(-len(outputs), 0)), expect=expect)
res = np.allclose(outputs, expect, rtol=5e-03, atol=1.e-8)
print("Test {}".format("Pass" if res else "Fail"))
if not res:
print("Error cuda:========================")
print(mod.imported_modules[0].get_source())
raise AssertionError("Test fail")
inputs = to_tvm_nd_array(inputs)
expect = to_tvm_nd_array(expect)
return True
|
the-stack_0_25731
|
#! /usr/bin/env python
"""bro %s -- compression/decompression utility using the Brotli algorithm."""
from __future__ import print_function
import argparse
import sys
import os
import brotli
import platform
# default values of encoder parameters
DEFAULT_PARAMS = {
'mode': brotli.MODE_GENERIC,
'quality': 11,
'lgwin': 22,
'lgblock': 0,
}
def get_binary_stdio(stream):
""" Return the specified standard input, output or errors stream as a
'raw' buffer object suitable for reading/writing binary data from/to it.
"""
assert stream in ['stdin', 'stdout', 'stderr'], "invalid stream name"
stdio = getattr(sys, stream)
if sys.version_info[0] < 3:
if sys.platform == 'win32':
# set I/O stream binary flag on python2.x (Windows)
runtime = platform.python_implementation()
if runtime == "PyPy":
# the msvcrt trick doesn't work in pypy, so I use fdopen
mode = "rb" if stream == "stdin" else "wb"
stdio = os.fdopen(stdio.fileno(), mode, 0)
else:
# this works with CPython -- untested on other implementations
import msvcrt
msvcrt.setmode(stdio.fileno(), os.O_BINARY)
return stdio
else:
# get 'buffer' attribute to read/write binary data on python3.x
if hasattr(stdio, 'buffer'):
return stdio.buffer
else:
orig_stdio = getattr(sys, "__%s__" % stream)
return orig_stdio.buffer
def main(args=None):
parser = argparse.ArgumentParser(
prog='bro.py',
description="Compression/decompression utility using the Brotli algorithm.")
parser.add_argument('--version', action='version', version=brotli.__version__)
parser.add_argument('-i', '--input', metavar='FILE', type=str, dest='infile',
help='Input file', default=None)
parser.add_argument('-o', '--output', metavar='FILE', type=str, dest='outfile',
help='Output file', default=None)
parser.add_argument('-f', '--force', action='store_true',
help='Overwrite existing output file', default=False)
parser.add_argument('-d', '--decompress', action='store_true',
help='Decompress input file', default=False)
params = parser.add_argument_group('optional encoder parameters')
params.add_argument('-m', '--mode', metavar="MODE", type=int, choices=[0, 1, 2],
help='The compression mode can be 0 for generic input, '
'1 for UTF-8 encoded text, or 2 for WOFF 2.0 font data. '
'Defaults to 0.')
params.add_argument('-q', '--quality', metavar="QUALITY", type=int,
choices=list(range(0, 12)),
help='Controls the compression-speed vs compression-density '
'tradeoff. The higher the quality, the slower the '
'compression. Range is 0 to 11. Defaults to 11.')
params.add_argument('--lgwin', metavar="LGWIN", type=int,
choices=list(range(10, 25)),
help='Base 2 logarithm of the sliding window size. Range is '
'10 to 24. Defaults to 22.')
params.add_argument('--lgblock', metavar="LGBLOCK", type=int,
choices=[0] + list(range(16, 25)),
help='Base 2 logarithm of the maximum input block size. '
'Range is 16 to 24. If set to 0, the value will be set based '
'on the quality. Defaults to 0.')
params.add_argument('--custom-dictionary', metavar="FILE", type=str, dest='dictfile',
help='Custom dictionary file.', default = None)
# set default values using global DEFAULT_PARAMS dictionary
parser.set_defaults(**DEFAULT_PARAMS)
options = parser.parse_args(args=args)
if options.infile:
if not os.path.isfile(options.infile):
parser.error('file "%s" not found' % options.infile)
with open(options.infile, "rb") as infile:
data = infile.read()
else:
if sys.stdin.isatty():
# interactive console, just quit
parser.error('no input')
infile = get_binary_stdio('stdin')
data = infile.read()
if options.outfile:
if os.path.isfile(options.outfile) and not options.force:
parser.error('output file exists')
outfile = open(options.outfile, "wb")
else:
outfile = get_binary_stdio('stdout')
if options.dictfile:
if not os.path.isfile(options.dictfile):
parser.error('file "%s" not found' % options.dictfile)
with open(options.dictfile, "rb") as dictfile:
custom_dictionary = dictfile.read()
else:
custom_dictionary = ''
try:
if options.decompress:
data = brotli.decompress(data, dictionary=custom_dictionary)
else:
data = brotli.compress(
data, mode=options.mode, quality=options.quality,
lgwin=options.lgwin, lgblock=options.lgblock, dictionary=custom_dictionary)
except brotli.error as e:
parser.exit(1, 'bro: error: %s: %s' % (e, options.infile or 'sys.stdin'))
outfile.write(data)
outfile.close()
if __name__ == '__main__':
main()
|
the-stack_0_25732
|
from symbolic import SymbolicReference
import os
from git.objects import Object
from git.util import (
LazyMixin,
Iterable,
)
from gitdb.util import (
isfile,
hex_to_bin
)
__all__ = ["Reference"]
class Reference(SymbolicReference, LazyMixin, Iterable):
"""Represents a named reference to any object. Subclasses may apply restrictions though,
i.e. Heads can only point to commits."""
__slots__ = tuple()
_points_to_commits_only = False
_resolve_ref_on_create = True
_common_path_default = "refs"
def __init__(self, repo, path):
"""Initialize this instance
:param repo: Our parent repository
:param path:
Path relative to the .git/ directory pointing to the ref in question, i.e.
refs/heads/master"""
if not path.startswith(self._common_path_default+'/'):
raise ValueError("Cannot instantiate %r from path %s" % ( self.__class__.__name__, path ))
super(Reference, self).__init__(repo, path)
def __str__(self):
return self.name
def set_object(self, object, logmsg = None):
"""Special version which checks if the head-log needs an update as well"""
oldbinsha = None
if logmsg is not None:
head = self.repo.head
if not head.is_detached and head.ref == self:
oldbinsha = self.commit.binsha
#END handle commit retrieval
#END handle message is set
super(Reference, self).set_object(object, logmsg)
if oldbinsha is not None:
# /* from refs.c in git-source
# * Special hack: If a branch is updated directly and HEAD
# * points to it (may happen on the remote side of a push
# * for example) then logically the HEAD reflog should be
# * updated too.
# * A generic solution implies reverse symref information,
# * but finding all symrefs pointing to the given branch
# * would be rather costly for this rare event (the direct
# * update of a branch) to be worth it. So let's cheat and
# * check with HEAD only which should cover 99% of all usage
# * scenarios (even 100% of the default ones).
# */
self.repo.head.log_append(oldbinsha, logmsg)
#END check if the head
# NOTE: No need to override properties, as they only work without the log
@property
def name(self):
""":return: (shortest) Name of this reference - it may contain path components"""
# first two path tokens can be removed as they are
# refs/heads or refs/tags or refs/remotes
tokens = self.path.split('/')
if len(tokens) < 3:
return self.path # could be refs/HEAD
return '/'.join(tokens[2:])
@classmethod
def iter_items(cls, repo, common_path = None):
"""Equivalent to SymbolicReference.iter_items, but will return non-detached
references as well."""
return cls._iter_items(repo, common_path)
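# A minimal usage sketch, not part of the original module; it assumes GitPython's
# Repo class is importable and that the working directory is a git checkout.
if __name__ == "__main__":
    from git import Repo
    repo = Repo(".")
    for ref in Reference.iter_items(repo):
        print(ref.name, ref.path)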
|
the-stack_0_25733
|
"""
===========
Knuth Miles
===========
`miles_graph()` returns an undirected graph over the 128 US cities from the Stanford GraphBase data file. The
cities each have location and population data. The edges are labeled with the
distance between the two cities.
This example is described in Section 1.1 of
Donald E. Knuth, "The Stanford GraphBase: A Platform for Combinatorial
Computing", ACM Press, New York, 1993.
http://www-cs-faculty.stanford.edu/~knuth/sgb.html
The data file can be found at:
- https://github.com/networkx/networkx/blob/master/examples/drawing/knuth_miles.txt.gz
"""
import gzip
import re
import matplotlib.pyplot as plt
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
fh = gzip.open("knuth_miles.txt.gz", "r")
G = nx.Graph()
G.position = {}
G.population = {}
cities = []
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind = re.compile(r"^\d+")
if numfind.match(line): # this line is distances
dist = line.split()
for d in dist:
G.add_edge(city, cities[i], weight=int(d))
i = i + 1
else: # this line is a city, position, population
i = 1
(city, coordpop) = line.split("[")
cities.insert(0, city)
(coord, pop) = coordpop.split("]")
(y, x) = coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city] = (-int(x) + 7500, int(y) - 3000)
G.population[city] = float(pop) / 1000.0
return G
G = miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print(f"graph has {nx.number_of_nodes(G)} nodes with {nx.number_of_edges(G)} edges")
# make new graph of cities, edge if less than 300 miles between them
H = nx.Graph()
for v in G:
H.add_node(v)
for (u, v, d) in G.edges(data=True):
if d["weight"] < 300:
H.add_edge(u, v)
# draw with matplotlib/pylab
plt.figure(figsize=(8, 8))
# with nodes colored by degree sized by population
node_color = [float(H.degree(v)) for v in H]
nx.draw(
H,
G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False,
)
# scale the axes equally
plt.xlim(-5000, 500)
plt.ylim(-2000, 3500)
plt.show()
|
the-stack_0_25734
|
#!/usr/bin/env python
# Created by "Thieu" at 10:50, 21/03/2022 ----------%
# Email: [email protected] %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
from mealpy.swarm_based import PFA
from mealpy.optimizer import Optimizer
import numpy as np
import pytest
@pytest.fixture(scope="module") # scope: Call only 1 time at the beginning
def problem():
def fitness_function(solution):
return np.sum(solution ** 2)
problem = {
"fit_func": fitness_function,
"lb": [-10, -10, -10, -10, -10],
"ub": [10, 10, 10, 10, 10],
"minmax": "min",
"log_to": None
}
return problem
def test_PFA_results(problem):
models = [
PFA.BasePFA(problem, epoch=10, pop_size=50),
]
for model in models:
best_position, best_fitness = model.solve()
assert isinstance(model, Optimizer)
assert isinstance(best_position, np.ndarray)
assert len(best_position) == len(problem["lb"])
|
the-stack_0_25735
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'File I/O'
import os
with open('C:\\Users\\admin\\Desktop\\tmp\\tianmlin.txt', 'r', encoding='utf-8') as f:
for line in f.readlines():
print(line.strip())  # strip the trailing newline
print(os.environ)
|
the-stack_0_25739
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and extract the MovieLens dataset from GroupLens website.
Download the dataset, and perform data-preprocessing to convert the raw dataset
into csv file to be used in model training and evaluation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import os
import sys
import time
import zipfile
import numpy as np
import pandas as pd
from six.moves import urllib # pylint: disable=redefined-builtin
import tensorflow as tf
from official.recommendation import constants # pylint: disable=g-bad-import-order
# URL to download dataset
_DATA_URL = "http://files.grouplens.org/datasets/movielens/"
_USER_COLUMN = "user_id"
_ITEM_COLUMN = "item_id"
_RATING_COLUMN = "rating"
_TIMESTAMP_COLUMN = "timestamp"
# The number of negative examples attached with a positive example
# in training dataset. It is set as 100 in the paper.
_NUMBER_NEGATIVES = 100
# In both datasets, each user has at least 20 ratings.
_MIN_NUM_RATINGS = 20
RatingData = collections.namedtuple(
"RatingData", ["items", "users", "ratings", "min_date", "max_date"])
def _print_ratings_description(ratings):
"""Describe the rating dataset information.
Args:
ratings: A pandas DataFrame of the rating dataset.
"""
info = RatingData(items=len(ratings[_ITEM_COLUMN].unique()),
users=len(ratings[_USER_COLUMN].unique()),
ratings=len(ratings),
min_date=ratings[_TIMESTAMP_COLUMN].min(),
max_date=ratings[_TIMESTAMP_COLUMN].max())
tf.logging.info("{ratings} ratings on {items} items from {users} users"
" from {min_date} to {max_date}".format(**(info._asdict())))
def process_movielens(ratings, sort=True):
"""Sort and convert timestamp of the MovieLens dataset.
Args:
ratings: A pandas DataFrame of the rating dataset.
sort: A boolean to indicate whether to sort the data based on timestamp.
Returns:
ratings: The processed pandas DataFrame.
"""
ratings[_TIMESTAMP_COLUMN] = pd.to_datetime(
ratings[_TIMESTAMP_COLUMN], unit="s")
if sort:
ratings.sort_values(by=_TIMESTAMP_COLUMN, inplace=True)
_print_ratings_description(ratings)
return ratings
def load_movielens_1_million(file_name, sort=True):
"""Load the MovieLens 1 million dataset.
The file has no header row, and each line is in the following format:
UserID::MovieID::Rating::Timestamp
- UserIDs range between 1 and 6040
- MovieIDs range between 1 and 3952
- Ratings are made on a 5-star scale (whole-star ratings only)
- Timestamp is represented in seconds since midnight Coordinated Universal
Time (UTC) of January 1, 1970.
- Each user has at least 20 ratings
Args:
file_name: A string of the file name to be loaded.
sort: A boolean to indicate whether to sort the data based on timestamp.
Returns:
A processed pandas DataFrame of the rating dataset.
"""
names = [_USER_COLUMN, _ITEM_COLUMN, _RATING_COLUMN, _TIMESTAMP_COLUMN]
ratings = pd.read_csv(file_name, sep="::", names=names, engine="python")
return process_movielens(ratings, sort=sort)
def load_movielens_20_million(file_name, sort=True):
"""Load the MovieLens 20 million dataset.
Each line of this file after the header row represents one rating of one movie
by one user, and has the following format:
userId,movieId,rating,timestamp
- The lines within this file are ordered first by userId, then, within user,
by movieId.
- Ratings are made on a 5-star scale, with half-star increments
(0.5 stars - 5.0 stars).
- Timestamps represent seconds since midnight Coordinated Universal Time
(UTC) of January 1, 1970.
- All the users had rated at least 20 movies.
Args:
file_name: A string of the file name to be loaded.
sort: A boolean to indicate whether to sort the data based on timestamp.
Returns:
A processed pandas DataFrame of the rating dataset.
"""
ratings = pd.read_csv(file_name)
names = {"userId": _USER_COLUMN, "movieId": _ITEM_COLUMN}
ratings.rename(columns=names, inplace=True)
return process_movielens(ratings, sort=sort)
def load_file_to_df(file_name, sort=True):
"""Load rating dataset into DataFrame.
Two data loading functions are defined to handle dataset ml-1m and ml-20m,
as they are provided with different formats.
Args:
file_name: A string of the file name to be loaded.
sort: A boolean to indicate whether to sort the data based on timestamp.
Returns:
A pandas DataFrame of the rating dataset.
"""
dataset_name = os.path.basename(file_name).split(".")[0]
# ml-1m with extension .dat
file_extension = ".dat"
func = load_movielens_1_million
if dataset_name == "ml-20m":
file_extension = ".csv"
func = load_movielens_20_million
ratings_file = os.path.join(file_name, "ratings" + file_extension)
return func(ratings_file, sort=sort)
def generate_train_eval_data(df, original_users, original_items):
"""Generate the dataset for model training and evaluation.
Given all user and item interaction information, for each user, first sort
the interactions based on timestamp. Then the latest one is taken out as
Test ratings (leave-one-out evaluation) and the remaining data for training.
The Test negatives are randomly sampled from all non-interacted items, and the
number of Test negatives is 100 by default (defined as _NUMBER_NEGATIVES).
Args:
df: The DataFrame of ratings data.
original_users: A list of the original unique user ids in the dataset.
original_items: A list of the original unique item ids in the dataset.
Returns:
all_ratings: A list of the [user_id, item_id] with interactions.
test_ratings: A list of [user_id, item_id], and each line is the latest
user_item interaction for the user.
test_negs: A list of item ids with shape [num_users, 100].
Each line consists of 100 item ids for the user with no interactions.
"""
# Need to sort before popping to get last item
tf.logging.info("Sorting user_item_map by timestamp...")
df.sort_values(by=_TIMESTAMP_COLUMN, inplace=True)
all_ratings = set(zip(df[_USER_COLUMN], df[_ITEM_COLUMN]))
user_to_items = collections.defaultdict(list)
# Generate user_item rating matrix for training
t1 = time.time()
row_count = 0
for row in df.itertuples():
user_to_items[getattr(row, _USER_COLUMN)].append(getattr(row, _ITEM_COLUMN))
row_count += 1
if row_count % 50000 == 0:
tf.logging.info("Processing user_to_items row: {}".format(row_count))
tf.logging.info(
"Process {} rows in [{:.1f}]s".format(row_count, time.time() - t1))
# Generate test ratings and test negatives
t2 = time.time()
test_ratings = []
test_negs = []
# Generate the 0-based index for each item, and put it into a set
all_items = set(range(len(original_items)))
for user in range(len(original_users)):
test_item = user_to_items[user].pop() # Get the latest item id
all_ratings.remove((user, test_item)) # Remove the test item
all_negs = all_items.difference(user_to_items[user])
all_negs = sorted(list(all_negs)) # determinism
test_ratings.append((user, test_item))
test_negs.append(list(np.random.choice(all_negs, _NUMBER_NEGATIVES)))
if user % 1000 == 0:
tf.logging.info("Processing user: {}".format(user))
tf.logging.info("Process {} users in {:.1f}s".format(
len(original_users), time.time() - t2))
all_ratings = list(all_ratings) # convert set to list
return all_ratings, test_ratings, test_negs
def parse_file_to_csv(data_dir, dataset_name):
"""Parse the raw data to csv file to be used in model training and evaluation.
ml-1m dataset is small in size (~25M), while ml-20m is large (~500M). It may
take several minutes to process ml-20m dataset.
Args:
data_dir: A string, the directory with the unzipped dataset.
dataset_name: A string, the dataset name to be processed.
"""
# Use random seed as parameter
np.random.seed(0)
# Load the file as DataFrame
file_path = os.path.join(data_dir, dataset_name)
df = load_file_to_df(file_path, sort=False)
# Get the info of users who have more than 20 ratings on items
grouped = df.groupby(_USER_COLUMN)
df = grouped.filter(lambda x: len(x) >= _MIN_NUM_RATINGS)
original_users = df[_USER_COLUMN].unique()
original_items = df[_ITEM_COLUMN].unique()
# Map the ids of user and item to 0 based index for following processing
tf.logging.info("Generating user_map and item_map...")
user_map = {user: index for index, user in enumerate(original_users)}
item_map = {item: index for index, item in enumerate(original_items)}
df[_USER_COLUMN] = df[_USER_COLUMN].apply(lambda user: user_map[user])
df[_ITEM_COLUMN] = df[_ITEM_COLUMN].apply(lambda item: item_map[item])
assert df[_USER_COLUMN].max() == len(original_users) - 1
assert df[_ITEM_COLUMN].max() == len(original_items) - 1
# Generate data for train and test
all_ratings, test_ratings, test_negs = generate_train_eval_data(
df, original_users, original_items)
# Serialize to csv file. Each csv file contains three columns
# (user_id, item_id, interaction)
# As there are only two fields (user_id, item_id) in all_ratings and
# test_ratings, we need to add a fake rating to make three columns
df_train_ratings = pd.DataFrame(all_ratings)
df_train_ratings["fake_rating"] = 1
train_ratings_file = os.path.join(
FLAGS.data_dir, dataset_name + "-" + constants.TRAIN_RATINGS_FILENAME)
df_train_ratings.to_csv(
train_ratings_file,
index=False, header=False, sep="\t")
tf.logging.info("Train ratings is {}".format(train_ratings_file))
df_test_ratings = pd.DataFrame(test_ratings)
df_test_ratings["fake_rating"] = 1
test_ratings_file = os.path.join(
FLAGS.data_dir, dataset_name + "-" + constants.TEST_RATINGS_FILENAME)
df_test_ratings.to_csv(
test_ratings_file,
index=False, header=False, sep="\t")
tf.logging.info("Test ratings is {}".format(test_ratings_file))
df_test_negs = pd.DataFrame(test_negs)
test_negs_file = os.path.join(
FLAGS.data_dir, dataset_name + "-" + constants.TEST_NEG_FILENAME)
df_test_negs.to_csv(
test_negs_file,
index=False, header=False, sep="\t")
tf.logging.info("Test negatives is {}".format(test_negs_file))
def make_dir(file_dir):
if not tf.gfile.Exists(file_dir):
tf.logging.info("Creating directory {}".format(file_dir))
tf.gfile.MakeDirs(file_dir)
def main(_):
"""Download and extract the data from GroupLens website."""
tf.logging.set_verbosity(tf.logging.INFO)
make_dir(FLAGS.data_dir)
# Download the zip dataset
dataset_zip = FLAGS.dataset + ".zip"
file_path = os.path.join(FLAGS.data_dir, dataset_zip)
if not tf.gfile.Exists(file_path):
def _progress(count, block_size, total_size):
sys.stdout.write("\r>> Downloading {} {:.1f}%".format(
file_path, 100.0 * count * block_size / total_size))
sys.stdout.flush()
file_path, _ = urllib.request.urlretrieve(
_DATA_URL + dataset_zip, file_path, _progress)
statinfo = os.stat(file_path)
# A new line to clear the carriage return from download progress
# tf.logging.info is not applicable here
print()
tf.logging.info(
"Successfully downloaded {} {} bytes".format(
file_path, statinfo.st_size))
# Unzip the dataset
if not tf.gfile.Exists(os.path.join(FLAGS.data_dir, FLAGS.dataset)):
zipfile.ZipFile(file_path, "r").extractall(FLAGS.data_dir)
# Preprocess and parse the dataset to csv
train_ratings = FLAGS.dataset + "-" + constants.TRAIN_RATINGS_FILENAME
if not tf.gfile.Exists(os.path.join(FLAGS.data_dir, train_ratings)):
parse_file_to_csv(FLAGS.data_dir, FLAGS.dataset)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default="/tmp/movielens-data/",
help="Directory to download data and extract the zip.")
parser.add_argument(
"--dataset", type=str, default="ml-1m", choices=["ml-1m", "ml-20m"],
help="Dataset to be trained and evaluated.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(argv=[sys.argv[0]] + unparsed)
|
the-stack_0_25740
|
import matplotlib.pyplot as plt
# drawing used for math
def plot_2D_plane(right=5,up=5,left=-5,down=-5,fsize=(8,8)):
hpoints, vpoints = [],[]
for i in range(left,right+1):
if i!=0: hpoints.append(i)
for i in range(down,up+1):
if i!=0: vpoints.append(i)
ax = plt.figure(figsize=fsize).gca()
# Set identical scales for both axes
ax.set(xlim=(left-1,right+1), ylim=(down-1, up+1), aspect='equal')
# Remove top and right spines
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Set bottom and left spines as x and y axes of coordinate system
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
# Create minor ticks placed at each integer to enable drawing of minor grid
ax.set_xticks(hpoints)
ax.set_yticks(vpoints)
# Draw major and minor grid lines
ax.grid(which='both', color='grey', linewidth=1, linestyle='-', alpha=0.2)
# Create 'x' and 'y' labels placed at the end of the axes
ax.set_xlabel('x', size=14, labelpad=-24, x=1.03)
ax.set_ylabel('y', size=14, labelpad=-21, y=1.02, rotation=0)
# Draw arrows
arrow_fmt = dict(markersize=4, color='black', clip_on=False)
ax.plot((1), (0), marker='>', transform=ax.get_yaxis_transform(), **arrow_fmt)
ax.plot((0), (0), marker='<', transform=ax.get_yaxis_transform(), **arrow_fmt)
ax.plot((0), (1), marker='^', transform=ax.get_xaxis_transform(), **arrow_fmt)
ax.plot((0), (0), marker='v', transform=ax.get_xaxis_transform(), **arrow_fmt)
def draw_sides(x=1,y=2,side_color="b",lwidth=1):
plt.arrow(x,0,0,y,color=side_color,linestyle="dotted",width=0.001*lwidth)
plt.arrow(0,y,x,0,color=side_color,linestyle="dotted",width=0.001*lwidth)
def draw_vector(x=1,y=2,vname="v",show_name=True,vcolor="b",sides=False,side_color="b",lwidth=1):
plt.quiver(0,0,x,y,scale=1,scale_units='xy',angles = 'xy',color=vcolor,width=0.008*lwidth)
dx = x
if y<0: dy=y-0.3
else: dy = y+0.3
if show_name:
vector_name="$"+vname+"=("+str(x)+","+str(y)+")$"
plt.text(dx,dy,vector_name,color=vcolor)
if sides:
draw_sides(x,y,side_color)
def place_text(x,y,text,tcolor="blue"):
plt.text(x,y,text,color=tcolor)
def show_plt():
plt.show()
# drawing used for quantum
def draw_axes():
points = [ [1.2,0], [0,1.2], [-1.2,0], [0,-1.2] ] # dummy points for zooming out
arrows = [ [1.1,0], [0,1.1], [-1.1,0], [0,-1.1] ] # coordinates for the axes
for p in points:
plt.plot(p[0],p[1]+0.1) # drawing dummy points
for a in arrows:
plt.arrow(0,0,a[0],a[1],head_width=0.04, head_length=0.08) # drawing the axes
def draw_unit_circle():
unit_circle= plt.Circle((0,0),1,color='black',fill=False)
plt.gca().add_patch(unit_circle)
def draw_quantum_state(x,y,name):
# shorten the line length to 0.92
# line_length + head_length should be 1
x1 = 0.92 * x
y1 = 0.92 * y
plt.arrow(0,0,x1,y1,head_width=0.04,head_length=0.08,color="blue")
x2 = 1.15 * x
y2 = 1.15 * y
plt.text(x2,y2,name)
def draw_qubit():
# draw a figure
plt.figure(figsize=(6,6), dpi=60)
# draw the origin
plt.plot(0,0,'ro') # a point in red color
# drawing the axes by using one of our predefined function
draw_axes()
# drawing the unit circle by using one of our predefined function
draw_unit_circle()
# drawing |0>
plt.plot(1,0,"o")
plt.text(1.05,0.05,"|0>")
# drawing |1>
plt.plot(0,1,"o")
plt.text(0.05,1.05,"|1>")
# drawing -|0>
plt.plot(-1,0,"o")
plt.text(-1.2,-0.1,"-|0>")
# drawing -|1>
plt.plot(0,-1,"o")
plt.text(-0.2,-1.1,"-|1>")
def draw_qubit_grover():
# draw a figure
plt.figure(figsize=(7,7), dpi=60)
# draw the origin
plt.plot(0,0,'ro') # a point in red color
# drawing the axes by using one of our predefined function
draw_axes()
# drawing the unit circle by using one of our predefined function
draw_unit_circle()
# drawing |0>
plt.plot(1,0,"o")
plt.text(1.05,0.05,"|unmarked>")
# drawing |1>
plt.plot(0,1,"o")
plt.text(0.05,1.05,"|marked>")
# drawing -|0>
plt.plot(-1,0,"o")
plt.text(-0.98,-0.09,"-|unmarked>")
# drawing -|1>
plt.plot(0,-1,"o")
plt.text(-0.4,-1.1,"-|marked>")
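# Illustrative usage of the helpers above, not part of the original module: draw a
# labelled vector on the 2D plane, then the qubit unit circle with one extra state.
if __name__ == "__main__":
    plot_2D_plane(right=4, up=4, left=-4, down=-4)
    draw_vector(3, 2, vname="v", sides=True)
    show_plt()
    draw_qubit()
    draw_quantum_state(0.6, 0.8, "|psi>")
    show_plt()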
|
the-stack_0_25742
|
import logging
import configparser
import datetime
import argparse
import tweepy
import os
import urllib.request
from .imgviewer import show_image
logger = logging.getLogger()
config = configparser.ConfigParser()
config.read("config.ini")
BEGIN_TIME = datetime.datetime.now()
ROWS, COLUMNS = os.popen("stty size", "r").read().split()
def get_thread(twitter, url):
status = twitter.get_status(url.split("/")[-1], tweet_mode="extended")
userid = status.author.id
yield status
while True:
found = False
statusid = status.id
curs = tweepy.Cursor(
twitter.user_timeline,
user_id=userid,
since_id=statusid,
tweet_mode="extended",
count=100,
).items()
for index, status in enumerate(curs, 0):
if status.in_reply_to_status_id == statusid:
yield status
found = True
if not found:
break
def twitterconnect():
consumer_key = config["twitter"]["consumer_key"]
secret_key = config["twitter"]["secret_key"]
access_token = config["twitter"]["access_token"]
access_token_secret = config["twitter"]["access_token_secret"]
auth = tweepy.OAuthHandler(consumer_key, secret_key)
auth.set_access_token(access_token, access_token_secret)
return tweepy.API(auth)
def main():
args = parse_args()
twitter = twitterconnect()
if not args.url:
logger.error("Use the -u flag to specify an url. Exiting.")
exit()
else:
url = args.url
for status in get_thread(twitter, url):
print(f"{'-' * int(COLUMNS)}")
# Display images if available
if args.display_images:
if hasattr(status, "extended_entities"):
if "media" in status.extended_entities:
for media in status.extended_entities["media"]:
urllib.request.urlretrieve(
media["media_url"], "/tmp/image.png"
)
show_image("/tmp/image.png")
print(status.full_text)
def parse_args():
parser = argparse.ArgumentParser(
description="Display the tweets of a Twitter thread in the terminal."
)
parser.add_argument(
"--debug",
help="Display debugging information",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO,
)
parser.add_argument("-u", "--url", help="Tweet URL.", type=str)
parser.add_argument(
"-i",
"--display_images",
help="Display images if available.",
dest="display_images",
action="store_true",
)
parser.set_defaults(display_images=False)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
return args
if __name__ == "__main__":
main()
|
the-stack_0_25743
|
DISTANCE_STOP_WORDS = ["a", "an", "the"]
NUMBERS_AS_WORDS = [
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten"
"eleven"
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventenn",
"eighteen",
"nineteen",
"twenty"
]
UNITS_MATRIX = {
"meter": {
"words": ["meters", "metres", "mtrs", "meter", "metre", "m"],
"multiplier": {
"foot": 3.28084,
"kilometer": 0.001,
"mile": 0.000621371
}
},
"foot": {
"words": ["foot", "feet", "ft"],
"multiplier": {
"meter": 0.3048,
"kilometer": 0.0003048,
"mile": 0.000189394
}
},
"kilometer": {
"words": ["kilometers", "kilometer", "km", "kms"],
"multiplier": {
"foot": 3280.84,
"meter": 1000,
"mile": 0.621371
}
},
"mile": {
"words": ["miles", "mile", "mi"],
"multiplier": {
"foot": 5280,
"meter": 1609.34,
"kilometer": 1.60934
}
}
}
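# A small sketch, not part of the original module, showing one way UNITS_MATRIX could
# be used for conversions; the helper name `convert_distance` is an assumption of ours.
def convert_distance(value, from_unit, to_unit):
    """Convert `value` from `from_unit` to `to_unit` using the multipliers above."""
    if from_unit == to_unit:
        return value
    return value * UNITS_MATRIX[from_unit]["multiplier"][to_unit]

if __name__ == "__main__":
    print(convert_distance(5, "kilometer", "mile"))  # roughly 3.11 miles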
|
the-stack_0_25744
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool to perform checkouts in one easy command line!
Usage:
fetch <config> [--property=value [--property2=value2 ...]]
This script is a wrapper around various version control and repository
checkout commands. It requires a |config| name, fetches data from that
config in depot_tools/fetch_configs, and then performs all necessary inits,
checkouts, pulls, fetches, etc.
Optional arguments may be passed on the command line in key-value pairs.
These parameters will be passed through to the config's main method.
"""
from __future__ import print_function
import json
import optparse
import os
import pipes
import subprocess
import sys
import textwrap
import git_common
from distutils import spawn
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
#################################################
# Checkout class definitions.
#################################################
class Checkout(object):
"""Base class for implementing different types of checkouts.
Attributes:
|base|: the absolute path of the directory in which this script is run.
|spec|: the spec for this checkout as returned by the config. Different
subclasses will expect different keys in this dictionary.
|root|: the directory into which the checkout will be performed, as returned
by the config. This is a relative path from |base|.
"""
def __init__(self, options, spec, root):
self.base = os.getcwd()
self.options = options
self.spec = spec
self.root = root
def exists(self):
pass
def init(self):
pass
def sync(self):
pass
def run(self, cmd, return_stdout=False, **kwargs):
print('Running: %s' % (' '.join(pipes.quote(x) for x in cmd)))
if self.options.dry_run:
return ''
if return_stdout:
return subprocess.check_output(cmd, **kwargs)
else:
try:
subprocess.check_call(cmd, **kwargs)
except subprocess.CalledProcessError as e:
# If the subprocess failed, it likely emitted its own distress message
# already - don't scroll that message off the screen with a stack trace
# from this program as well. Emit a terse message and bail out here;
# otherwise a later step will try doing more work and may hide the
# subprocess message.
print('Subprocess failed with return code %d.' % e.returncode)
sys.exit(e.returncode)
return ''
class GclientCheckout(Checkout):
def run_gclient(self, *cmd, **kwargs):
if not spawn.find_executable('gclient'):
cmd_prefix = (sys.executable, os.path.join(SCRIPT_PATH, 'gclient.py'))
else:
cmd_prefix = ('gclient',)
return self.run(cmd_prefix + cmd, **kwargs)
def exists(self):
try:
gclient_root = self.run_gclient('root', return_stdout=True).strip()
return (os.path.exists(os.path.join(gclient_root, '.gclient')) or
os.path.exists(os.path.join(os.getcwd(), self.root)))
except subprocess.CalledProcessError:
pass
return os.path.exists(os.path.join(os.getcwd(), self.root))
class GitCheckout(Checkout):
def run_git(self, *cmd, **kwargs):
print('Running: git %s' % (' '.join(pipes.quote(x) for x in cmd)))
if self.options.dry_run:
return ''
return git_common.run(*cmd, **kwargs)
class GclientGitCheckout(GclientCheckout, GitCheckout):
def __init__(self, options, spec, root):
super(GclientGitCheckout, self).__init__(options, spec, root)
assert 'solutions' in self.spec
def _format_spec(self):
def _format_literal(lit):
if isinstance(lit, str) or (sys.version_info.major == 2 and
isinstance(lit, unicode)):
return '"%s"' % lit
if isinstance(lit, list):
return '[%s]' % ', '.join(_format_literal(i) for i in lit)
return '%r' % lit
soln_strings = []
for soln in self.spec['solutions']:
soln_string = '\n'.join(' "%s": %s,' % (key, _format_literal(value))
for key, value in soln.items())
soln_strings.append(' {\n%s\n },' % soln_string)
gclient_spec = 'solutions = [\n%s\n]\n' % '\n'.join(soln_strings)
extra_keys = ['target_os', 'target_os_only', 'cache_dir']
gclient_spec += ''.join('%s = %s\n' % (key, _format_literal(self.spec[key]))
for key in extra_keys if key in self.spec)
return gclient_spec
def init(self):
# Configure and do the gclient checkout.
self.run_gclient('config', '--spec', self._format_spec())
sync_cmd = ['sync']
if self.options.nohooks:
sync_cmd.append('--nohooks')
if self.options.no_history:
sync_cmd.append('--no-history')
if self.spec.get('with_branch_heads', False):
sync_cmd.append('--with_branch_heads')
self.run_gclient(*sync_cmd)
# Configure git.
wd = os.path.join(self.base, self.root)
if self.options.dry_run:
print('cd %s' % wd)
self.run_git(
'submodule', 'foreach',
'git config -f $toplevel/.git/config submodule.$name.ignore all',
cwd=wd)
if not self.options.no_history:
self.run_git(
'config', '--add', 'remote.origin.fetch',
'+refs/tags/*:refs/tags/*', cwd=wd)
self.run_git('config', 'diff.ignoreSubmodules', 'all', cwd=wd)
CHECKOUT_TYPE_MAP = {
'gclient': GclientCheckout,
'gclient_git': GclientGitCheckout,
'git': GitCheckout,
}
def CheckoutFactory(type_name, options, spec, root):
"""Factory to build Checkout class instances."""
class_ = CHECKOUT_TYPE_MAP.get(type_name)
if not class_:
raise KeyError('unrecognized checkout type: %s' % type_name)
return class_(options, spec, root)
#################################################
# Utility function and file entry point.
#################################################
def usage(msg=None):
"""Print help and exit."""
if msg:
print('Error:', msg)
print(textwrap.dedent("""\
usage: %s [options] <config> [--property=value [--property2=value2 ...]]
This script can be used to download the Chromium sources. See
http://www.chromium.org/developers/how-tos/get-the-code
for full usage instructions.
Valid options:
-h, --help, help Print this message.
--nohooks Don't run hooks after checkout.
--force (dangerous) Don't look for existing .gclient file.
-n, --dry-run Don't run commands, only print them.
--no-history Perform shallow clones, don't fetch the full git history.
Valid fetch configs:""") % os.path.basename(sys.argv[0]))
configs_dir = os.path.join(SCRIPT_PATH, 'fetch_configs')
configs = [f[:-3] for f in os.listdir(configs_dir) if f.endswith('.py')]
configs.sort()
for fname in configs:
print(' ' + fname)
sys.exit(bool(msg))
def handle_args(argv):
"""Gets the config name from the command line arguments."""
if len(argv) <= 1:
usage('Must specify a config.')
if argv[1] in ('-h', '--help', 'help'):
usage()
dry_run = False
nohooks = False
no_history = False
force = False
while len(argv) >= 2:
arg = argv[1]
if not arg.startswith('-'):
break
argv.pop(1)
if arg in ('-n', '--dry-run'):
dry_run = True
elif arg == '--nohooks':
nohooks = True
elif arg == '--no-history':
no_history = True
elif arg == '--force':
force = True
else:
usage('Invalid option %s.' % arg)
def looks_like_arg(arg):
return arg.startswith('--') and arg.count('=') == 1
bad_parms = [x for x in argv[2:] if not looks_like_arg(x)]
if bad_parms:
usage('Got bad arguments %s' % bad_parms)
config = argv[1]
props = argv[2:]
return (
optparse.Values({
'dry_run': dry_run,
'nohooks': nohooks,
'no_history': no_history,
'force': force}),
config,
props)
def run_config_fetch(config, props, aliased=False):
"""Invoke a config's fetch method with the passed-through args
and return its json output as a python object."""
config_path = os.path.abspath(
os.path.join(SCRIPT_PATH, 'fetch_configs', config))
if not os.path.exists(config_path + '.py'):
print("Could not find a config for %s" % config)
sys.exit(1)
cmd = [sys.executable, config_path + '.py', 'fetch'] + props
result = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
spec = json.loads(result)
if 'alias' in spec:
assert not aliased
return run_config_fetch(
spec['alias']['config'], spec['alias']['props'] + props, aliased=True)
cmd = [sys.executable, config_path + '.py', 'root']
result = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
root = json.loads(result)
return spec, root
def run(options, spec, root):
"""Perform a checkout with the given type and configuration.
Args:
options: Options instance.
spec: Checkout configuration returned by the the config's fetch_spec
method (checkout type, repository url, etc.).
root: The directory into which the repo expects to be checkout out.
"""
assert 'type' in spec
checkout_type = spec['type']
checkout_spec = spec['%s_spec' % checkout_type]
try:
checkout = CheckoutFactory(checkout_type, options, checkout_spec, root)
except KeyError:
return 1
if not options.force and checkout.exists():
print('Your current directory appears to already contain, or be part of, ')
print('a checkout. "fetch" is used only to get new checkouts. Use ')
print('"gclient sync" to update existing checkouts.')
print()
print('Fetch also does not yet deal with partial checkouts, so if fetch')
print('failed, delete the checkout and start over (crbug.com/230691).')
return 1
return checkout.init()
def main():
options, config, props = handle_args(sys.argv)
spec, root = run_config_fetch(config, props)
return run(options, spec, root)
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
the-stack_0_25745
|
"""
Boto3 ServiceResource collections parser, produces `structures.Collection`.
"""
from typing import List
from boto3.resources.base import ServiceResource as Boto3ServiceResource
from mypy_boto3_builder.parsers.shape_parser import ShapeParser
from mypy_boto3_builder.service_name import ServiceName
from mypy_boto3_builder.structures.argument import Argument
from mypy_boto3_builder.structures.collection import Collection
from mypy_boto3_builder.structures.method import Method
from mypy_boto3_builder.type_annotations.internal_import import InternalImport
from mypy_boto3_builder.type_annotations.type import Type
from mypy_boto3_builder.type_annotations.type_subscript import TypeSubscript
from mypy_boto3_builder.utils.strings import get_class_prefix
def parse_collections(
parent_name: str,
resource: Boto3ServiceResource,
service_name: ServiceName,
shape_parser: ShapeParser,
) -> List[Collection]:
"""
Extract collections from boto3 resource.
Arguments:
resource -- boto3 service resource.
Returns:
A list of Collection structures.
"""
result: List[Collection] = []
for collection in resource.meta.resource_model.collections:
if not collection.resource:
continue
object_class_name = collection.resource.type
collection_record = Collection(
name=f"{parent_name}{get_class_prefix(collection.name)}Collection",
parent_name=parent_name,
attribute_name=collection.name,
docstring=(
f"[{parent_name}.{collection.name} documentation]"
f"({service_name.doc_link}.{parent_name}.{collection.name})"
),
type_annotation=InternalImport(collection.name),
)
self_type = InternalImport(collection_record.name, stringify=True)
collection_record.methods.append(Method("all", [Argument("self", None)], self_type))
filter_method = shape_parser.get_collection_filter_method(
collection_record.name, collection, self_type
)
filter_method.type_ignore = True
collection_record.methods.append(filter_method)
batch_methods = shape_parser.get_collection_batch_methods(
collection_record.name, collection
)
for batch_method in batch_methods:
collection_record.methods.append(batch_method)
collection_record.methods.append(
Method(
"limit",
[Argument("self", None), Argument("count", Type.int)],
self_type,
)
)
collection_record.methods.append(
Method(
"page_size",
[Argument("self", None), Argument("count", Type.int)],
self_type,
)
)
collection_record.methods.append(
Method(
"pages",
[Argument("self", None)],
TypeSubscript(
Type.Iterator,
[TypeSubscript(Type.List, [InternalImport(name=object_class_name)])],
),
)
)
collection_record.methods.append(
Method(
"__iter__",
[Argument("self", None)],
TypeSubscript(Type.Iterator, [InternalImport(name=object_class_name)]),
)
)
result.append(collection_record)
return result
|
the-stack_0_25747
|
import networkx as nx
import random
# Read data from the dataset, and create graph G_fb
G_fb = nx.read_edgelist("facebook_combined.txt", create_using=nx.Graph(), nodetype=int)
# Show the number of edges in G_fb
print("edges = " + str(G_fb.number_of_edges()))
# Show number of nodes in G_fb
print("nodes = " + str(G_fb.number_of_nodes()))
# TASK1. Now your task is to compute the probability whether there is an edge between two vertices.
edge_probab = G_fb.number_of_edges() / (
((G_fb.number_of_nodes() - 1) * G_fb.number_of_nodes()) / 2
)
# TASK2. Compute the ACC (average clustering coefficient) of G_fb
# (consult the NetworkX manual or the video lecture for the correct function which does it)
av_clust_coeff = nx.average_clustering(G_fb)
print("acc = " + str(av_clust_coeff))
# Now we have to generate a random graph. First we initialize it
G_rand = nx.Graph()
k = G_fb.number_of_nodes()
# TASK3. generate edges in G_rand at random:
for i in range(0, k):
for j in range(0, i):
# Add an edge between vertices i and j, with probability edge_probab (as in G_fb)
if random.random() < edge_probab:
G_rand.add_edge(i, j)
# Now we print out the number of edges and the ACC of the new graph
print("rgraph_edges = " + str(G_rand.number_of_edges()))
av_clust_coeff = nx.average_clustering(G_rand)
print("rgraph_acc = " + str(av_clust_coeff))
# The results which should be submitted to the grader include the ACC of G_fb and of G_rand. Good luck!
|
the-stack_0_25748
|
""" 85 - Crie um programa onde o usuário possa digitar sete valores numéricos e cadastre-os em uma lista única que
mantenha separados os valores pares e ímpares. No final, mostre os valores pares e ímpares em ordem crescente. """
# My answer:
"""
numeros = list()
pares = []
impares = []
for v in range(1, 8):
numeros.append(int(input(f'Enter value {v}: ')))
for n in numeros:
if n % 2 == 0:
pares.append(n)
else:
impares.append(n)
print('-=' * 30)
print(f'The even values entered were: {sorted(pares)}')
print(f'The odd values entered were: {sorted(impares)}')
"""
# Teacher's answer:
numeros = [[], []]
valor = 0
for c in range(1, 8):
valor = int(input(f'Enter value {c}: '))
if valor % 2 == 0:
numeros[0].append(valor)
else:
numeros[1].append(valor)
print('-=' * 30)
numeros[0].sort()
numeros[1].sort()
print(f'The even values entered were: {numeros[0]}')
print(f'The odd values entered were: {numeros[1]}')
|
the-stack_0_25750
|
import os
import requests
from src.job import AbstractJob
from src.notifier.email_notifier import send_code_template, send_error
TO_EMAIL_ADDRESS = os.environ.get("TO_EMAIL_ADDRESS")
class Job(AbstractJob):
name = "World Time Job"
def __init__(self) -> None:
super().__init__()
def run(self):
res = requests.get("http://worldtimeapi.org/api/timezone/America/Toronto")
if res.status_code != 200:
self.fail()
else:
data = res.json()
self.success(data)
    def success(self, code: dict):
        # send_code_template(self.name, code, TO_EMAIL_ADDRESS)
        print("Success: in practice, uncomment the line above to send an email")
print("world time:")
print(code)
def fail(self):
send_error("Error from cron crawler", "Error getting world time", TO_EMAIL_ADDRESS)
|
the-stack_0_25751
|
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib import constants
from tempest.common import compute
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_tempest_plugin.common import ip as ip_utils
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
CONF = config.CONF
class NetworkConnectivityTest(base.BaseTempestTestCase):
credentials = ['primary', 'admin']
@classmethod
@utils.requires_ext(extension="router", service="network")
def resource_setup(cls):
super(NetworkConnectivityTest, cls).resource_setup()
# Create keypair with admin privileges
cls.keypair = cls.create_keypair()
# Create security group with admin privileges
cls.secgroup = cls.create_security_group(
name=data_utils.rand_name('secgroup'))
# Execute funcs to achieve ssh and ICMP capabilities
cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
def _create_servers(self, port_1, port_2):
params = {
'flavor_ref': CONF.compute.flavor_ref,
'image_ref': CONF.compute.image_ref,
'key_name': self.keypair['name']
}
vms = []
vms.append(
self.create_server(networks=[{'port': port_1['id']}], **params))
if (CONF.compute.min_compute_nodes > 1 and
compute.is_scheduler_filter_enabled("DifferentHostFilter")):
params['scheduler_hints'] = {
'different_host': [vms[0]['server']['id']]}
vms.append(
self.create_server(networks=[{'port': port_2['id']}], **params))
for vm in vms:
self.wait_for_server_active(vm['server'])
return vms
@decorators.idempotent_id('8944b90d-1766-4669-bd8a-672b5d106bb7')
def test_connectivity_through_2_routers(self):
ap1_net = self.create_network()
ap2_net = self.create_network()
wan_net = self.create_network()
ap1_subnet = self.create_subnet(
ap1_net, cidr="10.10.210.0/24", gateway="10.10.210.254")
ap2_subnet = self.create_subnet(
ap2_net, cidr="10.10.220.0/24", gateway="10.10.220.254")
self.create_subnet(
wan_net, cidr="10.10.200.0/24", gateway="10.10.200.254")
ap1_rt = self.create_router(
router_name=data_utils.rand_name("ap1_rt"),
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
ap2_rt = self.create_router(
router_name=data_utils.rand_name("ap2_rt"),
admin_state_up=True)
ap1_internal_port = self.create_port(
ap1_net, security_groups=[self.secgroup['id']])
ap2_internal_port = self.create_port(
ap2_net, security_groups=[self.secgroup['id']])
ap1_wan_port = self.create_port(wan_net)
ap2_wan_port = self.create_port(wan_net)
self.client.add_router_interface_with_port_id(
ap1_rt['id'], ap1_wan_port['id'])
self.client.add_router_interface_with_port_id(
ap2_rt['id'], ap2_wan_port['id'])
self.create_router_interface(ap1_rt['id'], ap1_subnet['id'])
self.create_router_interface(ap2_rt['id'], ap2_subnet['id'])
self.client.update_router(
ap1_rt['id'],
routes=[{"destination": ap2_subnet['cidr'],
"nexthop": ap2_wan_port['fixed_ips'][0]['ip_address']}])
self.client.update_router(
ap2_rt['id'],
routes=[{"destination": ap1_subnet['cidr'],
"nexthop": ap1_wan_port['fixed_ips'][0]['ip_address']}])
servers = self._create_servers(ap1_internal_port, ap2_internal_port)
ap1_fip = self.create_and_associate_floatingip(
ap1_internal_port['id'])
ap1_sshclient = ssh.Client(
ap1_fip['floating_ip_address'], CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
self.check_remote_connectivity(
ap1_sshclient, ap2_internal_port['fixed_ips'][0]['ip_address'],
servers=servers)
@decorators.idempotent_id('b72c3b77-3396-4144-b05d-9cd3c0099893')
def test_connectivity_router_east_west_traffic(self):
"""This case is intended to test router east west taffic
The case can be used in various scenarios: legacy/distributed router,
same/different host.
"""
net_1 = self.create_network()
net_2 = self.create_network()
subnet_1 = self.create_subnet(net_1, cidr="10.10.1.0/24")
subnet_2 = self.create_subnet(net_2, cidr="10.10.2.0/24")
router = self.create_router(
router_name=data_utils.rand_name("east_west_traffic_router"),
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
internal_port_1 = self.create_port(
net_1, security_groups=[self.secgroup['id']])
internal_port_2 = self.create_port(
net_2, security_groups=[self.secgroup['id']])
self.create_router_interface(router['id'], subnet_1['id'])
self.create_router_interface(router['id'], subnet_2['id'])
servers = self._create_servers(internal_port_1, internal_port_2)
fip = self.create_and_associate_floatingip(
internal_port_1['id'])
sshclient = ssh.Client(
fip['floating_ip_address'], CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
self.check_remote_connectivity(
sshclient, internal_port_2['fixed_ips'][0]['ip_address'],
ping_count=10, servers=servers)
@utils.requires_ext(extension="dvr", service="network")
@decorators.idempotent_id('69d3650a-5c32-40bc-ae56-5c4c849ddd37')
def test_connectivity_dvr_and_no_dvr_routers_in_same_subnet(self):
"""This test case tests connectivity between vm and 2 routers.
Subnet is connected to dvr and non-dvr routers in the same time, test
ensures that connectivity from VM to both routers is working.
Test scenario: (NOTE: 10.1.0.0/24 private CIDR is used as an example)
+----------------+ +------------+
| Non-dvr router | | DVR router |
| | | |
| 10.1.0.1 | | 10.1.0.x |
+-------+--------+ +-----+------+
| |
| 10.1.0.0/24 |
+----------------+----------------+
|
+-+-+
|VM |
+---+
where:
10.1.0.1 - is subnet's gateway IP address,
10.1.0.x - is any other IP address taken from subnet's range
Test ensures that both 10.1.0.1 and 10.1.0.x IP addresses are
reachable from VM.
"""
ext_network = self.client.show_network(self.external_network_id)
for ext_subnetid in ext_network['network']['subnets']:
ext_subnet = self.os_admin.network_client.show_subnet(ext_subnetid)
ext_cidr = ext_subnet['subnet']['cidr']
if ext_subnet['subnet']['ip_version'] == constants.IP_VERSION_4:
break
else:
self.fail('No IPv4 subnet was found in external network %s' %
ext_network['network']['id'])
subnet_cidr = ip_utils.find_valid_cidr(used_cidr=ext_cidr)
gw_ip = netaddr.IPAddress(subnet_cidr.first + 1)
network = self.create_network()
subnet = self.create_subnet(
network, cidr=str(subnet_cidr), gateway=str(gw_ip))
non_dvr_router = self.create_router_by_client(
tenant_id=self.client.tenant_id,
is_admin=True,
router_name=data_utils.rand_name("nondvr-2-routers-same-network"),
admin_state_up=True,
distributed=False)
self.create_router_interface(non_dvr_router['id'], subnet['id'])
dvr_router = self.create_router_by_client(
tenant_id=self.client.tenant_id,
is_admin=True,
router_name=data_utils.rand_name("dvr-2-rotuers-same-network"),
admin_state_up=True,
distributed=True)
dvr_router_port = self.create_port(network)
self.client.add_router_interface_with_port_id(
dvr_router['id'], dvr_router_port['id'])
vm = self.create_server(
flavor_ref=CONF.compute.flavor_ref,
image_ref=CONF.compute.image_ref,
key_name=self.keypair['name'],
networks=[{'uuid': network['id']}],
security_groups=[{'name': self.secgroup['name']}])
self.wait_for_server_active(vm['server'])
vm_port = self.client.list_ports(
network_id=network['id'], device_id=vm['server']['id'])['ports'][0]
fip = self.create_and_associate_floatingip(vm_port['id'])
sshclient = ssh.Client(
fip['floating_ip_address'], CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
self.check_remote_connectivity(
sshclient, str(gw_ip), ping_count=10, servers=[vm])
self.check_remote_connectivity(
sshclient, dvr_router_port['fixed_ips'][0]['ip_address'],
ping_count=10, servers=[vm])
|
the-stack_0_25752
|
# -*- coding: utf-8 -*-
# Copyright 2017 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.2@@
import logging
DEFAULT_LANGUAGE = u"nl"
translations = \
{
u'nl': {
u'collection_broadcast': u'Morgen (%(date)s) zijn er volgende ophalingen\n-%(collections)s',
}
}
def translate_key(language, key, suppress_warning=False, _duplicate_backslashes=False, **kwargs):
if not language:
language = DEFAULT_LANGUAGE
if not key:
raise ValueError("key is a required argument")
language = language.replace('-', '_')
if not language in translations:
if '_' in language:
language = language.split('_')[0]
if not language in translations:
language = DEFAULT_LANGUAGE
else:
language = DEFAULT_LANGUAGE
if key in translations[language]:
s = translations[language][key]
else:
if key not in translations[DEFAULT_LANGUAGE]:
raise ValueError("Translation key '%s' not found for default language" % (key))
if not suppress_warning:
logging.warn("Translation key '%s' not found for language '%s' - fallback to default" % (key, language))
s = translations[DEFAULT_LANGUAGE][key]
if kwargs:
s = s % kwargs
if _duplicate_backslashes:
s = s.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t').replace("'", "\\'").replace('"', '\\"')
return s
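# Illustrative usage sketch (not part of the original module): translate_key
# falls back to the default language ('nl') for unknown languages and
# interpolates keyword arguments into the translation string. The argument
# values below are made-up examples.
if __name__ == '__main__':
    print(translate_key('nl', 'collection_broadcast',
                        date='21/03', collections='restafval'))
    print(translate_key('en-US', 'collection_broadcast',  # falls back to 'nl'
                        date='21/03', collections='restafval'))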
|
the-stack_0_25754
|
#
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
def pytest_addoption(parser):
parser.addoption("--runmacro", action="store_true",
help="run macroreduce tests that require both lambda and stand-alone instances")
parser.addoption("--runlambda", action="store_true",
help="run tests that can only be run on lambda")
parser.addoption("--runcmdtest", action="store_true",
help="run command line tests")
|
the-stack_0_25755
|
from django.contrib import admin
# Register your models here.
from .models import Search
class SearchAdmin(admin.ModelAdmin):
date_hierarchy ='timestamp'
list_display = ['query','search_count','timestamp']
list_filter =['search_count','timestamp']
admin.site.register(Search,SearchAdmin)
|
the-stack_0_25756
|
import os
import time
import pytest
from ipvc import IPVC
from helpers import NAMESPACE, REPO, REPO2, get_environment, write_file
def test_stage_diff():
ipvc = get_environment()
ipvc.repo.init()
test_file = REPO / 'test_file.txt'
write_file(test_file, 'hello world')
ipvc.stage.add(test_file)
assert len(ipvc.stage.diff()) == 1
ipvc.stage.commit('my commit')
assert len(ipvc.stage.diff()) == 0
time.sleep(1) # sleep to make sure we get another time stamp
write_file(test_file, 'hello world2')
assert len(ipvc.stage.diff()) == 0
assert len(ipvc.diff.run(files=True)) == 1
changes = ipvc.stage.add(test_file)
assert len(changes) == 1
assert len(ipvc.stage.diff()) == 1
diff = ipvc.diff.run(files=True)
assert len(diff) == 0
|
the-stack_0_25757
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
===============================================
Get ica component and decide rejecting ones on Enrico data using MNE and - local computer version
===============================================
We first import the subject list from sbatch,
then define the session list (1, 2), the state list (VD, FA, OP) and the reject dict.
We then import the eeglab-format Raw data of one state during one session for a single subject with the MNE package, and apply:
1) make sure that events 254 and 255 are present, and crop the raw data between them
2) a notch filter to remove powerline artifact (50 Hz)
3) a 1 Hz - 100 Hz band-pass filter
Then we concatenate the data of the same session with annotation engineering; for the helper utilities please refer to
utils_preProcessingWorkflowJuly05.py
3) ASR and ICA fitting:
====> output = subj+session_ica fif file that saves the ICA mixing matrix for one session for one subject
Note:
1. exception: subject 36; some subjects can have several 254/255 events
------ please refer to excel Enrico recording summary.xlsx
2. events code: state + condition + session
1. state: 1:VD 2:FA 3:OP
2. condition: 1:baseline 2:safe 3:threat
3. session: 1:session1 2:session2
3. we fix sampling rate at 512 = for those file whose sfreq = 2048, we do a downsampling
Suggestions:
1) decide information storage format
2)
Updated on July 2019
@author: Gansheng TAN [email protected] based on Manu's codes
"""
############################################################## Set-up header ###########################################
import mne
import importlib
import numpy as np
import numpy.matlib
from mne.report import Report
from autoreject import AutoReject
from autoreject import compute_thresholds
from autoreject import get_rejection_threshold
import matplotlib
import matplotlib.pyplot as plt # noqa
import matplotlib.patches as patches # noqa
from autoreject import set_matplotlib_defaults # noqa
from utils_ASR import *
from utils_preProcessingWorkflowJuly05 import *
from scipy.linalg import toeplitz
from scipy import signal
import sys
import encodings
import os
matplotlib.use('Agg')
mne.set_log_level('WARNING')
##################### OS path in INSERM computer #####################################################################
# raw_data_path = '/home/gansheng.tan/process_mne/INSERM_EEG_Enrico_Proc/data_eeglab/raw_data/'
# montage_fname = '/home/gansheng.tan/process_mne/INSERM_EEG_Enrico_Proc/data_eeglab/raw_data/Biosemi64_MAS_EOG.locs'
# # report_path = '/home/gansheng.tan/process_mne/INSERM_EEG_Enrico_Proc/report/'
# full_epochs_path = '/home/gansheng.tan/process_mne/INSERM_EEG_Enrico_Proc/data_eeglab/full_epochs_data/'
#
##################### OS path in cluster ######################################################################
raw_data_path = '/mnt/data/gansheng/raw_data/'
montage_fname = '/mnt/data/gansheng/raw_data/Biosemi64_MAS_EOG.locs'
preProc_ica_path = '/home/gansheng.tan/process_mne/INSERM_EEG_Enrico_Proc/data_eeglab/preProc_ica/'
report_path = '/home/gansheng.tan/process_mne/INSERM_EEG_Enrico_Proc/report/'
# full_epochs_path = '/mnt/data/gansheng/preClean_data/'
########################################## Algorithme parameter ############################################
cutoff = 10
pca_n_comp = 0.98
decim = 2
########################################## Initialization parameter##########################################
subj_list = [sys.argv[1]]
# subj_list = ['94']
session_list=['1','2']
#state list defines the concatenating order
# state_list = ['VD','FA','OP']
state_list = ['VD','FA','OP']
power_freq_array = [50]
reject_raw_data_session1 = {'29':['VD','FA','OP'],'30':['FA'],'36':['OP'],'74':['FA','OP','VD']}
reject_raw_data_session2 = {'74':['FA','OP','VD'],'55':['VD']}
# bad channel rejection is not applied in the preproc; bad channels will be identified by visual inspection later
bad_channels={'94':{'1':['P2']},
'07':{'1':['Iz']},
'12':{'1':['P2']},
'14':{'1':['P2']},
'19':{'1':['P2','TP8','T7']},
'25':{'1':['T8'],'2':['FC1','C3','PO4']},
'26':{'1':['T8'],'2':['CPz']},
'28':{'2':['CP2','PO7','Oz','POz']},
'29':{'2':['T7','F6','F4','F8']},
'30':{'1':['C6','F6','FC6']},
'32':{'1':['P2'],'2':['P2','TP7']},
'34':{'1':['P2'],'2':['P2']},
'35':{'1':['T7','T8'],'2':['T8','PO8']},
'36':{'1':['P2','PO4'],'2':['P2','PO4']},
'37':{'1':['Iz']},
'38':{'2':['TP8']},
'39':{'1':['P2'],'2':['P2','FT8']},
'40':{'1':['P2','TP7'],'2':['P2','TP7']},
'42':{'1':['P2'],'2':['P2']},
'50':{'1':['T7']},
'51':{'1':['P2'],'2':['P2']},
'54':{'1':['P2'],'2':['P2','T7']},
'56':{'1':['P2','T7'],'2':['P2','TP7']},
'57':{'1':['P2'],'2':['P2']},
'58':{'1':['P2','T8'],'2':['PO4']},
'59':{'1':['P2','PO4','FC4']},
'60':{'1':['P2'],'2':['P2']},
'61':{'1':['P2']},
'62':{'2':['TP8']},
'63':{'2':['PO8']},
'68':{'1':['P2'],'2':['P2']},
'64':{'1':['C1']},
'70':{'1':['PO4','O2','FC3','FC5'],'2':['PO4','O2']},
'71':{'1':['P2','Iz'],'2':['P2','Iz','C1','Cz']},
'73':{'2':['FCz']},
'79':{'1':['P2','POz'],'2':['P2','POz','T7']},
'81':{'1':['Iz','Oz','Pz','CPz','PO4','P2','POz'],'2':['P2','PO4','FC1','C1','Pz']},
'78':{'1':['P2'],'2':['P2']},
'82':{'1':['P2']},
'83':{'1':['T7'],'2':['T7']},
'87':{'2':['P2']},
'65':{'1':['P2'],'2':['P2']},
'90':{'1':['T7','P2'],'2':['P2']},
'91':{'1':['P2'],'2':['P2']},
'93':{'1':['PO4'],'2':['PO4']},
'95':{'1':['P2'],'2':['P2']}
}
################################ step00: cut and filter data and concatenate 3 recording in one session ############
###### set up montage
montage_biosemi=mne.channels.read_montage(montage_fname)
###### preproc for each raw file
for subj in subj_list:
psd_full_figs=[]
psd_full_caption=[]
session2conctn_list=[]
############### single subject report ###########################
rep = Report(image_format = 'png', subject = 'subj0'+subj)
for session in session_list:
psd_figs=[]
psd_captions=[]
ASR_figs=[]
ASR_captions=[]
ica_figs=[]
ica_captions=[]
autoR_figs=[]
autoR_captions=[]
reject_state=[]
conctn_list = []
conctn_anno_list=[]
# conctn_dict = {}
if subj in eval('reject_raw_data_session'+session).keys():
reject_state = eval('reject_raw_data_session'+session)[subj]
print("the rejected states of subject {} in session {} are {}".format(subj,session,reject_state))
for state in state_list:
raw_fname = raw_data_path + 'subj0'+subj+'_'+state+session+'_mast.set'
if state in reject_state:
continue
elif os.path.isfile(raw_fname)==False:
print('raw data file is missing: subject{}, session{}, state{}'.format(subj,session,state))
else:
raw = mne.io.read_raw_eeglab(raw_fname,montage_biosemi,verbose='INFO',preload=True,eog='auto')
if raw.info['sfreq'] != 512:
raw.resample(sfreq=512, npad = 'auto')
events = mne.events_from_annotations(raw)
if subj == '36' and session == '2' and state == 'VD':
raw.crop(tmin=220)
events = mne.events_from_annotations(raw)
events = events_time_forward(events,220)
events_coding=events[1]
events=np.asarray(events[0])
if '254.0' not in events_coding.keys():
raw.annotations.append(onset=0,duration=0,description='254.0')
events = mne.events_from_annotations(raw)
events_coding=events[1]
events=np.asarray(events[0])
if '255.0' not in events_coding.keys():
raw.annotations.append(onset=events[-1][0]/512,duration=0,description='255.0')
events = mne.events_from_annotations(raw)
events_coding=events[1]
events=np.asarray(events[0])
events_code_start = events_coding['254.0']
start = events[events[:,2]==events_code_start][0][0]
events_code_end = events_coding['255.0']
stop = events[events[:,2]==events_code_end][0][0]
raw_cut_filt = raw.copy()
raw_cut_filt.crop(tmin = start/raw.info['sfreq'], tmax = stop/raw.info['sfreq'])
raw_cut_filt.notch_filter(freqs=power_freq_array)
raw_cut_filt.filter(l_freq=1,h_freq=100)
psd_figs.append(raw_cut_filt.plot_psd(show = False))
psd_captions.append('subject '+subj+"'s "+'psd plot after cut and filtering in session'
+session+ ' during '+state+' state')
############ annotation engineering ################
index_dlt=0
for i in range(raw_cut_filt.annotations.__len__()):
if (raw_cut_filt.annotations.__getitem__(i-index_dlt)['description']) not in ['131.0','132.0','255.0']:
raw_cut_filt.annotations.delete(i-index_dlt)
index_dlt+=1
else:
continue
mne_annotation_recode_by_adding(session=session,state=state,annotations=raw_cut_filt.annotations)
conctn_anno_list.append(raw_cut_filt.annotations)
conctn_list.append(raw_cut_filt)
################### Concatenation process #################################
if len(conctn_list)== 0:
continue
else:
full_array = conctn_list[0]._data
full_info = conctn_list[0].info
del conctn_list[0]
for raw2conctn in conctn_list:
full_array = np.concatenate((full_array,raw2conctn._data),axis=1)
raw_full = mne.io.RawArray(full_array,info = full_info)
full_annotation = conctn_anno_list[0]
del conctn_anno_list[0]
for annos2conctn in conctn_anno_list:
mne_annotation_postpone (pptime=full_annotation.__getitem__(full_annotation.__len__()-1)['onset'],
annotations=annos2conctn)
full_annotation = full_annotation.__add__(annos2conctn)
raw_full.set_annotations(full_annotation)
if subj in bad_channels.keys() and session in bad_channels[subj].keys() :
raw_full.info['bads']=bad_channels[subj][session]
###########raw_full now is for one session
############### step01: epochs engineering - calibration-epochs-ASR #################################
rawCalibAsr = raw_full.copy()
rawCalibAsr = rawCalibAsr.crop(tmin=10,tmax=150)
rawCalibAsr_noYW = rawCalibAsr.copy()
rawCalibAsr._data,iirstate = YW_filter(rawCalibAsr._data, rawCalibAsr.info['sfreq'],None)
rawVEOG= rawCalibAsr.copy()
rawVEOG = rawVEOG.pick_channels(['VEOG'])
VEOG_data = np.squeeze(rawVEOG.get_data())
peak_locs, peak_eeg = mne.preprocessing.peak_finder(VEOG_data, thresh = 110e-6)
lengthblink = 0.5*rawCalibAsr.info['sfreq']
startremoveblink = peak_locs-(lengthblink/2)
stopremoveblink = peak_locs+(lengthblink/2)
NbsampCalibAsrWindow = len(VEOG_data)
startremoveblink = np.abs((startremoveblink>0)*startremoveblink)
stopremoveblink = (stopremoveblink>NbsampCalibAsrWindow-1)*NbsampCalibAsrWindow + (stopremoveblink<NbsampCalibAsrWindow-1)*stopremoveblink
Mask=np.zeros(NbsampCalibAsrWindow)
for ix2remove in range(len(startremoveblink)):
Mask[int(startremoveblink[ix2remove]):int(stopremoveblink[ix2remove])]=1
rawCalibAsr_noYW.pick_types(eeg=True)
rawdata_noblink = np.delete(rawCalibAsr_noYW.get_data(),np.where(Mask),axis=1)
SignalCalib=np.delete(rawdata_noblink,np.where(np.abs(rawdata_noblink)>75e-6)[1],axis=1)
ref_maxbadchannels = 0.2
ref_tolerances = [-3.5,5.5]
ref_wndlen = 1
SignalClean,sample_mask = clean_windows(SignalCalib,rawCalibAsr.info['sfreq'],ref_maxbadchannels,ref_tolerances,ref_wndlen)
SignalClean_raw = mne.io.RawArray(SignalClean,rawCalibAsr_noYW.info)
ASR_figs.append(SignalClean_raw.plot(scalings = 140e-6,n_channels=64,duration = 10,show = False))
ASR_captions.append('SignalClean plot same scaling as raw 150e-6')
srate = rawCalibAsr.info['sfreq']
cutoff = cutoff
asr_state = asr_calibrate(SignalClean,srate,cutoff)
if subj in bad_channels.keys() and session in bad_channels[subj].keys() :
raw_bad = raw_full.copy()
raw_bad.pick(raw_bad.info['bads'])
raw4detect = raw_full.copy()
raw4detect.pick_types(eeg=True)
raw_full_eeg=raw_full.copy()
raw_full_eeg.pick_types(eeg=True)
raw_full_eog=raw_full.copy()
raw_full_eog.pick_types(eog=True)
raw4detect._data,iirstate = YW_filter(raw4detect._data,raw4detect.info['sfreq'],None)
events = mne.events_from_annotations(raw_full)
for i in range(len(events[0][:,2])):
events[0][i][2]=int(float(dict_getValue(events[1],events[0][i][2])))
for key in events[1].keys():
events[1][key] = int(float(key))
events_time_description = events[0]
i=0
while i <len(events_time_description[:,2]):
if events_time_description[i][2]==255:
events_time_description=np.delete(events_time_description,i,0)
else:
i+=1
events_dict=events[1]
events_dict=removeItem_from_dict(events_dict,'255.0')
sfreq=raw_full.info['sfreq']
i=0
overflow=False
while overflow==False:
if events_time_description[i,0]+sfreq*2>=events_time_description[-1][0]:
overflow=True
elif events_time_description[i+1,0]-events_time_description[i,0]>sfreq*2:
events_time_description=np.insert(events_time_description,i+1,
[sfreq*2+events_time_description[i,0],0,events_time_description[i,2]],
axis=0)
i+=1
else:
i+=1
events=(events_time_description,events_dict)
epochs4detect=mne.Epochs(raw4detect,events=events[0],event_id = events[1],tmin=0, tmax=2,preload=True)
epochs_full=mne.Epochs(raw_full_eeg,events=events[0], event_id = events[1],tmin=0, tmax=2,preload=True)
epochs_eog_raw_filt = mne.Epochs(raw_full_eog,events=events[0], event_id = events[1],tmin=0, tmax=2,preload=True)
if subj in bad_channels.keys() and session in bad_channels[subj].keys() :
epochs_bad_channels = mne.Epochs(raw_bad,events=events[0], event_id = events[1],tmin=0, tmax=2,preload=True)
Data4detect = epochs4detect.get_data()
Data2correct = epochs_full.get_data()
DataClean = np.zeros((Data2correct.shape))
num_epochs2correct = 0
num_epochscorrected = 0
for i_epoch in range(Data2correct.shape[0]):
num_epochs2correct+=1
Epoch4detect = Data4detect[i_epoch,:,:]
Epoch2corr = Data2correct[i_epoch,:,:]
DataClean[i_epoch,:,:],reconstruct = asr_process_on_epoch(Epoch2corr,Epoch4detect,asr_state)
if reconstruct ==True:
num_epochscorrected +=1
print('ASR correcting rate is {}'.format(num_epochscorrected/num_epochs2correct))
ASR_figs.append(plt.figure())
ASR_captions.append('ASR correcting rate is '+str(num_epochscorrected/num_epochs2correct))
epochs_ASR_clean = mne.EpochsArray(DataClean,info=epochs_full.info,events=events[0],event_id = events[1])
epochs_ASR_clean.add_channels([epochs_eog_raw_filt])
if subj in bad_channels.keys() and session in bad_channels[subj].keys() :
epochs_ASR_clean.add_channels([epochs_bad_channels])
epochs_ASR_clean.interpolate_bads()
# epochs_ASR_clean.plot(scalings=100e-6,n_epochs=5)
############### step02 ICA components check ##########################
ica = mne.preprocessing.ICA(n_components=pca_n_comp, method='fastica', random_state=11, max_iter=100)
ica.fit(epochs_ASR_clean,decim=decim)
preProc_ica_fname = preProc_ica_path+'subj0'+subj+'session'+session+'preProc_ica.fif'
ica.save(preProc_ica_fname)
|
the-stack_0_25758
|
import sys, os
from busmap.urbsweb.mapa import DataDir, MapRegion
from PIL import Image
import logging
logger = logging.getLogger(__name__)
dbg = logger.debug
warn = logger.warning
info = logger.info
BOTTOM_CROP = 17
def combine_map_images(ddir, linha):
# region-size of the largest image we could imagine building:
LARGE_IMAGE = 10000.0
rnames = ddir.get('linha/%s/all_region_names' % (linha))
dbg('gathering image sizes...')
persize = {}
for rname in rnames:
r = MapRegion(ddir, linha, rname)
img = r.image()
isize = img.size
rsize = r.size()
region_per_pixel = [float(rsize[i])/isize[i] for i in (0,1)]
pixels_for_bigimage = [int(LARGE_IMAGE/region_per_pixel[i]) for i in (0,1)]
k = (tuple(pixels_for_bigimage), isize)
persize.setdefault(k, []).append(rname)
dbg('looking for best size...')
# look for best size combination:
#dbg('sizes: %r', repr(persize))
bestsize = None
bestcount = 0
for size,names in persize.items():
count = len(names)
dbg('count for %r: %r', size, count)
if bestsize is None or count > bestcount:
bestsize = size
bestcount = count
info("best size: %r", bestsize)
rnames = persize[bestsize]
pixels_for_bigimage,imgsize = bestsize
region_per_pixel = [(LARGE_IMAGE/pixels_for_bigimage[i]) for i in (0,1)]
dbg('region/pixel ratio: %r', region_per_pixel)
region_for_img = [region_per_pixel[i]*imgsize[i] for i in (0,1)]
dbg('region size for each image: %r', region_for_img)
def regions():
for rname in rnames:
yield MapRegion(ddir, linha, rname)
minx = min([r.val('minx') for r in regions()])
miny = min([r.val('miny') for r in regions()])
maxx = max([r.val('maxx') for r in regions()])
maxy = max([r.val('maxy') for r in regions()])
dbg('lat/lng ranges: %r-%r (%r), %r-%r (%r)', minx, maxx, maxx-minx, miny, maxy, maxy-miny)
image_region_size = (maxx-minx, maxy-miny)
image_pixel_size = [int(image_region_size[i]/region_per_pixel[i]) for i in (0,1)]
dbg('image pixel size: %r', image_pixel_size)
img = Image.new('RGB', image_pixel_size)
for r in regions():
subimg = r.image()
#XXX: hack to avoid the white rectangle over the box
cropimg = subimg.crop((0, 0, subimg.size[0], subimg.size[1]-BOTTOM_CROP))
# 0,0 (top-left corner) is minx,maxx
dbg('ranges of image are: %r-%r (%r), %r-%r (%r)', r.val('minx'), r.val('maxx'), r.width(), r.val('miny'), r.val('maxy'), r.height())
region_offset = (r.val('minx')-minx, maxy-r.val('maxy'))
dbg('region offset is: %r', region_offset)
pixel_offset = tuple([int(region_offset[i]/region_per_pixel[i]) for i in (0,1)])
dbg('pasting at pixel offset: %r', pixel_offset)
img.paste(cropimg, pixel_offset)
fname = ddir._file_for_key('linha/%s/full_map.png' % (linha))
dbg('saving to: %r', fname)
img.save(fname, 'png')
info('saved to: %s', fname)
def main(argv):
    loglevel = logging.INFO
    args = []
    i = 0
    while i < len(argv[1:]):
        arg = argv[1+i]
if arg == '-d':
loglevel = logging.DEBUG
else:
args.append(arg)
i += 1
logging.basicConfig(stream=sys.stderr, level=loglevel)
dirpath = args[0]
ddir = DataDir(dirpath)
linha = args[1]
combine_map_images(ddir, linha)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
the-stack_0_25761
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import setuptools
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setuptools.setup(
name = 'openstack-dashboard',
version = '0.4',
url = 'https://github.com/cloudbuilders/openstack-dashboard.git',
license = 'Apache 2.0',
description = "A Django interface for OpenStack.",
long_description = read('README'),
author = 'Devin Carlen',
author_email = '[email protected]',
data_files = [],
install_requires = ['setuptools', 'mox>=0.5.0'],
zip_safe = False,
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
]
)
|
the-stack_0_25762
|
def show_low_support_classes(dset):
"""
dset = merged
coarse = merged
"""
# aid = list(dset.anns.values())[0]['id']
# dset.show_annotation(aid)
dset._remove_keypoint_annotations()
gids = sorted([gid for gid, aids in dset.gid_to_aids.items() if aids])
catfreq = dset.category_annotation_frequency()
inspect_cids = []
for name, freq in catfreq.items():
if freq > 0 and freq < 50:
cid = dset.name_to_cat[name]['id']
inspect_cids.append(cid)
inspect_gids = list(set(ub.flatten(ub.take(dset.cid_to_gids, inspect_cids))))
# inspect_gids = [gid for gid in inspect_gids if 'habcam' not in dset.imgs[gid]['file_name']]
import utool as ut
if ut.inIPython():
import IPython
IPython.get_ipython().magic('pylab qt5 --no-import-all')
print('inspect_gids = {!r}'.format(inspect_gids))
from matplotlib import pyplot as plt
for gid in ut.InteractiveIter(inspect_gids):
img = dset.imgs[gid]
print('img = {}'.format(ub.repr2(img)))
aids = dset.gid_to_aids[gid]
primary_aid = None
anns = list(ub.take(dset.anns, aids))
for ann in anns:
ann = ann.copy()
ann['category'] = dset.cats[ann['category_id']]['name']
print('ann = {}'.format(ub.repr2(ann)))
if primary_aid is None:
if ann['category_id'] in inspect_cids:
primary_aid = ann['id']
try:
fig = plt.figure(1)
fig.clf()
dset.show_annotation(primary_aid, gid=gid)
fig.canvas.draw()
except Exception:
print('cannot draw')
# # import utool as ut
# for gid in gids:
# fig = plt.figure(1)
# fig.clf()
# dset.show_annotation(gid=gid)
# fig.canvas.draw()
def setup_detectron(train_dset, test_dset):
cfg = viame_wrangler.config.WrangleConfig()
train_dset._ensure_imgsize()
test_dset._ensure_imgsize()
print('Writing')
train_dset.dump(join(cfg.challenge_work_dir, 'phase0-merged-train.mscoco.json'))
test_dset.dump(join(cfg.challenge_work_dir, 'phase0-merged-test.mscoco.json'))
num_classes = len(train_dset.cats)
print('num_classes = {!r}'.format(num_classes))
# Make a detectron yaml file
config_text = ub.codeblock(
"""
MODEL:
TYPE: generalized_rcnn
CONV_BODY: ResNet.add_ResNet50_conv4_body
NUM_CLASSES: {num_classes}
FASTER_RCNN: True
NUM_GPUS: 1
SOLVER:
WEIGHT_DECAY: 0.0001
LR_POLICY: steps_with_decay
BASE_LR: 0.01
GAMMA: 0.1
# 1x schedule (note TRAIN.IMS_PER_BATCH: 1)
MAX_ITER: 180000
STEPS: [0, 120000, 160000]
RPN:
SIZES: (32, 64, 128, 256, 512)
FAST_RCNN:
ROI_BOX_HEAD: ResNet.add_ResNet_roi_conv5_head
ROI_XFORM_METHOD: RoIAlign
TRAIN:
WEIGHTS: https://s3-us-west-2.amazonaws.com/detectron/ImageNetPretrained/MSRA/R-50.pkl
DATASETS: ('/work/viame-challenge-2018/phase0-merged-train.mscoco.json',)
IM_DIR: '/data/viame-challenge-2018/phase0-imagery'
SCALES: (800,)
MAX_SIZE: 1333
IMS_PER_BATCH: 1
BATCH_SIZE_PER_IM: 512
TEST:
DATASETS: ('/work/viame-challenge-2018/phase0-merged-test.mscoco.json',)
IM_DIR: '/data/viame-challenge-2018/phase0-imagery'
SCALES: (800,)
MAX_SIZE: 1333
NMS: 0.5
FORCE_JSON_DATASET_EVAL: True
RPN_PRE_NMS_TOP_N: 6000
RPN_POST_NMS_TOP_N: 1000
OUTPUT_DIR: /work/viame-challenge-2018/output
""")
config_text = config_text.format(
num_classes=num_classes,
)
ub.writeto(join(cfg.challenge_work_dir, 'phase0-faster-rcnn.yaml'), config_text)
docker_cmd = ('nvidia-docker run '
'-v {work_dir}:/work -v {data_dir}:/data '
'-it detectron:c2-cuda9-cudnn7 bash').format(
work_dir=cfg.work_dir, data_dir=cfg.data_dir)
train_cmd = ('python2 tools/train_net.py '
'--cfg /work/viame-challenge-2018/phase0-faster-rcnn.yaml '
'OUTPUT_DIR /work/viame-challenge-2018/output')
hacks = ub.codeblock(
"""
git remote add Erotemic https://github.com/Erotemic/Detectron.git
git fetch --all
git checkout general_dataset
python2 tools/train_net.py --cfg /work/viame-challenge-2018/phase0-faster-rcnn.yaml OUTPUT_DIR /work/viame-challenge-2018/output
""")
print(docker_cmd)
print(train_cmd)
|
the-stack_0_25763
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Logging."""
import builtins
import decimal
import logging
import os
import sys
import xnas.core.distributed as dist
import simplejson
from xnas.core.config import cfg
# Show filename and line number in logs
_FORMAT = "[%(filename)s: %(lineno)3d]: %(message)s"
# Log file name (for cfg.LOG_DEST = 'file')
_LOG_FILE = "stdout.log"
# Data output with dump_log_data(data, data_type) will be tagged w/ this
_TAG = "json_stats: "
# Data output with dump_log_data(data, data_type) will have data[_TYPE]=data_type
_TYPE = "_type"
def _suppress_print():
"""Suppresses printing from the current process."""
def ignore(*_objects, _sep=" ", _end="\n", _file=sys.stdout, _flush=False):
pass
builtins.print = ignore
def setup_logging():
"""Sets up the logging."""
# Enable logging only for the master process
if dist.is_master_proc():
# Clear the root logger to prevent any existing logging config
# (e.g. set by another module) from messing with our setup
logging.root.handlers = []
# Construct logging configuration
logging_config = {"level": logging.INFO, "format": _FORMAT}
# Log either to stdout or to a file
if cfg.LOG_DEST == "stdout":
logging_config["stream"] = sys.stdout
else:
logging_config["filename"] = os.path.join(cfg.OUT_DIR, _LOG_FILE)
# Configure logging
logging.basicConfig(**logging_config)
else:
_suppress_print()
def get_logger(name):
"""Retrieves the logger."""
return logging.getLogger(name)
def dump_log_data(data, data_type, prec=4):
"""Covert data (a dictionary) into tagged json string for logging."""
data[_TYPE] = data_type
data = float_to_decimal(data, prec)
data_json = simplejson.dumps(data, sort_keys=True, use_decimal=True)
return "{:s}{:s}".format(_TAG, data_json)
def float_to_decimal(data, prec=4):
"""Convert floats to decimals which allows for fixed width json."""
if isinstance(data, dict):
return {k: float_to_decimal(v, prec) for k, v in data.items()}
if isinstance(data, float):
return decimal.Decimal(("{:." + str(prec) + "f}").format(data))
else:
return data
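# Illustrative usage sketch (an assumption, not part of the original module):
# a training loop would typically emit one tagged, fixed-precision JSON line
# per epoch, e.g.
#
#     setup_logging()
#     logger = get_logger(__name__)
#     logger.info(dump_log_data({"epoch": "1/90", "loss": 0.123456}, "train_epoch"))
#     # -> [train.py:  42]: json_stats: {"_type": "train_epoch", "epoch": "1/90", "loss": 0.1234}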
# def get_log_files(log_dir, name_filter=""):
# """Get all log files in directory containing subdirs of trained models."""
# names = [n for n in sorted(os.listdir(log_dir)) if name_filter in n]
# files = [os.path.join(log_dir, n, _LOG_FILE) for n in names]
# f_n_ps = [(f, n) for (f, n) in zip(files, names) if os.path.exists(f)]
# files, names = zip(*f_n_ps) if f_n_ps else [], []
# return files, names
# def load_log_data(log_file, data_types_to_skip=()):
# """Loads log data into a dictionary of the form data[data_type][metric][index]."""
# # Load log_file
# assert os.path.exists(log_file), "Log file not found: {}".format(log_file)
# with open(log_file, "r") as f:
# lines = f.readlines()
# # Extract and parse lines that start with _TAG and have a type specified
# lines = [_l[_l.find(_TAG) + len(_TAG):] for _l in lines if _TAG in _l]
# lines = [simplejson.loads(_l) for _l in lines]
# lines = [_l for _l in lines if _TYPE in _l and not _l[_TYPE]
# in data_types_to_skip]
# # Generate data structure accessed by data[data_type][index][metric]
# data_types = [_l[_TYPE] for _l in lines]
# data = {t: [] for t in data_types}
# for t, line in zip(data_types, lines):
# del line[_TYPE]
# data[t].append(line)
# # Generate data structure accessed by data[data_type][metric][index]
# for t in data:
# metrics = sorted(data[t][0].keys())
# err_str = "Inconsistent metrics in log for _type={}: {}".format(
# t, metrics)
# assert all(sorted(d.keys()) == metrics for d in data[t]), err_str
# data[t] = {m: [d[m] for d in data[t]] for m in metrics}
# return data
# def sort_log_data(data):
# """Sort each data[data_type][metric] by epoch or keep only first instance."""
# for t in data:
# if "epoch" in data[t]:
# epoch = [float(e.split("/")[0]) for e in data[t]["epoch"]]
# if "iter" in data[t]:
# i_cur = [float(i.split("/")[0]) for i in data[t]["iter"]]
# i_max = [float(i.split("/")[1]) for i in data[t]["iter"]]
# epoch = [e + (ic - 1.0) / im for e, ic,
# im in zip(epoch, i_cur, i_max)]
# for m in data[t]:
# data[t][m] = [v for _, v in sorted(zip(epoch, data[t][m]))]
# else:
# data[t] = {m: d[0] for m, d in data[t].items()}
# return data
|
the-stack_0_25765
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
from . import common
from pyalgotrade import bar
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.barfeed import sqlitefeed
from pyalgotrade import marketsession
from pyalgotrade import strategy
from pyalgotrade.technical import ma
from pyalgotrade.technical import cross
class NikkeiSpyStrategy(strategy.BacktestingStrategy):
def __init__(self, feed, smaPeriod):
strategy.BacktestingStrategy.__init__(self, feed)
assert(smaPeriod > 3)
self.__lead = "^n225"
self.__lag = "spy"
self.__adjClose = feed[self.__lead].getAdjCloseDataSeries()
# Exit signal is more sensitive than entry.
self.__fastSMA = ma.SMA(self.__adjClose, int(smaPeriod/2))
self.__slowSMA = ma.SMA(self.__adjClose, smaPeriod)
self.__pos = None
def onEnterCanceled(self, position):
assert(position == self.__pos)
self.__pos = None
def onExitOk(self, position):
assert(position == self.__pos)
self.__pos = None
def __calculatePosSize(self):
cash = self.getBroker().getCash()
lastPrice = self.getFeed()[self.__lag][-1].getClose()
ret = cash / lastPrice
return int(ret)
def onBars(self, bars):
if bars.getBar(self.__lead):
if cross.cross_above(self.__adjClose, self.__slowSMA) == 1 and self.__pos is None:
shares = self.__calculatePosSize()
if shares:
self.__pos = self.enterLong(self.__lag, shares)
elif cross.cross_below(self.__adjClose, self.__fastSMA) == 1 and self.__pos is not None:
self.__pos.exitMarket()
class TestCase(common.TestCase):
def __testDifferentTimezonesImpl(self, feed):
self.assertTrue("^n225" in feed)
self.assertTrue("spy" in feed)
self.assertTrue("cacho" not in feed)
strat = NikkeiSpyStrategy(feed, 34)
strat.run()
self.assertEqual(round(strat.getResult(), 2), 1033854.48)
def testDifferentTimezones(self):
# Market times in UTC:
# - TSE: 0hs ~ 6hs
# - US: 14:30hs ~ 21hs
feed = yahoofeed.Feed()
for year in [2010, 2011]:
feed.addBarsFromCSV("^n225", common.get_data_file_path("nikkei-%d-yahoofinance.csv" % year), marketsession.TSE.getTimezone())
feed.addBarsFromCSV("spy", common.get_data_file_path("spy-%d-yahoofinance.csv" % year), marketsession.USEquities.getTimezone())
self.__testDifferentTimezonesImpl(feed)
def testDifferentTimezones_DBFeed(self):
feed = sqlitefeed.Feed(common.get_data_file_path("multiinstrument.sqlite"), bar.Frequency.DAY)
feed.loadBars("^n225")
feed.loadBars("spy")
self.__testDifferentTimezonesImpl(feed)
def testDifferentTimezones_DBFeed_LocalizedBars(self):
feed = sqlitefeed.Feed(common.get_data_file_path("multiinstrument.sqlite"), bar.Frequency.DAY)
feed.loadBars("^n225", marketsession.TSE.getTimezone())
feed.loadBars("spy", marketsession.USEquities.getTimezone())
self.__testDifferentTimezonesImpl(feed)
|
the-stack_0_25766
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 Ahmet Bakan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module contains unit tests for :mod:`~prody.atomic`."""
__author__ = 'Ahmet Bakan'
__copyright__ = 'Copyright (C) 2010-2012 Ahmet Bakan'
import os.path
import pickle
from numpy.testing import *
from prody import *
from prody import LOGGER
from prody.atomic.fields import READONLY
from prody.tests import unittest, TEMPDIR
from prody.tests.test_datafiles import *
LOGGER.verbosity = 'none'
ATOL = 1e-5
RTOL = 0
ATOMS = parseDatafile('multi_model_truncated', subset='ca')
class TestCopying(unittest.TestCase):
def TestCopyAtomGroup(self):
atoms = ATOMS.copy()
assert_equal(atoms.getCoordsets(), ATOMS.getCoordsets())
for label in ATOMS.getDataLabels():
if label in READONLY:
continue
assert_equal(atoms.getData(label), ATOMS.getData(label))
def TestCopyChain(self):
CHAIN = ATOMS['A']
chain = CHAIN.copy()
assert_equal(chain.getCoordsets(), CHAIN.getCoordsets())
for label in ATOMS.getDataLabels():
if label in READONLY:
continue
assert_equal(chain.getData(label), CHAIN.getData(label),
'failed to copy ' + label)
def TestCopyAtom(self):
ATOM = ATOMS[0]
atom = ATOM.copy()
assert_equal(atom[0].getCoordsets(), ATOM.getCoordsets())
for label in ATOMS.getDataLabels():
if label in READONLY:
continue
assert_equal(atom[0].getData(label), ATOM.getData(label),
'failed to copy ' + label)
def TestCopySelstr(self):
SELECTION = ATOMS.calpha
selection = SELECTION.copy()
assert_equal(selection.getCoordsets(), SELECTION.getCoordsets())
for label in ATOMS.getDataLabels():
if label in READONLY:
continue
assert_equal(selection.getData(label), SELECTION.getData(label),
'failed to copy ' + label)
class TestSaveLoad(unittest.TestCase):
def testSaveLoad(self):
atoms = loadAtoms(saveAtoms(ATOMS, os.path.join(TEMPDIR, 'atoms')))
assert_equal(atoms.getCoordsets(), ATOMS.getCoordsets())
for label in ATOMS.getDataLabels():
assert_equal(atoms.getData(label), ATOMS.getData(label),
'failed to load ' + label)
class TestPickling(unittest.TestCase):
def testAtomGroup(self):
atoms1 = parseDatafile('multi_model_truncated', subset='ca')
atoms2 = pickle.loads(pickle.dumps(atoms1))
s1 = atoms1.__getstate__()
s2 = atoms2.__getstate__()
for key in s1:
assert_equal(s1[key], s2[key])
self.assertEqual(atoms1, atoms2)
def testSelection(self):
sel = parseDatafile('multi_model_truncated', subset='ca')[:3]
self.assertEqual(sel, pickle.loads(pickle.dumps(sel)))
def testAtom(self):
atom = parseDatafile('multi_model_truncated', subset='ca')[0]
self.assertEqual(atom, pickle.loads(pickle.dumps(atom)))
|
the-stack_0_25767
|
import numpy as np
from numpy.lib.index_tricks import IndexExpression
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.datasets import Planetoid
from torch_geometric.utils import to_networkx
import networkx as nx
from sklearn.metrics import accuracy_score
from sklearn.manifold import TSNE
from sklearn.svm import SVC
dataset = Planetoid(root="data/Cora", name="Cora")
# print(dataset.num_classes)
# print(dataset.num_edge_features)
# CoraNet = to_networkx(dataset.data)
# CoraNet = CoraNet.to_undirected()
# Node_degree = pd.DataFrame(data=CoraNet.degree, columns=["Node", "Degree"])
# Node_degree = Node_degree.sort_values(by=["Degree"], ascending=False)
# Node_degree = Node_degree.reset_index(drop=True)
# Node_degree.iloc[0:30, :].plot(
# x="Node", y="Degree", kind="bar", figsize=(10, 7))
# Node_class = dataset.data.y.data.numpy()
# plt.xlabel("Node", size=12)
# plt.ylabel("Degree", size=12)
# plt.show()
# pos = nx.spring_layout(CoraNet)
# nodecolor = ["red", "blue", "green", "yellow", "peru", "violet", "cyan"]
# nodelabel = np.array(list(CoraNet.nodes))
# plt.figure(figsize=(16, 12))
# for ii in np.arange(len(np.unique(Node_class))):
# nodelist = nodelabel[Node_class == ii]
# nx.draw_networkx_nodes(CoraNet, pos, nodelist=list(
# nodelist), node_size=50, node_color=nodecolor[ii], alpha=0.8)
# nx.draw_networkx_edges(CoraNet, pos, width=1, edge_color="black")
# plt.show()
class GCNnet(torch.nn.Module):
def __init__(self, input_feature, num_classes):
super(GCNnet, self).__init__()
self.input_feature = input_feature
self.num_classes = num_classes
self.conv1 = GCNConv(input_feature, 32)
self.conv2 = GCNConv(32, num_classes)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = self.conv1(x, edge_index)
x = F.relu(x)
x = self.conv2(x, edge_index)
return F.softmax(x, dim=1)
input_feature = dataset.num_node_features
num_classes = dataset.num_classes
mygcn = GCNnet(input_feature, num_classes)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = mygcn.to(device)
data = dataset[0].to(device)
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
train_loss_all = []
val_loss_all = []
model.train()
for epoch in range(200):
optimizer.zero_grad()
out = model(data)
loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
train_loss_all.append(loss.data.cpu().numpy())
loss = F.cross_entropy(out[data.val_mask], data.y[data.val_mask])
val_loss_all.append(loss.data.cpu().numpy())
if epoch % 20 == 0:
print("epoch:", epoch, "; Train Loss",
train_loss_all[-1], "; Val Loss", val_loss_all[-1])
plt.figure(figsize=(10, 6))
plt.plot(train_loss_all, "ro-", label="Train loss")
plt.plot(val_loss_all, "bs-", label="Val Loss")
plt.legend()
plt.grid()
plt.xlabel("epoch", size=13)
plt.ylabel("loss", size=13)
plt.title("Graph Convolutional Networks", size=14)
plt.show()
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
model.conv1.register_forward_hook(get_activation("conv1"))
model.conv2.register_forward_hook(get_activation("conv2"))
out = model(data)
conv1 = activation["conv1"].data.cpu().numpy()
conv2 = activation["conv2"].data.cpu().numpy()
x_tsne = TSNE(n_components=2).fit_transform(conv2)
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(1, 1, 1)
X = x_tsne[:, 0]
Y = x_tsne[:, 1]
ax1.set_xlim([min(X), max(X)])
ax1.set_ylim([min(Y), max(Y)])
for ii in range(x_tsne.shape[0]):
text = dataset.data.y.data.cpu().numpy()[ii]
    ax1.text(X[ii], Y[ii], str(text), fontsize=5, bbox=dict(
        boxstyle="round", facecolor=plt.cm.Set1(text), alpha=0.7))
ax1.set_xlabel("TSNE Feature 1", size=13)
ax1.set_ylabel("TSNE Feature 2", size=13)
ax1.set_title("Original feature TSNE", size=14)
plt.show()
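# Illustrative evaluation sketch (not part of the original script): the
# accuracy_score import above can be used to report test-split accuracy of
# the trained model on the Planetoid split.
model.eval()
pred = model(data).argmax(dim=1)
test_acc = accuracy_score(data.y[data.test_mask].cpu().numpy(),
                          pred[data.test_mask].cpu().numpy())
print("Test accuracy:", test_acc)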
|
the-stack_0_25769
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.task.target_restriction_mixins import (
DeprecatedSkipAndDeprecatedTransitiveGoalOptionsRegistrar,
HasSkipAndTransitiveGoalOptionsMixin,
)
class FmtGoalRegistrar(DeprecatedSkipAndDeprecatedTransitiveGoalOptionsRegistrar):
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--only",
type=str,
default=None,
fingerprint=True,
advanced=True,
help="Only run the specified formatter. Currently the only accepted values are "
"`scalafix` or not setting any value.",
)
class FmtTaskMixin(HasSkipAndTransitiveGoalOptionsMixin):
"""A mixin to combine with code formatting tasks."""
goal_options_registrar_cls = FmtGoalRegistrar
target_filtering_enabled = True
|
the-stack_0_25770
|
from flask import Flask
from flask import request
import socket
import os
import sys
import requests
app = Flask(__name__)
TRACE_HEADERS_TO_PROPAGATE = [
'X-Ot-Span-Context',
'X-Request-Id',
# Zipkin headers
'X-B3-TraceId',
'X-B3-SpanId',
'X-B3-ParentSpanId',
'X-B3-Sampled',
'X-B3-Flags',
# Jaeger header (for native client)
"uber-trace-id"
]
def render_page():
return ('<body bgcolor="{}"><span style="color:white;font-size:4em;">\n'
'Hello from {} (hostname: {} resolvedhostname:{})\n</span></body>\n'.format(
os.environ['SERVICE_NAME'],
os.environ['SERVICE_NAME'],
socket.gethostname(),
socket.gethostbyname(socket.gethostname())))
@app.route('/service/<service_color>')
def service(service_color):
return render_page()
@app.route('/trace/<service_color>')
def trace(service_color):
headers = {}
## For Propagation test ##
# Call service 'green' from service 'blue'
if (os.environ['SERVICE_NAME']) == 'blue':
for header in TRACE_HEADERS_TO_PROPAGATE:
if header in request.headers:
headers[header] = request.headers[header]
ret = requests.get("http://localhost:9000/trace/green", headers=headers)
# Call service 'red' from service 'green'
elif (os.environ['SERVICE_NAME']) == 'green':
for header in TRACE_HEADERS_TO_PROPAGATE:
if header in request.headers:
headers[header] = request.headers[header]
ret = requests.get("http://localhost:9000/trace/red", headers=headers)
return render_page()
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080, debug=True)
|
the-stack_0_25771
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Univariate Normal Distribution.
"""
from scipy.stats.distributions import norm
from qiskit.aqua.utils.validation import validate_min
from .univariate_distribution import UnivariateDistribution
class NormalDistribution(UnivariateDistribution):
"""
The Univariate Normal Distribution.
Normal distribution, truncated to lower and upper bound and discretized on a grid defined by
the number of qubits.
"""
def __init__(self,
num_target_qubits: int,
mu: float = 0,
sigma: float = 1,
low: float = -1,
high: float = 1) -> None:
r"""
Args:
num_target_qubits: Number of qubits it acts on, has a minimum value of 1.
mu: Expected value of considered normal distribution
sigma: standard deviation of considered normal distribution
low: Lower bound, i.e., the value corresponding to \|0...0>
(assuming an equidistant grid)
high: Upper bound, i.e., the value corresponding to \|1...1>
(assuming an equidistant grid)
"""
validate_min('num_target_qubits', num_target_qubits, 1)
probabilities, _ = UnivariateDistribution.\
pdf_to_probabilities(
lambda x: norm.pdf(x, mu, sigma), low, high, 2 ** num_target_qubits)
super().__init__(num_target_qubits, probabilities, low, high)
@staticmethod
def _replacement():
return 'qiskit.circuit.library.NormalDistribution'
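# Illustrative usage sketch (an assumption, not part of the original module):
# discretize a standard normal on 3 qubits over [-3, 3]; the probabilities
# attribute assumed below is inherited from UnivariateDistribution and holds
# the 2**3 grid probabilities.
#
#     dist = NormalDistribution(num_target_qubits=3, mu=0.0, sigma=1.0, low=-3.0, high=3.0)
#     print(dist.probabilities)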
|
the-stack_0_25772
|
# Copyright (c) 2016-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import pandas
from typechecks import require_string
from pandas import isnull
from .reference import infer_genome
from .variant import Variant, variant_ascending_position_sort_key
from .variant_collection import VariantCollection
TCGA_PATIENT_ID_LENGTH = 12
MAF_COLUMN_NAMES = [
'Hugo_Symbol',
'Entrez_Gene_Id',
'Center',
'NCBI_Build',
'Chromosome',
'Start_Position',
'End_Position',
'Strand',
'Variant_Classification',
'Variant_Type',
'Reference_Allele',
'Tumor_Seq_Allele1',
'Tumor_Seq_Allele2',
'dbSNP_RS',
'dbSNP_Val_Status',
'Tumor_Sample_Barcode',
'Matched_Norm_Sample_Barcode',
'Match_Norm_Seq_Allele1',
'Match_Norm_Seq_Allele2',
]
def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None):
"""
Load the guaranteed columns of a TCGA MAF file into a DataFrame
Parameters
----------
path : str
Path to MAF file
nrows : int
Optional limit to number of rows loaded
raise_on_error : bool
Raise an exception upon encountering an error or log an error
encoding : str, optional
Encoding to use for UTF when reading MAF file.
"""
require_string(path, "Path to MAF")
n_basic_columns = len(MAF_COLUMN_NAMES)
# pylint: disable=no-member
# pylint gets confused by read_csv
df = pandas.read_csv(
path,
comment="#",
sep="\t",
low_memory=False,
skip_blank_lines=True,
header=0,
encoding=encoding)
if len(df.columns) < n_basic_columns:
error_message = (
"Too few columns in MAF file %s, expected %d but got %d : %s" % (
path, n_basic_columns, len(df.columns), df.columns))
if raise_on_error:
raise ValueError(error_message)
else:
logging.warn(error_message)
# check each pair of expected/actual column names to make sure they match
for expected, actual in zip(MAF_COLUMN_NAMES, df.columns):
if expected != actual:
# MAFs in the wild have capitalization differences in their
# column names, normalize them to always use the names above
if expected.lower() == actual.lower():
# using DataFrame.rename in Python 2.7.x doesn't seem to
# work for some files, possibly because Pandas treats
# unicode vs. str columns as different?
df[expected] = df[actual]
del df[actual]
else:
error_message = (
"Expected column %s but got %s" % (expected, actual))
if raise_on_error:
raise ValueError(error_message)
else:
logging.warning(error_message)
return df
def load_maf(
path,
optional_cols=[],
sort_key=variant_ascending_position_sort_key,
distinct=True,
raise_on_error=True,
encoding=None):
"""
Load reference name and Variant objects from MAF filename.
Parameters
----------
path : str
Path to MAF (*.maf).
optional_cols : list, optional
A list of MAF columns to include as metadata if they are present in the MAF.
Does not result in an error if those columns are not present.
sort_key : fn
Function which maps each element to a sorting criterion.
Set to None to not sort the variants.
distinct : bool
Don't keep repeated variants
raise_on_error : bool
Raise an exception upon encountering an error or just log a warning.
encoding : str, optional
Encoding to use when reading the MAF file (e.g. 'utf-8').
"""
# pylint: disable=no-member
# pylint gets confused by read_csv inside load_maf_dataframe
maf_df = load_maf_dataframe(path, raise_on_error=raise_on_error, encoding=encoding)
if len(maf_df) == 0 and raise_on_error:
raise ValueError("Empty MAF file %s" % path)
ensembl_objects = {}
variants = []
metadata = {}
for _, x in maf_df.iterrows():
contig = x.Chromosome
if isnull(contig):
error_message = "Invalid contig name: %s" % (contig,)
if raise_on_error:
raise ValueError(error_message)
else:
logging.warning(error_message)
continue
start_pos = x.Start_Position
ref = x.Reference_Allele
# a single MAF file can mix variants from multiple Ensembl
# releases (the genome assembly for each row is specified by
# the NCBI_Build column)
ncbi_build = x.NCBI_Build
if ncbi_build in ensembl_objects:
ensembl = ensembl_objects[ncbi_build]
else:
if isinstance(ncbi_build, int):
reference_name = "B%d" % ncbi_build
else:
reference_name = str(ncbi_build)
ensembl = infer_genome(reference_name)
ensembl_objects[ncbi_build] = ensembl
# have to try both Tumor_Seq_Allele1 and Tumor_Seq_Allele2
# to figure out which is different from the reference allele
if x.Tumor_Seq_Allele1 != ref:
alt = x.Tumor_Seq_Allele1
else:
if x.Tumor_Seq_Allele2 == ref:
error_message = (
"Both tumor alleles agree with reference %s: %s" % (
ref, x,))
if raise_on_error:
raise ValueError(error_message)
else:
logging.warning(error_message)
continue
alt = x.Tumor_Seq_Allele2
variant = Variant(
contig,
start_pos,
str(ref),
str(alt),
ensembl=ensembl)
# keep metadata about the variant and its TCGA annotation
metadata[variant] = {
'Hugo_Symbol': x.Hugo_Symbol,
'Center': x.Center,
'Strand': x.Strand,
'Variant_Classification': x.Variant_Classification,
'Variant_Type': x.Variant_Type,
'dbSNP_RS': x.dbSNP_RS,
'dbSNP_Val_Status': x.dbSNP_Val_Status,
'Tumor_Sample_Barcode': x.Tumor_Sample_Barcode,
'Matched_Norm_Sample_Barcode': x.Matched_Norm_Sample_Barcode,
}
for optional_col in optional_cols:
if optional_col in x:
metadata[variant][optional_col] = x[optional_col]
variants.append(variant)
return VariantCollection(
variants=variants,
source_to_metadata_dict={path: metadata},
sort_key=sort_key,
distinct=distinct)
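# Minimal usage sketch (illustrative; "example.maf" is a placeholder path and
# the optional column name is an assumption, not a column guaranteed to exist):
#
#     maf_df = load_maf_dataframe("example.maf")   # raw DataFrame view
#     variant_collection = load_maf("example.maf", optional_cols=["t_ref_count"])
#
# Passing raise_on_error=False downgrades malformed rows to logged warnings
# instead of raising, which can be useful for exploratory loading.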
|
the-stack_0_25773
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
direct_fetch_response_time = 0.05
class BaseNode(NodeConnCB):
def __init__(self):
super().__init__()
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, conn, message):
self.last_headers = message
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_close(self, conn):
self.disconnected = True
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders(timeout=5)
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[ inv_node.send_block_inv(x.sha256) for x in blocks ]
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
test_node.last_getheaders = True
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
|
the-stack_0_25774
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import app.config
import app.line_buffer
import app.log
import app.regex
# No selection.
kSelectionNone = 0
# Entire document selected.
kSelectionAll = 1
# A rectangular block selection.
kSelectionBlock = 2
# Character by character selection.
kSelectionCharacter = 3
# Select whole lines.
kSelectionLine = 4
# Select whole words.
kSelectionWord = 5
# How many selection modes are there.
kSelectionModeCount = 6
kSelectionModeNames = [
'None',
'All',
'Block',
'Char',
'Line',
'Word',
]
class Selectable(app.line_buffer.LineBuffer):
def __init__(self, program):
app.line_buffer.LineBuffer.__init__(self, program)
# When a text document is not line wrapped then each row will represent
# one line in the document, though rows are zero based and lines are one
# based. With line wrapping enabled there may be more rows than lines
# since a line may wrap into multiple rows.
self.penRow = 0
# When a text document contains only ascii characters then each char
# (character) will represent one column in the text line (col is zero
# based and the column displayed in the UI is one based). When double
# wide characters are present then a line of text will have more columns
# than characters.
# (penChar is not currently used).
self.penChar = 0
# When a text document contains only ascii characters then each column
# will represent one column in the text line (col is zero based and
# column displayed in the UI is one based).
self.penCol = 0
self.markerRow = 0
self.markerCol = 0
self.selectionMode = kSelectionNone
def countSelected(self):
lines = self.getSelectedText()
chars = len(lines) - 1 # Count carriage returns.
for line in lines:
chars += len(line)
return chars, len(lines)
def selection(self):
return (self.penRow, self.penCol, self.markerRow, self.markerCol)
def selectionModeName(self):
return kSelectionModeNames[self.selectionMode]
def getSelectedText(self):
upperRow, upperCol, lowerRow, lowerCol = self.startAndEnd()
return self.getText(upperRow, upperCol, lowerRow, lowerCol,
self.selectionMode)
def getText(self,
upperRow,
upperCol,
lowerRow,
lowerCol,
selectionMode=kSelectionCharacter):
if app.config.strict_debug:
assert isinstance(upperRow, int)
assert isinstance(upperCol, int)
assert isinstance(lowerRow, int)
assert isinstance(lowerCol, int)
assert isinstance(selectionMode, int)
assert upperRow <= lowerRow
assert upperRow != lowerRow or upperCol <= lowerCol
assert kSelectionNone <= selectionMode < kSelectionModeCount
lines = []
if selectionMode == kSelectionBlock:
if (lowerRow + 1 < self.parser.rowCount()):
lowerRow += 1
for i in range(upperRow, lowerRow):
lines.append(self.parser.rowText(i, upperCol, lowerCol))
elif (selectionMode == kSelectionAll or
selectionMode == kSelectionCharacter or
selectionMode == kSelectionLine or
selectionMode == kSelectionWord):
if upperRow == lowerRow:
lines.append(self.parser.rowText(upperRow, upperCol, lowerCol))
else:
for i in range(upperRow, lowerRow + 1):
if i == upperRow:
lines.append(self.parser.rowText(i, upperCol))
elif i == lowerRow:
lines.append(self.parser.rowText(i, 0, lowerCol))
else:
lines.append(self.parser.rowText(i))
return tuple(lines)
def doDeleteSelection(self):
"""Call doDelete() with current pen and marker values."""
upperRow, upperCol, lowerRow, lowerCol = self.startAndEnd()
self.doDelete(upperRow, upperCol, lowerRow, lowerCol)
def doDelete(self, upperRow, upperCol, lowerRow, lowerCol):
"""Delete characters from (upperRow, upperCol) up to (lowerRow,
lowerCol) using the current selection mode."""
if app.config.strict_debug:
assert isinstance(upperRow, int)
assert isinstance(upperCol, int)
assert isinstance(lowerRow, int)
assert isinstance(lowerCol, int)
assert upperRow <= lowerRow
assert upperRow != lowerRow or upperCol <= lowerCol
if self.selectionMode == kSelectionBlock:
self.parser.deleteBlock(upperRow, upperCol, lowerRow, lowerCol)
elif (self.selectionMode == kSelectionNone or
self.selectionMode == kSelectionAll or
self.selectionMode == kSelectionCharacter or
self.selectionMode == kSelectionLine or
self.selectionMode == kSelectionWord):
self.parser.deleteRange(upperRow, upperCol, lowerRow, lowerCol)
def insertLines(self, lines):
if app.config.strict_debug:
assert isinstance(lines, tuple)
self.insertLinesAt(self.penRow, self.penCol, lines, self.selectionMode)
def insertLinesAt(self, row, col, lines, selectionMode):
if app.config.strict_debug:
assert isinstance(row, int)
assert isinstance(col, int)
assert isinstance(lines, tuple)
assert isinstance(selectionMode, int)
if len(lines) <= 1:
if len(lines) == 0 or len(lines[0]) == 0:
# Optimization. There's nothing to insert.
return
lines = list(lines)
if selectionMode == kSelectionBlock:
self.parser.insertBlock(row, col, lines)
elif (selectionMode == kSelectionNone or
selectionMode == kSelectionAll or
selectionMode == kSelectionCharacter or
selectionMode == kSelectionLine or
selectionMode == kSelectionWord):
if len(lines) == 1:
self.parser.insert(row, col, lines[0])
else:
self.parser.insertLines(row, col, lines)
else:
app.log.info('selection mode not recognized', selectionMode)
def __extendWords(self, upperRow, upperCol, lowerRow, lowerCol):
"""Extends and existing selection to the nearest word boundaries. The
pen and marker will be extended away from each other. The extension may
occur in one, both, or neither direction.
Returns: tuple of (upperCol, lowerCol).
"""
line = self.parser.rowText(upperRow)
for segment in re.finditer(app.regex.kReWordBoundary, line):
if segment.start() <= upperCol < segment.end():
upperCol = segment.start()
break
line = self.parser.rowText(lowerRow)
for segment in re.finditer(app.regex.kReWordBoundary, line):
if segment.start() < lowerCol < segment.end():
lowerCol = segment.end()
break
return upperCol, lowerCol
def extendSelection(self):
"""Expand the current selection to fit the selection mode. E.g. if the
pen is in the middle of a word, word selection will extend the selection to
the left and right so that the whole word is selected.
Returns: tuple of (penRow, penCol, markerRow, markerCol, selectionMode)
which are the delta values to accomplish the selection mode.
"""
if self.selectionMode == kSelectionNone:
return (0, 0, -self.markerRow, -self.markerCol, 0)
elif self.selectionMode == kSelectionAll:
lowerRow = self.parser.rowCount() - 1
lowerCol = self.parser.rowWidth(-1)
return (lowerRow - self.penRow,
lowerCol - self.penCol, -self.markerRow,
-self.markerCol, 0)
elif self.selectionMode == kSelectionLine:
return (0, -self.penCol, 0, -self.markerCol, 0)
elif self.selectionMode == kSelectionWord:
if self.penRow > self.markerRow or (self.penRow == self.markerRow
and
self.penCol > self.markerCol):
upperCol, lowerCol = self.__extendWords(
self.markerRow, self.markerCol, self.penRow, self.penCol)
return (0, lowerCol - self.penCol, 0, upperCol - self.markerCol,
0)
else:
upperCol, lowerCol = self.__extendWords(
self.penRow, self.penCol, self.markerRow, self.markerCol)
return (0, upperCol - self.penCol, 0, lowerCol - self.markerCol,
0)
return (0, 0, 0, 0, 0)
def startAndEnd(self):
"""Get the marker and pen pair as the earlier of the two then the later
of the two. The result accounts for the current selection mode."""
upperRow = 0
upperCol = 0
lowerRow = 0
lowerCol = 0
if self.selectionMode == kSelectionNone:
upperRow = self.penRow
upperCol = self.penCol
lowerRow = self.penRow
lowerCol = self.penCol
elif self.selectionMode == kSelectionAll:
upperRow = 0
upperCol = 0
lowerRow = self.parser.rowCount() - 1
lowerCol = self.parser.rowWidth(-1)
elif self.selectionMode == kSelectionBlock:
upperRow = min(self.markerRow, self.penRow)
upperCol = min(self.markerCol, self.penCol)
lowerRow = max(self.markerRow, self.penRow)
lowerCol = max(self.markerCol, self.penCol)
elif (self.selectionMode == kSelectionCharacter or
self.selectionMode == kSelectionLine or
self.selectionMode == kSelectionWord):
upperRow = self.markerRow
upperCol = self.markerCol
lowerRow = self.penRow
lowerCol = self.penCol
if upperRow == lowerRow and upperCol > lowerCol:
upperCol, lowerCol = lowerCol, upperCol
elif upperRow > lowerRow:
upperRow, lowerRow = lowerRow, upperRow
upperCol, lowerCol = lowerCol, upperCol
#app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)
return (upperRow, upperCol, lowerRow, lowerCol)
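# Standalone sketch (illustrative, not used by Selectable): __extendWords above
# snaps a column out to the word that contains it. app.regex.kReWordBoundary is
# not shown in this file, so the stand-in pattern r"\w+" below is an assumption.
def _sketch_extend_to_word(line, col, pattern=r"\w+"):
    for segment in re.finditer(pattern, line):
        if segment.start() <= col < segment.end():
            return segment.start(), segment.end()
    return col, col
# _sketch_extend_to_word("hello world", 7) -> (6, 11), the span of "world"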
|
the-stack_0_25776
|
# Just a shitty test
# The way to type a version:
# {full release}.{new features}.{fixes}-{DD/MM}A/B/F (Alpha, Beta, Final)
# Example: 0.1.0-0110A (1st of October, Alpha)
import glob
import os
import json
from discord.ext import commands
# Init the bot, prefix and help description.
bot = commands.Bot(command_prefix='!', description='Commands currently available')
@bot.event
async def on_ready():
# Prints the bot name, discrim and ID when started up
print('METALLICA!')
print(f'------\nReady!\n{bot.user}\n{bot.user.id}\n------')
@bot.event
async def on_message(message):
if message.author == bot.user:
return
if message.content.lower() == "risotto":
await message.channel.send("Issy")
await bot.process_commands(message)
# Searches for existing modules (commands) in /modules/
if __name__ == '__main__':
for module in glob.glob('./modules/*.py'):
print(module)
try:
print("i'm trying ok")
bot.load_extension(f'modules.{os.path.basename(module)[:-3]}')
except Exception as e:
exc = f'{__name__}: {e}'
print(f'Failed to load module {module}\n{exc}')
# Opens the json file and fetches the token
with open('risobot.json') as f:
diepy = json.load(f)
token = diepy.get('token')
bot.run(token)
|
the-stack_0_25777
|
"""
Small script for benchmarking the MNIST 1024 network
Author: Patrick Henriksen <[email protected]>
"""
import os
import numpy as np
from src.scripts.benchmark import run_benchmark
from src.data_loader.input_data_loader import load_mnist_human_readable
if __name__ == "__main__":
epsilons = [1, 2, 5, 10, 15]
timeout = 3600
num_images = 50
img_dir: str = f"../../data/mnist_neurify/test_images_100/"
if not os.path.isdir("../../benchmark_results"):
os.mkdir("../../benchmark_results")
run_benchmark(images=load_mnist_human_readable(img_dir, list(range(num_images)))[:, np.newaxis, :, :],
epsilons=epsilons,
timeout=timeout,
conv=True,
model_path="../../data/models_nnet/neurify/conv.nnet",
result_path=f"../../benchmark_results/mnist_{num_images}_imgs_conv.txt")
|
the-stack_0_25779
|
from migen import Module, Signal, If, Instance
from litex.soc.integration.doc import ModuleDoc
from litex.soc.interconnect.csr import AutoCSR, CSRStatus, CSRStorage, CSRField
import litex.soc.doc as lxsocdoc
class SBWarmBoot(Module, AutoCSR):
def __init__(self, parent, offsets=None):
table = ""
if offsets is not None:
arr = [["Image", "Offset"]]
for i,offset in enumerate(offsets):
arr.append([str(i), str(offset)])
table = "\nYou can use this block to reboot into one of these four addresses:\n\n" \
+ lxsocdoc.rst.make_table(arr)
self.intro = ModuleDoc("""FPGA Reboot Interface
This module provides the ability to reboot the FPGA. It is based on the
``SB_WARMBOOT`` primitive built in to the FPGA.
When power is applied to the FPGA, it reads configuration data from the
onboard flash chip. This contains reboot offsets for four images. It then
boots from the first image, but keeps note of the other addresses.
{}""".format(table))
self.ctrl = CSRStorage(fields=[
CSRField("image", size=2, description="""
Which image to reboot to. ``SB_WARMBOOT`` supports four images that
are configured at FPGA startup. The bootloader is image 0, so set
these bits to 0 to reboot back into the bootloader.
"""),
CSRField("key", size=6, description="""
A reboot key used to prevent accidental reboots when writing to random
areas of memory. To initiate a reboot, set this to ``0b101011``.""")
], description="""
Provides support for rebooting the FPGA. You can select which of the four images
to reboot to, just be sure to OR the image number with ``0xac``. For example,
to reboot to the bootloader (image 0), write ``0xac`` to this register."""
)
self.addr = CSRStorage(size=32, description="""
This sets the reset vector for the VexRiscv. This address will be used whenever
the CPU is reset, for example through a debug bridge. You should update this
address whenever you load a new program, to enable the debugger to run ``mon reset``
"""
)
do_reset = Signal()
self.comb += [
# "Reset Key" is 0xac (0b101011xx)
do_reset.eq(self.ctrl.storage[2] & self.ctrl.storage[3] & ~self.ctrl.storage[4]
& self.ctrl.storage[5] & ~self.ctrl.storage[6] & self.ctrl.storage[7])
]
self.specials += Instance("SB_WARMBOOT",
i_S0 = self.ctrl.storage[0],
i_S1 = self.ctrl.storage[1],
i_BOOT = do_reset,
)
parent.config["BITSTREAM_SYNC_HEADER1"] = 0x7e99aa7e
parent.config["BITSTREAM_SYNC_HEADER2"] = 0x7eaa997e
|
the-stack_0_25780
|
from terra.utils.jsonserializable import JsonSerializable
class MsgExchangeRateVote(JsonSerializable):
def __init__(
self,
exchangerate: str,
salt: str,
denom: str,
feeder: str,
validator: str,
) -> None:
"""Represent the top level of a MsgExchangeRateVote message."""
self.type = "oracle/MsgExchangeRateVote"
self.value = MsgExchangeRateVoteValue(
exchangerate, salt, denom, feeder, validator
)
class MsgExchangeRateVoteValue(JsonSerializable):
def __init__(
self,
exchangerate: str,
salt: str,
denom: str,
feeder: str,
validator: str,
) -> None:
"""Values of a MsgExchangeRateVote message."""
self.exchangerate = exchangerate
self.salt = salt
self.denom = denom
self.feeder = feeder
self.validator = validator
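# Usage sketch (field values are placeholders, not real addresses):
if __name__ == "__main__":
    vote = MsgExchangeRateVote(
        exchangerate="8890.32",
        salt="1234",
        denom="ukrw",
        feeder="terra1...",
        validator="terravaloper1...",
    )
    print(vote.type, vote.value.denom)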
|
the-stack_0_25781
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trinoadmin.util.fabric_application import FabricApplication
from tests.base_test_case import BaseTestCase
from mock import patch
import sys
import logging
APPLICATION_NAME = 'foo'
@patch('prestoadmin.util.application.logging.config')
class FabricApplicationTest(BaseTestCase):
def setUp(self):
# basicConfig is a noop if there are already handlers
# present on the root logger, remove them all here
self.__old_log_handlers = []
for handler in logging.root.handlers:
self.__old_log_handlers.append(handler)
logging.root.removeHandler(handler)
super(FabricApplicationTest, self).setUp(capture_output=True)
def tearDown(self):
# restore the old log handlers
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
for handler in self.__old_log_handlers:
logging.root.addHandler(handler)
BaseTestCase.tearDown(self)
@patch('prestoadmin.util.fabric_application.disconnect_all', autospec=True)
def test_disconnect_all(self, disconnect_mock, logging_conf_mock):
def should_disconnect():
with FabricApplication(APPLICATION_NAME):
sys.exit()
self.assertRaises(SystemExit, should_disconnect)
disconnect_mock.assert_called_with()
@patch('prestoadmin.util.application.logger')
@patch('prestoadmin.util.filesystem.os.makedirs')
def test_keyboard_interrupt(self, make_dirs_mock, logger_mock,
logging_conf_mock):
def should_not_error():
with FabricApplication(APPLICATION_NAME):
raise KeyboardInterrupt
try:
should_not_error()
except SystemExit as e:
self.assertEqual(0, e.code)
self.assertEqual("Stopped.\n", self.test_stderr.getvalue())
else:
self.fail('Keyboard interrupt did not cause a system exit.')
def test_handles_errors(self, logging_mock):
def should_fail():
with FabricApplication(APPLICATION_NAME):
raise Exception('error message')
self.assertRaises(SystemExit, should_fail)
self.assertEqual(self.test_stderr.getvalue(), 'error message\n')
|
the-stack_0_25782
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
class TestPointVelocityDisplaceOp( unittest.TestCase ) :
def test( self ) :
pts = IECoreScene.PointsPrimitive(0)
vertex = IECoreScene.PrimitiveVariable.Interpolation.Vertex
# check calling with no arguments
o = IECoreScene.PointVelocityDisplaceOp()
self.assertRaises( RuntimeError, o )
# check not passing v is a passthru
o = IECoreScene.PointVelocityDisplaceOp()
pts["P"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f(0) ] ) )
self.assertRaises( RuntimeError, o, input=pts, copyInput=True )
# check it works
o = IECoreScene.PointVelocityDisplaceOp()
pts["P"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f(0) ] ) )
pts["v"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f(1) ] ) )
p = o(input=pts)
self.assertNotEqual( pts["P"].data, p["P"].data )
self.assertEqual( p["P"].data[0], imath.V3f(1) )
p = o(input=pts, copyInput=False)
self.assertEqual( pts["P"].data, p["P"].data )
self.assertEqual( pts["P"].data[0], imath.V3f(1) )
# slightly more interesting example
o = IECoreScene.PointVelocityDisplaceOp()
pts["P"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f( 1,2,3 ), imath.V3f( 4,5,6 ) ] ) )
pts["v"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f( 1 ), imath.V3f( 2, 1, 3 ) ] ) )
p = o(input=pts)
self.assertEqual( p["P"].data, IECore.V3fVectorData([ imath.V3f(2,3,4), imath.V3f(6,6,9) ] ) )
# check samplelength works
o = IECoreScene.PointVelocityDisplaceOp()
pts["P"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f( 1,2,3 ), imath.V3f( 4,5,6 ) ] ) )
pts["v"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f( 1 ), imath.V3f( 2, 1, 3 ) ] ) )
p = o(input=pts, sampleLength=0.5)
self.assertEqual( p["P"].data, IECore.V3fVectorData([ imath.V3f(1.5,2.5,3.5), imath.V3f(5,5.5,7.5) ] ) )
# check that len(p)!=len(v) raises exception
o = IECoreScene.PointVelocityDisplaceOp()
pts["P"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f(0), imath.V3f(1), imath.V3f(2) ] ) )
pts["v"] = IECoreScene.PrimitiveVariable( vertex, IECore.V3fVectorData( [ imath.V3f( 1 ) ] ) )
self.assertRaises( RuntimeError, o, input=pts )
# check that it works with other primitives
o = IECoreScene.PointVelocityDisplaceOp()
c = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f(0), imath.V3f(1) ) )
self.assertEqual( len(c['P'].data), 8 )
v = IECore.V3fVectorData( [] )
v.resize( 8, imath.V3f(1) )
c['v'] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, v )
c2 = o(input=c)
for i in range(8):
self.assertEqual( c2['P'].data[i], c['P'].data[i] + c['v'].data[i] )
# check that it works with pervertex samplelength
o = IECoreScene.PointVelocityDisplaceOp()
s = IECore.FloatVectorData( [] )
for i in range(8):
s.append( 0.1 * i )
c['s'] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, s )
c2 = o(input=c, sampleLengthVar="s")
for i in range(8):
self.assertEqual( c2['P'].data[i], c['P'].data[i] + (c['v'].data[i] * c['s'].data[i]) )
# check that samplelength array length check raises
o = IECoreScene.PointVelocityDisplaceOp()
s = IECore.FloatVectorData( [] )
for i in range(4):
s.append( 0.1 * i )
c['s'] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, s )
self.assertRaises( RuntimeError, o, input=pts, sampleLengthVar="s" )
# check that it works with different var names
o = IECoreScene.PointVelocityDisplaceOp()
c = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f(0), imath.V3f(1) ) )
IECoreScene.MeshNormalsOp()( input=c, copyInput=False )
self.assertEqual( len(c.keys()), 3 )
self.assertTrue( "N" in c )
c['bob'] = c['P']
del c['P']
self.assertEqual( len(c.keys()), 3 )
c2 = o(input=c, positionVar="bob", velocityVar="N")
for i in range(8):
self.assertEqual( c2['bob'].data[i], c['bob'].data[i] + c['N'].data[i] )
if __name__ == "__main__":
unittest.main()
|
the-stack_0_25784
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 14:38:14 2018
@author: kennedy
"""
import pandas as pd
import numpy as np
class TechnicalIndicators:
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n: window
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_{}'.format(n))
return MA
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n: window for the exponential moving average
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
return EMA
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
return M
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
return ROC
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
return ATR
def bollinger_bands(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean())
MSD = pd.Series(df['Close'].rolling(n, min_periods=n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
return pd.concat([B1, B2], axis = 1)
def ppsr(df):
"""Calculate Pivot Points, Supports and Resistances for given data
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
psr = {'PP': PP, 'R1': R1, 'S1': S1, 'R2': R2, 'S2': S2, 'R3': R3, 'S3': S3}
PSR = pd.DataFrame(psr)
return PSR
def stochastic_oscillator_k(df):
"""Calculate stochastic oscillator %K for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return SOk
def stochastic_oscillator_d(df, n):
"""Calculate stochastic oscillator %D for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
SOd = pd.Series(SOk.ewm(span=n, min_periods=n).mean(), name='SO%d_' + str(n))
return SOd
def trix(df, n):
"""Calculate TRIX for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
EX1 = df['Close'].ewm(span=n, min_periods=n).mean()
EX2 = EX1.ewm(span=n, min_periods=n).mean()
EX3 = EX2.ewm(span=n, min_periods=n).mean()
i = 0
ROC_l = [np.nan]
while i + 1 <= df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
Trix = pd.Series(ROC_l, name='Trix_' + str(n))
return Trix
def average_directional_movement_index(df, n, n_ADX):
"""Calculate the Average Directional Movement Index for given data.
:param df: pandas.DataFrame
:param n: data window
:param n_ADX:
:return: pandas.DataFrame
"""
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean() / ATR)
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean() / ATR)
ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span=n_ADX, min_periods=n_ADX).mean(),
name='ADX_' + str(n) + '_' + str(n_ADX))
return ADX
def macd(df, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param df: pandas.DataFrame
:param n_fast:
:param n_slow:
:return: pandas.DataFrame
"""
EMAfast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
EMAslow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
return pd.concat([MACD, MACDsign, MACDdiff], axis = 1)
def mass_index(df, n):
"""Calculate the Mass Index for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
Range = df['High'] - df['Low']
EX1 = Range.ewm(span=9, min_periods=9).mean()
EX2 = EX1.ewm(span=9, min_periods=9).mean()
Mass = EX1 / EX2
MassI = pd.Series(Mass.rolling(n).sum(), name='Mass Index')
return MassI
def vortex_indicator(df, n):
"""Calculate the Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
TR = [0]
while i < df.index[-1]:
Range = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < df.index[-1]:
Range = abs(df.loc[i + 1, 'High'] - df.loc[i, 'Low']) - abs(df.loc[i + 1, 'Low'] - df.loc[i, 'High'])
VM.append(Range)
i = i + 1
VI = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum(), name='Vortex_' + str(n))
return VI
def kst_oscillator(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""Calculate KST Oscillator for given data.
:param df: pandas.DataFrame
:param r1:
:param r2:
:param r3:
:param r4:
:param n1:
:param n2:
:param n3:
:param n4:
:return: pandas.DataFrame
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
KST = pd.Series(
ROC1.rolling(n1).sum() + ROC2.rolling(n2).sum() * 2 + ROC3.rolling(n3).sum() * 3 + ROC4.rolling(n4).sum() * 4,
name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(
n2) + '_' + str(n3) + '_' + str(n4))
return KST
def relative_strength_index(df, n):
"""Calculate Relative Strength Index(RSI) for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean())
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean())
RSI = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return RSI
def true_strength_index(df, r, s):
"""Calculate True Strength Index (TSI) for given data.
:param df: pandas.DataFrame
:param r:
:param s:
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(M.ewm(span=r, min_periods=r).mean())
aEMA1 = pd.Series(aM.ewm(span=r, min_periods=r).mean())
EMA2 = pd.Series(EMA1.ewm(span=s, min_periods=s).mean())
aEMA2 = pd.Series(aEMA1.ewm(span=s, min_periods=s).mean())
TSI = pd.Series(EMA2 / aEMA2, name='TSI_' + str(r) + '_' + str(s))
return TSI
def accumulation_distribution(df, n):
"""Calculate Accumulation/Distribution for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
AD = pd.Series(ROC, name='Acc/Dist_ROC_' + str(n))
return AD
def chaikin_oscillator(df):
"""Calculate Chaikin Oscillator for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
Chaikin = pd.Series(ad.ewm(span=3, min_periods=3).mean() - ad.ewm(span=10, min_periods=10).mean(), name='Chaikin')
return Chaikin
def money_flow_index(df, n):
"""Calculate Money Flow Index and Ratio for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.loc[i + 1, 'Volume'])
else:
PosMF.append(0)
i = i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
MFR = pd.Series(PosMF / TotMF)
MFI = pd.Series(MFR.rolling(n, min_periods=n).mean(), name='MFI_' + str(n))
return MFI
def on_balance_volume(df, n):
"""Calculate On-Balance Volume for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
OBV = [0]
while i < df.index[-1]:
if df.loc[i + 1, 'Close'] - df.loc[i, 'Close'] > 0:
OBV.append(df.loc[i + 1, 'Volume'])
if df.loc[i + 1, 'Close'] - df.loc[i, 'Close'] == 0:
OBV.append(0)
if df.loc[i + 1, 'Close'] - df.loc[i, 'Close'] < 0:
OBV.append(-df.loc[i + 1, 'Volume'])
i = i + 1
OBV = pd.Series(OBV)
OBV_ma = pd.Series(OBV.rolling(n, min_periods=n).mean(), name='OBV_' + str(n))
return OBV_ma
def force_index(df, n):
"""Calculate Force Index for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
F = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name='Force_' + str(n))
return F
def ease_of_movement(df, n):
"""Calculate Ease of Movement for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
Eom_ma = pd.Series(EoM.rolling(n, min_periods=n).mean(), name='EoM_' + str(n))
return Eom_ma
def commodity_channel_index(df, n):
"""Calculate Commodity Channel Index for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
CCI = pd.Series((PP - PP.rolling(n, min_periods=n).mean()) / PP.rolling(n, min_periods=n).std(),
name='CCI_' + str(n))
return CCI
def coppock_curve(df, n):
"""Calculate Coppock Curve for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = df['Close'].diff(int(n * 11 / 10) - 1)
N = df['Close'].shift(int(n * 11 / 10) - 1)
ROC1 = M / N
M = df['Close'].diff(int(n * 14 / 10) - 1)
N = df['Close'].shift(int(n * 14 / 10) - 1)
ROC2 = M / N
Copp = pd.Series((ROC1 + ROC2).ewm(span=n, min_periods=n).mean(), name='Copp_' + str(n))
return Copp
def keltner_channel(df, n):
"""Calculate Keltner Channel for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
KelChM = pd.Series(((df['High'] + df['Low'] + df['Close']) / 3).rolling(n, min_periods=n).mean(),
name='KelChM_' + str(n))
KelChU = pd.Series(((4 * df['High'] - 2 * df['Low'] + df['Close']) / 3).rolling(n, min_periods=n).mean(),
name='KelChU_' + str(n))
KelChD = pd.Series(((-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3).rolling(n, min_periods=n).mean(),
name='KelChD_' + str(n))
return pd.concat([KelChM, KelChU, KelChD], axis = 1)
def ultimate_oscillator(df):
"""Calculate Ultimate Oscillator for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
BP_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
BP = df.loc[i + 1, 'Close'] - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
BP_l.append(BP)
i = i + 1
UltO = pd.Series((4 * pd.Series(BP_l).rolling(7).sum() / pd.Series(TR_l).rolling(7).sum()) + (
2 * pd.Series(BP_l).rolling(14).sum() / pd.Series(TR_l).rolling(14).sum()) + (
pd.Series(BP_l).rolling(28).sum() / pd.Series(TR_l).rolling(28).sum()),
name='Ultimate_Osc')
return UltO
def donchian_channel(df, n):
"""Calculate donchian channel of given pandas data frame.
:param df: pandas.DataFrame
:param n:data window
:return: pandas.DataFrame
"""
i = 0
dc_l = []
while i < n - 1:
dc_l.append(0)
i += 1
i = 0
while i + n - 1 < df.index[-1]:
dc = max(df['High'].iloc[i:i + n]) - min(df['Low'].iloc[i:i + n])  # .ix was removed from pandas; .iloc selects the same n-row window
dc_l.append(dc)
i += 1
donchian_chan = pd.Series(dc_l, name='Donchian_' + str(n))
donchian_chan = donchian_chan.shift(n - 1)
return donchian_chan
def standard_deviation(df, n):
"""Calculate Standard Deviation for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
std = pd.Series(df['Close'].rolling(n, min_periods=n).std(), name='STD_' + str(n))
return std
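# Feature-frame sketch (illustrative only): most helpers in this module return
# a single pandas.Series, while keltner_channel returns a three-column
# DataFrame, so the results can be assembled with pd.concat. The `ohlcv` frame
# and the window sizes below are assumptions, not part of this module.
#
#     import pandas as pd
#     features = pd.concat([
#         commodity_channel_index(ohlcv, n=20),
#         standard_deviation(ohlcv, n=20),
#         keltner_channel(ohlcv, n=20),
#     ], axis=1)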
|
the-stack_0_25785
|
"""General utilities used by the Rust package."""
import sublime
import textwrap
import os
PACKAGE_NAME = __package__.split('.')[0]
def index_with(l, cb):
"""Find the index of a value in a sequence using a callback.
:param l: The sequence to search.
:param cb: Function to call, should return true if the given value matches
what you are searching for.
:returns: Returns the index of the match, or -1 if no match.
"""
for i, v in enumerate(l):
if cb(v):
return i
return -1
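# Usage sketch (illustrative only; `views` is a hypothetical list of Sublime
# view objects):
#
#     idx = index_with(views, lambda v: v.file_name() and
#                      v.file_name().endswith('main.rs'))
#     if idx != -1:
#         view = views[idx]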
def multiline_fix(s):
"""Remove indentation from a multi-line string."""
return textwrap.dedent(s).lstrip()
def get_setting(name, default=None):
"""Retrieve a setting from Sublime settings."""
pdata = sublime.active_window().project_data()
if pdata:
v = pdata.get('settings', {}).get(name)
if v is not None:
return v
settings = sublime.load_settings('LionsEnhanced.sublime-settings')
v = settings.get(name)
if v is not None:
return v
settings = sublime.load_settings('Preferences.sublime-settings')
# XXX: Also check "Distraction Free"?
return settings.get(name, default)
def get_rustc_version(window, cwd, toolchain=None):
"""Returns the rust version for the given directory.
:Returns: A string such as '1.16.0' or '1.17.0-nightly'.
"""
from . import rust_proc
cmd = ['rustc']
if toolchain:
cmd.append('+' + toolchain)
cmd.append('--version')
output = rust_proc.check_output(window, cmd, cwd)
# Example outputs:
# rustc 1.15.1 (021bd294c 2017-02-08)
# rustc 1.16.0-beta.2 (bc15d5281 2017-02-16)
# rustc 1.17.0-nightly (306035c21 2017-02-18)
return output.split()[1]
def find_cargo_manifest(path):
"""Find the Cargo.toml file in the given path, or any of its parents.
:Returns: The path where Cargo.toml is found, or None.
"""
path = os.path.normpath(path)
if os.path.isfile(path):
path = os.path.dirname(path)
while True:
manifest = os.path.join(path, 'Cargo.toml')
if os.path.exists(manifest):
return path
parent = os.path.dirname(path)
if parent == path:
return None
path = parent
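# Usage sketch (illustrative only; the path below is made up):
#
#     root = find_cargo_manifest('/home/user/myproj/src/lib.rs')
#     # Returns the first directory, walking upwards from src/, that contains
#     # a Cargo.toml (e.g. '/home/user/myproj'), or None if none is found.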
def active_view_is_rust(window=None, view=None):
"""Determine if the current view is a Rust source file.
:param window: The Sublime window (defaults to active window).
:param view: The view to check (defaults to active view).
:Returns: True if it is a Rust source file, False if not.
"""
if view is None:
if window is None:
window = sublime.active_window()
view = window.active_view()
if not view:
return False
# Require it to be saved to disk.
if not view.file_name():
return False
return 'source.rust' in view.scope_name(0)
def is_rust_view(settings):
"""Helper for use with ViewEventListener."""
s = settings.get('syntax')
return (s == 'Packages/%s/LionsEnhanced.sublime-syntax' % (PACKAGE_NAME,))
def get_cargo_metadata(window, cwd, toolchain=None):
"""Load Cargo metadata.
:returns: None on failure, otherwise a dictionary from Cargo:
- packages: List of packages:
- name
- manifest_path: Path to Cargo.toml.
- targets: List of target dictionaries:
- name: Name of target.
- src_path: Path of top-level source file. May be a
relative path.
- kind: List of kinds. May contain multiple entries if
`crate-type` specifies multiple values in Cargo.toml.
Lots of different types of values:
- Libraries: 'lib', 'rlib', 'dylib', 'cdylib', 'staticlib',
'proc-macro'
- Executables: 'bin', 'test', 'example', 'bench'
- build.rs: 'custom-build'
:raises ProcessTerminatedError: Process was terminated by another thread.
"""
from . import rust_proc
cmd = ['cargo']
if toolchain:
cmd.append('+' + toolchain)
cmd.extend(['metadata', '--no-deps'])
output = rust_proc.slurp_json(window,
cmd,
cwd=cwd)
if output:
return output[0]
else:
return None
def icon_path(level, res=None):
"""Return a path to a message-level icon."""
level = str(level)
if level not in ('error', 'warning', 'note', 'help', 'none'):
return ''
gutter_style = get_setting('rust_gutter_style', 'shape')
if gutter_style == 'none':
return ''
else:
if res:
res_suffix = '@%ix' % (res,)
else:
res_suffix = ''
return 'Packages/%s/images/gutter/%s-%s%s.png' % (
PACKAGE_NAME, gutter_style, level, res_suffix)
def open_views_for_file(window, file_name):
"""Return all views for the given file name."""
view = window.find_open_file(file_name)
if view is None:
return []
return [v for v in window.views() if v.buffer_id() == view.buffer_id()]
|
the-stack_0_25786
|
# -*- coding: utf-8 -*-
'''
The top level interface used to translate configuration data back to the
correct cloud modules
'''
from __future__ import absolute_import
# Import python libs
from __future__ import print_function, generators
import copy
import os
import traceback
import glob
import time
import signal
import logging
import multiprocessing
from itertools import groupby
from salt.ext.six.moves import input
# Import salt.cloud libs
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudException,
SaltCloudSystemExit,
SaltCloudConfigError
)
# Import salt libs
import salt.config
import salt.client
import salt.loader
import salt.utils
import salt.utils.cloud
from salt.utils import context
from salt.ext.six import string_types
from salt.template import compile_template
# Import third party libs
import yaml
# Get logging started
log = logging.getLogger(__name__)
def communicator(func):
'''Warning, this is a picklable decorator!'''
def _call(queue, args, kw):
'''called with [queue, args, kwargs] as first optional arg'''
kw['queue'] = queue
ret = None
try:
ret = func(*args, **kw)
queue.put('END')
except KeyboardInterrupt as ex:
trace = traceback.format_exc()
queue.put('KEYBOARDINT')
queue.put('Keyboard interrupt')
queue.put('{0}\n{1}\n'.format(ex, trace))
except Exception as ex:
trace = traceback.format_exc()
queue.put('ERROR')
queue.put('Exception')
queue.put('{0}\n{1}\n'.format(ex, trace))
return ret
return _call
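# Usage sketch (illustrative only; `provision` is a hypothetical worker). The
# decorator rewrites the call signature to (queue, args, kwargs) and reports
# the outcome on the queue:
#
#     @communicator
#     def provision(name, queue=None):
#         return 'created {0}'.format(name)
#
#     manager = multiprocessing.Manager()
#     q = manager.Queue()
#     provision(q, ['node1'], {})   # returns 'created node1', puts 'END' on q
#     # On failure the queue instead receives 'ERROR' (or 'KEYBOARDINT'), a
#     # short label and a formatted traceback.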
def enter_mainloop(target,
mapped_args=None,
args=None,
kwargs=None,
pool=None,
pool_size=None,
callback=None,
queue=None):
'''Manage a multiprocessing pool
- If the queue does not output anything, the pool runs indefinitely
- If the queue returns KEYBOARDINT or ERROR, this will kill the pool
entirely, calling terminate & join, and ends with a SaltCloudSystemExit
exception notifying callers of the abnormal termination
- If the queue returns END, or callback is defined and returns True,
it just joins the pool and returns the data.
target
the function you want to execute in multiprocessing
pool
pool object, can be None if you want a default pool, but you'll
then have to define pool_size instead
pool_size
pool size if you did not provide a pool yourself
callback
a callable taking a string argument which returns True to
signal that 'target' is finished and we need to join
the pool
queue
A custom multiprocessing queue in case you want to do
extra stuff and need it later in your program
args
positional arguments to call the function with
if you don't want to use pool.map
mapped_args
a list of one or more arguments combinations to call the function with
e.g. (foo, [[1], [2]]) will call::
foo([1])
foo([2])
kwargs
keyword arguments to pass to the function in the worker process
Attention, the function must have the following signature:
target(queue, *args, **kw)
You may use the 'communicator' decorator to generate such a function
(see end of this file)
'''
if not kwargs:
kwargs = {}
if not pool_size:
pool_size = 1
if not pool:
pool = multiprocessing.Pool(pool_size)
if not queue:
manager = multiprocessing.Manager()
queue = manager.Queue()
if mapped_args is not None and not mapped_args:
msg = (
'We are called to asynchronously execute {0}'
' but we do not have anything to execute, weird,'
' we bail out'.format(target))
log.error(msg)
raise SaltCloudSystemExit('Exception caught\n{0}'.format(msg))
elif mapped_args is not None:
iterable = [[queue, [arg], kwargs] for arg in mapped_args]
ret = pool.map(func=target, iterable=iterable)
else:
ret = pool.apply(target, [queue, args, kwargs])
while True:
test = queue.get()
if test in ['ERROR', 'KEYBOARDINT']:
type_ = queue.get()
trace = queue.get()
msg = 'Caught {0}, terminating workers\n'.format(type_)
msg += 'TRACE: {0}\n'.format(trace)
log.error(msg)
pool.terminate()
pool.join()
raise SaltCloudSystemExit('Exception caught\n{0}'.format(msg))
elif test in ['END'] or (callback and callback(test)):
pool.close()
pool.join()
break
else:
time.sleep(0.125)
return ret
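# Usage sketch (illustrative only; `_query_one` is a hypothetical target).
# With `mapped_args`, each work item is handed to the target as a single
# [queue, [arg], kwargs] list (see the pool.map call above):
#
#     def _query_one(payload):
#         queue, args, kwargs = payload
#         # ... do the work for args[0] ...
#         queue.put('END')
#         return args[0]
#
#     results = enter_mainloop(_query_one, mapped_args=['node1', 'node2'],
#                              pool_size=2)
#     # enter_mainloop blocks until the queue yields 'END' (or an error
#     # marker), joins the pool and returns the pool.map() results.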
class CloudClient(object):
'''
The client class to wrap cloud interactions
'''
def __init__(self, path=None, opts=None, config_dir=None, pillars=None):
if opts:
self.opts = opts
else:
self.opts = salt.config.cloud_config(path)
if pillars:
for name, provider in pillars.pop('providers', {}).items():
driver = provider['provider']
provider['profiles'] = {}
self.opts['providers'].update({name: {driver: provider}})
for name, profile in pillars.pop('profiles', {}).items():
provider = profile['provider'].split(':')[0]
driver = next(iter(self.opts['providers'][provider].keys()))
profile['provider'] = '{0}:{1}'.format(provider, driver)
profile['profile'] = name
self.opts['profiles'].update({name: profile})
self.opts['providers'][provider][driver]['profiles'].update({name: profile})
self.opts.update(pillars)
def _opts_defaults(self, **kwargs):
'''
Set the opts dict to defaults and allow for opts to be overridden in
the kwargs
'''
# Let's start with the default salt cloud configuration
opts = salt.config.CLOUD_CONFIG_DEFAULTS.copy()
# Update it with the loaded configuration
opts.update(self.opts.copy())
# Reset some of the settings to sane values
opts['parallel'] = False
opts['keep_tmp'] = False
opts['deploy'] = True
opts['update_bootstrap'] = False
opts['show_deploy_args'] = False
opts['script_args'] = ''
# Update it with the passed kwargs
if 'kwargs' in kwargs:
opts.update(kwargs['kwargs'])
opts.update(kwargs)
profile = opts.get('profile', None)
# filter other profiles if one is specified
if profile:
for _profile in [a for a in opts.get('profiles', {})]:
if not _profile == profile:
opts['profiles'].pop(_profile)
# if profile is specified and we have enough info about providers
# also filter them to speedup methods like
# __filter_non_working_providers
providers = [a.get('provider', '').split(':')[0]
for a in opts['profiles'].values()
if a.get('provider', '')]
if providers:
_providers = opts.get('providers', {})
for p in [a for a in _providers]:
if p not in providers:
_providers.pop(p)
return opts
def low(self, fun, low):
'''
Pass the cloud function and low data structure to run
'''
l_fun = getattr(self, fun)
f_call = salt.utils.format_call(l_fun, low)
return l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
def list_sizes(self, provider=None):
'''
List all available sizes in configured cloud systems
'''
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.cloud.simple_types_filter(
mapper.size_list(provider)
)
def list_images(self, provider=None):
'''
List all available images in configured cloud systems
'''
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.cloud.simple_types_filter(
mapper.image_list(provider)
)
def list_locations(self, provider=None):
'''
List all available locations in configured cloud systems
'''
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.cloud.simple_types_filter(
mapper.location_list(provider)
)
def query(self, query_type='list_nodes'):
'''
Query basic instance information
'''
mapper = salt.cloud.Map(self._opts_defaults())
return mapper.map_providers_parallel(query_type)
def full_query(self, query_type='list_nodes_full'):
'''
Query all instance information
'''
mapper = salt.cloud.Map(self._opts_defaults())
return mapper.map_providers_parallel(query_type)
def select_query(self, query_type='list_nodes_select'):
'''
Query select instance information
'''
mapper = salt.cloud.Map(self._opts_defaults())
return mapper.map_providers_parallel(query_type)
def min_query(self, query_type='list_nodes_min'):
'''
Query minimal instance information
'''
mapper = salt.cloud.Map(self._opts_defaults())
return mapper.map_providers_parallel(query_type)
def profile(self, profile, names, vm_overrides=None, **kwargs):
'''
Pass in a profile to create. names is a list of VM names to allocate.
vm_overrides is a special dict of per-node option overrides.
Example:
.. code-block:: python
>>> client = salt.cloud.CloudClient(path='/etc/salt/cloud')
>>> client.profile('do_512_git', names=['minion01',])
{'minion01': {u'backups_active': 'False',
u'created_at': '2014-09-04T18:10:15Z',
u'droplet': {u'event_id': 31000502,
u'id': 2530006,
u'image_id': 5140006,
u'name': u'minion01',
u'size_id': 66},
u'id': '2530006',
u'image_id': '5140006',
u'ip_address': '107.XXX.XXX.XXX',
u'locked': 'True',
u'name': 'minion01',
u'private_ip_address': None,
u'region_id': '4',
u'size_id': '66',
u'status': 'new'}}
'''
if not vm_overrides:
vm_overrides = {}
kwargs['profile'] = profile
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
if isinstance(names, str):
names = names.split(',')
return salt.utils.cloud.simple_types_filter(
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
)
def map_run(self, path, **kwargs):
'''
Pass in a location for a map to execute
'''
kwarg = {'map': path}
kwarg.update(kwargs)
mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
dmap = mapper.map_data()
return salt.utils.cloud.simple_types_filter(
mapper.run_map(dmap)
)
def destroy(self, names):
'''
Destroy the named VMs
'''
mapper = salt.cloud.Map(self._opts_defaults())
if isinstance(names, str):
names = names.split(',')
return salt.utils.cloud.simple_types_filter(
mapper.destroy(names)
)
def create(self, provider, names, **kwargs):
'''
Create the named VMs, without using a profile
Example:
.. code-block:: python
client.create(names=['myinstance'], provider='my-ec2-config',
kwargs={'image': 'ami-1624987f', 'size': 't1.micro',
'ssh_username': 'ec2-user', 'securitygroup': 'default',
'delvol_on_destroy': True})
'''
mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts['providers']
if provider in providers:
provider += ':{0}'.format(next(iter(providers[provider].keys())))
else:
return False
if isinstance(names, str):
names = names.split(',')
ret = {}
for name in names:
vm_ = kwargs.copy()
vm_['name'] = name
vm_['provider'] = provider
vm_['profile'] = None
ret[name] = salt.utils.cloud.simple_types_filter(
mapper.create(vm_))
return ret
def extra_action(self, names, provider, action, **kwargs):
'''
Perform actions with block storage devices
Example:
.. code-block:: python
client.extra_action(names=['myblock'], action='volume_create',
provider='my-nova', kwargs={'voltype': 'SSD', 'size': 1000}
)
client.extra_action(names=['salt-net'], action='network_create',
provider='my-nova', kwargs={'cidr': '192.168.100.0/24'}
)
'''
mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel()
if provider in providers:
provider += ':{0}'.format(next(iter(providers[provider].keys())))
else:
return False
if isinstance(names, str):
names = names.split(',')
ret = {}
for name in names:
extra_ = kwargs.copy()
extra_['name'] = name
extra_['provider'] = provider
extra_['profile'] = None
extra_['action'] = action
ret[name] = salt.utils.cloud.simple_types_filter(
mapper.extras(extra_)
)
return ret
def action(
self,
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
kwargs=None
):
'''
Execute a single action via the cloud plugin backend
Examples:
.. code-block:: python
client.action(fun='show_instance', names=['myinstance'])
client.action(fun='show_image', provider='my-ec2-config',
kwargs={'image': 'ami-10314d79'}
)
'''
mapper = salt.cloud.Map(self._opts_defaults(action=fun))
if names and not provider:
self.opts['action'] = fun
return mapper.do_action(names, kwargs)
if provider:
return mapper.do_function(provider, fun, kwargs)
else:
# This should not be called without either an instance or a
# provider.
raise SaltCloudConfigError(
'Either an instance or a provider must be specified.'
)
return salt.utils.cloud.simple_types_filter(
mapper.run_profile(fun, names)
)
# map
# create
# destroy
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
self.opts = opts
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in self.opts['providers'].items():
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in self.opts['providers'].items():
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched {0!r}. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return set([(alias, driver)])  # a set containing the single (alias, driver) pair
providers = set()
for alias, drivers in self.opts['providers'].items():
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched {0!r}. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return a dictionary describing the configured profiles
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in self.opts['profiles'].items():
providers = info.get('provider')
if providers:
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in self.opts['providers'].items():
for driver, details in drivers.items():
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error(
'Public cloud provider {0} is not available'.format(
driver
)
)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'{0}()\' while querying for '
'running nodes: {1}'.format(fun, err),
# Show the traceback if the debug logging level is
# enabled
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts['providers'] = self._optimize_providers(opts['providers'])
for alias, drivers in opts['providers'].items():
# Make a temp query for this driver to avoid overwriting the next one
this_query = query
for driver, details in drivers.items():
# If the driver has a list_nodes_min function, use it in place of
# the query param to check existing VMs on this driver for
# minimal information. Otherwise, still use the query param.
if 'selected_query_option' not in opts:
if '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error(
'Public cloud provider {0} is not available'.format(
driver
)
)
continue
multiprocessing_data.append({
'fun': fun,
'opts': opts,
'query': this_query,
'alias': alias,
'driver': driver
})
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
init_pool_worker)
parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
multiprocessing_data,
pool=pool)
for alias, driver, details in parallel_pmap:
if not details:
# There are no provider details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(self, names, query='list_nodes', cached=False,
profile=None):
if isinstance(names, string_types):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in mapped_providers.items():
for driver, vms in drivers.items():
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if profile:
if alias not in \
self.opts['profiles'][profile]['provider'].split(
':'
)[0]:
continue
for vm_name, details in vms.items():
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif driver == 'ec2' and 'aws' in handled_drivers and \
'aws' in matches[handled_drivers['aws']] and \
vm_name in matches[handled_drivers['aws']]['aws']:
continue
elif driver == 'aws' and 'ec2' in handled_drivers and \
'ec2' in matches[handled_drivers['ec2']] and \
vm_name in matches[handled_drivers['ec2']]['ec2']:
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
'''
Return an optimized mapping of available providers
'''
new_providers = {}
provider_by_driver = {}
for alias, driver in providers.items():
for name, data in driver.items():
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in provider_by_driver.items():
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug(
'The {0!r} cloud driver is unable to be optimized.'.format(
driver
)
)
for name, prov_data in providers_data.items():
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in new_data.items():
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup='all'):
'''
Return a mapping of all location data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_locations'.format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
'The {0!r} cloud driver defined under {1!r} provider '
'alias is unable to get the locations information'.format(
driver, alias
)
)
continue
if alias not in data:
data[alias] = {}
try:
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'{0}()\': {1}'.format(
fun, err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The {0!r} cloud driver defined under {1!r} provider '
'alias is unable to get the images information'.format(
driver,
alias
)
)
continue
if alias not in data:
data[alias] = {}
try:
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'{0}()\': {1}'.format(
fun, err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return data
def size_list(self, lookup='all'):
'''
Return a mapping of all size data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_sizes'.format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The {0!r} cloud driver defined under {1!r} provider '
'alias is unable to get the sizes information'.format(
driver,
alias
)
)
continue
if alias not in data:
data[alias] = {}
try:
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'{0}()\': {1}'.format(
fun, err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return data
def provider_list(self, lookup='all'):
'''
Return a mapping of all configured provider aliases and drivers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in self.opts['profiles'].items():
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
def destroy(self, names, cached=False):
'''
Destroy the named VMs
'''
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in matching.items():
for driver, vms in drivers.items():
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts['parallel']:
parallel_data.append({
'opts': self.opts,
'name': name,
'alias': alias,
'driver': driver,
})
# destroying in parallel
if self.opts['parallel'] and len(parallel_data) > 0:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if 'pool_size' in self.opts:
pool_size = self.opts['pool_size']
else:
pool_size = len(parallel_data)
log.info('Destroying in parallel mode; '
'Cloud pool size: {0}'.format(pool_size))
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj['alias']
driver = obj['driver']
name = obj['name']
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info('Destroying in non-parallel mode.')
for alias, driver, name in vms_to_destroy:
fun = '{0}.destroy'.format(driver)
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
'name': name,
'profile': None,
'provider': ':'.join([alias, driver])
}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
key_file = os.path.join(
self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
)
globbed_key_file = glob.glob('{0}.*'.format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and 'newname' in ret:
salt.utils.cloud.remove_key(
self.opts['pki_dir'], ret['newname']
)
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
continue
if not os.path.isfile(key_file) and globbed_key_file:
# Since we have globbed matches, there are probably
# some keys for which their minion configuration has
# append_domain set.
if len(globbed_key_file) == 1:
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts['pki_dir'],
os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we also can't get the append_domain setting.
# And if we reached this point, we have several minion keys
# whose name starts with the machine name we're deleting.
# We need to ask one by one!?
print(
'There are several minion keys whose name starts '
'with {0!r}. We need to ask you which one should be '
'deleted:'.format(
name
)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(' {0}: {1}'.format(
idx, os.path.basename(filename)
))
selection = input(
'Which minion key should be deleted(number)? '
)
try:
selection = int(selection)
except ValueError:
print(
'{0!r} is not a valid selection.'.format(selection)
)
try:
filename = os.path.basename(
globbed_key_file.pop(selection)
)
except Exception:
continue
delete = input(
'Delete {0!r}? [Y/n]? '.format(filename)
)
if delete == '' or delete.lower().startswith('y'):
salt.utils.cloud.remove_key(
self.opts['pki_dir'], filename
)
print('Deleted {0!r}'.format(filename))
break
print('Did not delete {0!r}'.format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
'The following VMs were not found: {0}'.format(
', '.join(names)
)
)
elif names and processed:
processed['Not Found'] = names
elif not processed:
raise SaltCloudSystemExit('No machines were destroyed!')
return processed
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in pmap.items():
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in acts.items():
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating {0[name]!r} using {0[provider]!r} as the provider '
'cannot complete since {1!r} is not available'.format(
vm_,
driver
)
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if make_master is False and 'master' not in minion_dict:
raise SaltCloudConfigError(
(
'There\'s no master defined on the '
'{0!r} VM settings'
).format(vm_['name'])
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True:
if 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug(
'Generating the master keys for {0[name]!r}'.format(
vm_
)
)
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# a small pause makes the sync work reliably
time.sleep(3)
client = salt.client.get_local_client(mopts=self.opts)
ret = client.cmd(vm_['name'], 'saltutil.sync_{0}'.format(
self.opts['sync_after_install']
))
log.info('Synchronized the following dynamic modules:')
log.info(' {0}'.format(ret))
except KeyError as exc:
log.exception(
'Failed to create VM {0}. Configuration value {1} needs '
'to be set'.format(
vm_['name'], exc
)
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info(
'Running {0} on {1}'.format(
self.opts['start_action'], vm_['name']
)
)
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output
def extras(self, extra_):
'''
Extra actions
'''
output = {}
alias, driver = extra_['provider'].split(':')
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating {0[name]!r} using {0[provider]!r} as the provider '
'cannot complete since {1!r} is not available'.format(
extra_,
driver
)
)
return
try:
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
(
'Failed to perform {0[provider]}.{0[action]} '
'on {0[name]}. '
'Configuration value {1} needs to be set'
).format(extra_, exc)
)
return output
def run_profile(self, profile, names, vm_overrides=None):
'''
Parse over the options passed on the command line and determine how to
handle them
'''
if profile not in self.opts['profiles']:
msg = 'Profile {0} is not defined'.format(profile)
log.error(msg)
return {'Error': msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
profile_details = self.opts['profiles'][profile]
alias, driver = profile_details['provider'].split(':')
mapped_providers = self.map_providers_parallel()
alias_data = mapped_providers.setdefault(alias, {})
vms = alias_data.setdefault(driver, {})
for name in names:
name_exists = False
if name in vms:
if 'state' in vms[name]:
if vms[name]['state'].lower() != 'terminated':
name_exists = True
else:
name_exists = True
if name_exists:
msg = '{0} already exists under {1}:{2}'.format(
name, alias, driver
)
log.error(msg)
ret[name] = {'Error': msg}
continue
vm_ = profile_details.copy()
vm_.update(vm_overrides)
vm_['name'] = name
if self.opts['parallel']:
process = multiprocessing.Process(
target=self.create,
args=(vm_,)
)
process.start()
ret[name] = {
'Provisioning': 'VM being provisioned in parallel. '
'PID: {0}'.format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {'Error': 'Failed to deploy VM'}
if len(names) == 1:
raise SaltCloudSystemExit('Failed to deploy VM')
continue
if self.opts.get('show_deploy_args', False) is False:
ret[name].pop('deploy_kwargs', None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {'Error': str(exc)}
return ret
def do_action(self, names, kwargs):
'''
Perform an action on a VM which may be specific to this cloud provider
'''
ret = {}
names = set(names)
for alias, drivers in self.map_providers_parallel().items():
if not names:
break
for driver, vms in drivers.items():
if not names:
break
fun = '{0}.{1}'.format(driver, self.opts['action'])
if fun not in self.clouds:
log.info(
'\'{0}()\' is not available. Not actioning...'.format(
fun
)
)
continue
for vm_name, vm_details in vms.items():
if not names:
break
if vm_name not in names:
log.debug('vm:{0} in provider:{1} is not in name list:{2!r}'.format(
vm_name, driver, names
))
continue
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call='action'
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call='action'
)
names.remove(vm_name)
if not names:
return ret
ret['Not Actioned/Not Running'] = list(names)
return ret
def do_function(self, prov, func, kwargs):
'''
Perform a function against a cloud provider
'''
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
'More than one result matched {0!r}. Please specify '
'one of: {1}'.format(
prov,
', '.join([
'{0}:{1}'.format(alias, driver) for
(alias, driver) in matches
])
)
)
alias, driver = matches.pop()
fun = '{0}.{1}'.format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
'The {0!r} cloud provider alias, for the {1!r} driver, does '
'not define the function {2!r}'.format(alias, driver, func)
)
log.debug(
'Trying to execute {0!r} with the following kwargs: {1}'.format(
fun, kwargs
)
)
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if kwargs:
return {
alias: {
driver: self.clouds[fun](
call='function', kwargs=kwargs
)
}
}
return {
alias: {
driver: self.clouds[fun](call='function')
}
}
def __filter_non_working_providers(self):
'''
Remove any mis-configured cloud providers from the available listing
'''
for alias, drivers in self.opts['providers'].copy().items():
for driver in drivers.copy():
fun = '{0}.get_configured_provider'.format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warn(
'The cloud driver, {0!r}, configured under the '
'{1!r} cloud provider alias was not loaded since '
'\'{2}()\' could not be found. Removing it from '
'the available providers list'.format(
driver, alias, fun
)
)
self.opts['providers'][alias].pop(driver)
if alias not in self.opts['providers']:
continue
if not self.opts['providers'][alias]:
self.opts['providers'].pop(alias)
continue
with context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if self.clouds[fun]() is False:
log.warn(
'The cloud driver, {0!r}, configured under the '
'{1!r} cloud provider alias is not properly '
'configured. Removing it from the available '
'providers list'.format(driver, alias)
)
self.opts['providers'][alias].pop(driver)
if alias not in self.opts['providers']:
continue
if not self.opts['providers'][alias]:
self.opts['providers'].pop(alias)
class Map(Cloud):
'''
Create a VM stateful map execution object
'''
def __init__(self, opts):
Cloud.__init__(self, opts)
self.rendered_map = self.read()
def interpolated_map(self, query='list_nodes', cached=False):
rendered_map = self.read().copy()
interpolated_map = {}
for profile, mapped_vms in rendered_map.items():
names = set(mapped_vms)
if profile not in self.opts['profiles']:
if 'Errors' not in interpolated_map:
interpolated_map['Errors'] = {}
msg = (
'No provider for the mapped {0!r} profile was found. '
'Skipped VMs: {1}'.format(
profile, ', '.join(names)
)
)
log.info(msg)
interpolated_map['Errors'][profile] = msg
continue
matching = self.get_running_by_names(names, query, cached)
for alias, drivers in matching.items():
for driver, vms in drivers.items():
for vm_name, vm_details in vms.items():
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = vm_details
names.remove(vm_name)
if not names:
continue
profile_details = self.opts['profiles'][profile]
alias, driver = profile_details['provider'].split(':')
for vm_name in names:
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = 'Absent'
return interpolated_map
def delete_map(self, query=None):
query_map = self.interpolated_map(query=query)
for alias, drivers in query_map.copy().items():
for driver, vms in drivers.copy().items():
for vm_name, vm_details in vms.copy().items():
if vm_details == 'Absent':
query_map[alias][driver].pop(vm_name)
elif vm_details['state'].lower() not in ('running',
'active'):
query_map[alias][driver].pop(vm_name)
if not query_map[alias][driver]:
query_map[alias].pop(driver)
if not query_map[alias]:
query_map.pop(alias)
return query_map
def get_vmnames_by_action(self, action):
query_map = self.interpolated_map("list_nodes")
matching_states = {
'start': ['stopped'],
'stop': ['running', 'active'],
'reboot': ['running', 'active'],
}
vm_names = []
for alias, drivers in query_map.items():
for driver, vms in drivers.items():
for vm_name, vm_details in vms.items():
if (vm_details != 'Absent') and \
(
vm_details['state'].lower() in
matching_states[action]
):
vm_names.append(vm_name)
return vm_names
def read(self):
'''
Read in the specified map file and return the map structure
'''
if self.opts.get('map', None) is None:
return {}
if not os.path.isfile(self.opts['map']):
raise SaltCloudNotFound(
'The specified map file does not exist: {0}\n'.format(
self.opts['map']
)
)
try:
renderer = self.opts.get('renderer', 'yaml_jinja')
rend = salt.loader.render(self.opts, {})
map_ = compile_template(
self.opts['map'], rend, renderer
)
except Exception as exc:
log.error(
'Rendering map {0} failed, render error:\n{1}'.format(
self.opts['map'], exc
),
exc_info_on_loglevel=logging.DEBUG
)
return {}
if 'include' in map_:
map_ = salt.config.include_config(
map_, self.opts['map'], verbose=False
)
# Create expected data format if needed
for profile, mapped in map_.copy().items():
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, string_types):
# Foo:
# - bar1
# - bar2
mapping = {mapping: None}
for name, overrides in mapping.items():
if overrides is None:
# Foo:
# - bar1:
# - bar2:
overrides = {}
overrides.setdefault('name', name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, dict):
# Convert the dictionary mapping to a list of dictionaries
# Foo:
# bar1:
# grains:
# foo: bar
# bar2:
# grains:
# foo: bar
entries = {}
for name, overrides in mapped.items():
overrides.setdefault('name', name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, string_types):
# If it's a single string entry, let's make it iterable for
# the next step
mapped = [mapped]
map_[profile] = {}
for name in mapped:
map_[profile][name] = {'name': name}
return map_
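# Map-format sketch (illustrative only; the profile and host names are made
# up). A map file such as
#
#     fedora_small:
#       - web1
#       - web2:
#           minion:
#             master: salt.example.com
#
# is normalized by read() into
#
#     {'fedora_small': {'web1': {'name': 'web1'},
#                       'web2': {'name': 'web2',
#                                'minion': {'master': 'salt.example.com'}}}}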
def _has_loop(self, dmap, seen=None, val=None):
if seen is None:
for values in dmap['create'].values():
seen = []
try:
machines = values['requires']
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
else:
if val in seen:
return True
seen.append(val)
try:
machines = dmap['create'][val]['requires']
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
return False
def _calcdep(self, dmap, machine, data, level):
try:
deplist = data['requires']
except KeyError:
return level
levels = []
for name in deplist:
try:
data = dmap['create'][name]
except KeyError:
try:
data = dmap['existing'][name]
except KeyError:
msg = 'Missing dependency in cloud map'
log.error(msg)
raise SaltCloudException(msg)
levels.append(self._calcdep(dmap, name, data, level))
level = max(levels) + 1
return level
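# Dependency sketch (illustrative only; node names are made up). With a data
# map such as
#
#     dmap = {'create': {'db1':  {'name': 'db1'},
#                        'app1': {'name': 'app1', 'requires': ['db1']}}}
#
# _calcdep() assigns level 0 to 'db1' and level 1 to 'app1', so run_map()
# creates 'db1' first. If 'db1' also listed 'app1' in its 'requires',
# _has_loop(dmap) would return True and run_map() would abort with a
# SaltCloudException.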
def map_data(self, cached=False):
'''
Create a data map of what to execute on
'''
ret = {'create': {}}
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
for profile_name, nodes in self.rendered_map.items():
if profile_name not in self.opts['profiles']:
msg = (
'The required profile, {0!r}, defined in the map '
'does not exist. The defined nodes, {1}, will not '
'be created.'.format(
profile_name,
', '.join('{0!r}'.format(node) for node in nodes)
)
)
log.error(msg)
if 'errors' not in ret:
ret['errors'] = {}
ret['errors'][profile_name] = msg
continue
profile_data = self.opts['profiles'].get(profile_name)
for nodename, overrides in nodes.items():
# Get the VM name
nodedata = copy.deepcopy(profile_data)
# Update profile data with the map overrides
for setting in ('grains', 'master', 'minion', 'volumes',
'requires'):
deprecated = 'map_{0}'.format(setting)
if deprecated in overrides:
log.warn(
'The use of {0!r} on the {1!r} mapping has '
'been deprecated. The preferred way now is to '
'just define {2!r}. For now, salt-cloud will do '
'the proper thing and convert the deprecated '
'mapping into the preferred one.'.format(
deprecated, nodename, setting
)
)
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if 'minion' in overrides and 'minion' in nodedata:
if 'grains' in overrides['minion']:
if 'grains' in nodedata['minion']:
nodedata['minion']['grains'].update(
overrides['minion']['grains']
)
del overrides['minion']['grains']
# remove the minion key if it is now an empty dict
if len(overrides['minion']) == 0:
del overrides['minion']
nodedata.update(overrides)
# Add the computed information to the return data
ret['create'][nodename] = nodedata
# Add the node name to the defined set
alias, driver = nodedata['provider'].split(':')
defined.add((alias, driver, nodename))
def get_matching_by_name(name):
matches = {}
for alias, drivers in pmap.items():
for driver, vms in drivers.items():
for vm_name, details in vms.items():
if vm_name == name:
if driver not in matches:
matches[driver] = details['state']
return matches
for alias, drivers in pmap.items():
for driver, vms in drivers.items():
for name, details in vms.items():
exist.add((alias, driver, name))
if name not in ret['create']:
continue
# The machine is set to be created. Does it already exist?
matching = get_matching_by_name(name)
if not matching:
continue
# A machine by the same name exists
for mdriver, state in matching.items():
if name not in ret['create']:
# Machine already removed
break
if mdriver not in ('aws', 'ec2') and \
state.lower() != 'terminated':
# For other providers, simply remove the machine
# from the create map.
log.warn(
'{0!r} already exists, removing from '
'the create map'.format(name)
)
if 'existing' not in ret:
ret['existing'] = {}
ret['existing'][name] = ret['create'].pop(name)
continue
if state.lower() != 'terminated':
log.info(
'{0!r} already exists, removing '
'from the create map'.format(name)
)
if 'existing' not in ret:
ret['existing'] = {}
ret['existing'][name] = ret['create'].pop(name)
if self.opts['hard']:
if self.opts['enable_hard_maps'] is False:
raise SaltCloudSystemExit(
'The --hard map can be extremely dangerous to use, '
'and therefore must explicitly be enabled in the main '
'configuration file, by setting \'enable_hard_maps\' '
'to True'
)
# Hard maps are enabled; look for the items to delete.
ret['destroy'] = exist.difference(defined)
return ret
def run_map(self, dmap):
'''
Execute the contents of the VM map
'''
if self._has_loop(dmap):
msg = 'Uh-oh, that cloud map has a dependency loop!'
log.error(msg)
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in dmap['create'].items():
log.info('Calculating dependencies for {0}'.format(key))
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug('Got execution order {0} for {1}'.format(level, key))
dmap['create'][key]['level'] = level
try:
existing_list = dmap['existing'].items()
except KeyError:
existing_list = {}
for key, val in existing_list:
log.info('Calculating dependencies for {0}'.format(key))
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug('Got execution order {0} for {1}'.format(level, key))
dmap['existing'][key]['level'] = level
# Now sort the create list based on dependencies
create_list = sorted(dmap['create'].items(),
key=lambda x: x[1]['level'])
output = {}
if self.opts['parallel']:
parallel_data = []
master_name = None
master_minion_name = None
master_host = None
master_finger = None
try:
master_name, master_profile = next((
(name, profile) for name, profile in create_list
if profile.get('make_master', False) is True
))
master_minion_name = master_name
log.debug('Creating new master {0!r}'.format(master_name))
if salt.config.get_cloud_config_value(
'deploy',
master_profile,
self.opts
) is False:
raise SaltCloudSystemExit(
'Cannot proceed with \'make_master\' when salt deployment '
'is disabled (e.g. --no-deploy).'
)
# Generate the master keys
log.debug(
'Generating master keys for {0[name]!r}'.format(master_profile)
)
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
master_profile,
self.opts
)
)
master_profile['master_pub'] = pub
master_profile['master_pem'] = priv
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_temp_pub = salt.utils.mkstemp()
with salt.utils.fopen(master_temp_pub, 'w') as mtp:
mtp.write(pub)
master_finger = salt.utils.pem_finger(master_temp_pub)
os.unlink(master_temp_pub)
if master_profile.get('make_minion', True) is True:
master_profile.setdefault('minion', {})
if 'id' in master_profile['minion']:
master_minion_name = master_profile['minion']['id']
# Set this minion's master as local if the user has not set it
if 'master' not in master_profile['minion']:
master_profile['minion']['master'] = '127.0.0.1'
if master_finger is not None:
master_profile['master_finger'] = master_finger
# Generate the minion keys to pre-seed the master:
for name, profile in create_list:
make_minion = salt.config.get_cloud_config_value(
'make_minion', profile, self.opts, default=True
)
if make_minion is False:
continue
log.debug(
'Generating minion keys for {0[name]!r}'.format(profile)
)
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
profile,
self.opts
)
)
profile['pub_key'] = pub
profile['priv_key'] = priv
# Store the minion's public key in order to be pre-seeded in
# the master
master_profile.setdefault('preseed_minion_keys', {})
master_profile['preseed_minion_keys'].update({name: pub})
local_master = False
if master_profile['minion'].get('local_master', False) and \
master_profile['minion'].get('master', None) is not None:
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
out = self.create(master_profile, local_master=local_master)
if not isinstance(out, dict):
log.debug(
'Master creation details are not a dictionary: {0}'.format(
out
)
)
elif 'Errors' in out:
raise SaltCloudSystemExit(
'An error occurred while creating the master, not '
'continuing: {0}'.format(out['Errors'])
)
deploy_kwargs = (
self.opts.get('show_deploy_args', False) is True and
# Get the needed data
out.get('deploy_kwargs', {}) or
# Strip the deploy_kwargs from the returned data since we don't
# want it shown in the console.
out.pop('deploy_kwargs', {})
)
master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
if master_host is None:
raise SaltCloudSystemExit(
'Host for new master {0} was not found, '
'aborting map'.format(
master_name
)
)
output[master_name] = out
except StopIteration:
log.debug('No make_master found in map')
# Local master?
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
if os.path.isfile(master_pub):
master_finger = salt.utils.pem_finger(master_pub)
opts = self.opts.copy()
if self.opts['parallel']:
# Force display_ssh_output to be False since the console will
# need to be reset afterwards
log.info(
'Since parallel deployment is in use, ssh console output '
'is disabled. All ssh output will be logged though'
)
opts['display_ssh_output'] = False
local_master = master_name is None
for name, profile in create_list:
if name in (master_name, master_minion_name):
# Already deployed, it's the master's minion
continue
if 'minion' in profile and profile['minion'].get('local_master', False) and \
profile['minion'].get('master', None) is not None:
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
if master_finger is not None and local_master is False:
profile['master_finger'] = master_finger
if master_host is not None:
profile.setdefault('minion', {})
profile['minion'].setdefault('master', master_host)
if self.opts['parallel']:
parallel_data.append({
'opts': opts,
'name': name,
'profile': profile,
'local_master': local_master
})
continue
# Not deploying in parallel
try:
output[name] = self.create(
profile, local_master=local_master
)
if self.opts.get('show_deploy_args', False) is False:
output[name].pop('deploy_kwargs', None)
except SaltCloudException as exc:
log.error(
'Failed to deploy {0!r}. Error: {1}'.format(
name, exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
output[name] = {'Error': str(exc)}
for name in dmap.get('destroy', ()):
output[name] = self.destroy(name)
if self.opts['parallel'] and len(parallel_data) > 0:
if 'pool_size' in self.opts:
pool_size = self.opts['pool_size']
else:
pool_size = len(parallel_data)
log.info('Cloud pool size: {0}'.format(pool_size))
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size)
# We have deployed in parallel, now do start action in
# correct order based on dependencies.
if self.opts['start_action']:
actionlist = []
grp = -1
for key, val in groupby(iter(dmap['create'].values()),
lambda x: x['level']):
actionlist.append([])
grp += 1
for item in val:
actionlist[grp].append(item['name'])
out = {}
for group in actionlist:
log.info(
'Running {0} on {1}'.format(
self.opts['start_action'], ', '.join(group)
)
)
client = salt.client.get_local_client()
out.update(client.cmd(
','.join(group), self.opts['start_action'],
timeout=self.opts['timeout'] * 60, expr_form='list'
))
for obj in output_multip:
next(iter(obj.values()))['ret'] = out[next(iter(obj.keys()))]
output.update(obj)
else:
for obj in output_multip:
output.update(obj)
return output
def init_pool_worker():
'''
Make every worker ignore KeyboardInterrupt since it will be handled by the
parent process.
'''
signal.signal(signal.SIGINT, signal.SIG_IGN)
def create_multiprocessing(parallel_data, queue=None):
'''
This function will be called from another process when running a map in
parallel mode. The result from the create is always a json object.
'''
parallel_data['opts']['output'] = 'json'
cloud = Cloud(parallel_data['opts'])
try:
output = cloud.create(
parallel_data['profile'],
local_master=parallel_data['local_master']
)
except SaltCloudException as exc:
log.error(
'Failed to deploy {0[name]!r}. Error: {1}'.format(
parallel_data, exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return {parallel_data['name']: {'Error': str(exc)}}
if parallel_data['opts'].get('show_deploy_args', False) is False:
output.pop('deploy_kwargs', None)
return {
parallel_data['name']: salt.utils.cloud.simple_types_filter(output)
}
def destroy_multiprocessing(parallel_data, queue=None):
'''
This function will be called from another process when running a map in
parallel mode. The result from the destroy is always a json object.
'''
parallel_data['opts']['output'] = 'json'
clouds = salt.loader.clouds(parallel_data['opts'])
try:
fun = clouds['{0}.destroy'.format(parallel_data['driver'])]
with context.func_globals_inject(
fun,
__active_provider_name__=':'.join([
parallel_data['alias'],
parallel_data['driver']
])
):
output = fun(parallel_data['name'])
except SaltCloudException as exc:
log.error(
'Failed to destroy {0}. Error: {1}'.format(
parallel_data['name'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return {parallel_data['name']: {'Error': str(exc)}}
return {
parallel_data['name']: salt.utils.cloud.simple_types_filter(output)
}
def run_parallel_map_providers_query(data, queue=None):
'''
This function will be called from another process when building the
providers map.
'''
try:
import Crypto.Random # pylint: disable=E0611
Crypto.Random.atfork()
except ImportError:
# PyCrypto version < 2.1
pass
cloud = Cloud(data['opts'])
try:
with context.func_globals_inject(
cloud.clouds[data['fun']],
__active_provider_name__=':'.join([
data['alias'],
data['driver']
])
):
return (
data['alias'],
data['driver'],
salt.utils.cloud.simple_types_filter(
cloud.clouds[data['fun']]()
)
)
except Exception as err:
log.debug(
'Failed to execute \'{0}()\' while querying for running '
'nodes: {1}'.format(data['fun'], err),
# Show the traceback if the debug logging level is
# enabled
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any nodes
return (data['alias'], data['driver'], ())
# For pickle and multiprocessing we can't apply decorators directly: the pool
# needs module-level, picklable callables, so the communicator decorator is
# applied at call time inside these thin wrappers.
def _run_parallel_map_providers_query(*args, **kw):
return communicator(run_parallel_map_providers_query)(*args[0], **kw)
def _destroy_multiprocessing(*args, **kw):
return communicator(destroy_multiprocessing)(*args[0], **kw)
def _create_multiprocessing(*args, **kw):
return communicator(create_multiprocessing)(*args[0], **kw)
#
|
the-stack_0_25788
|
import numpy as np
import pytest
import scipy.sparse
import tensorflow as tf
import torch
from scipy.sparse import csr_matrix
from docarray import DocumentArray, Document
from tests import random_docs
rand_array = np.random.random([10, 3])
def da_and_dam():
rand_docs = random_docs(100)
da = DocumentArray()
da.extend(rand_docs)
return (da,)
@pytest.mark.parametrize(
'array',
[
rand_array,
torch.Tensor(rand_array),
tf.constant(rand_array),
csr_matrix(rand_array),
],
)
def test_set_embeddings_multi_kind(array):
da = DocumentArray([Document() for _ in range(10)])
da.embeddings = array
@pytest.mark.parametrize('da', da_and_dam())
def test_da_get_embeddings(da):
np.testing.assert_almost_equal(da._get_attributes('embedding'), da.embeddings)
np.testing.assert_almost_equal(da[:, 'embedding'], da.embeddings)
@pytest.mark.parametrize('da', da_and_dam())
def test_embeddings_setter_da(da):
emb = np.random.random((100, 128))
da.embeddings = emb
np.testing.assert_almost_equal(da.embeddings, emb)
for x, doc in zip(emb, da):
np.testing.assert_almost_equal(x, doc.embedding)
da.embeddings = None
if hasattr(da, 'flush'):
da.flush()
assert not da.embeddings
@pytest.mark.parametrize('da', da_and_dam())
def test_embeddings_wrong_len(da):
embeddings = np.ones((2, 10))
with pytest.raises(ValueError):
da.embeddings = embeddings
@pytest.mark.parametrize('da', da_and_dam())
def test_blobs_getter_da(da):
blobs = np.random.random((100, 10, 10))
da.blobs = blobs
assert len(da) == 100
np.testing.assert_almost_equal(da.blobs, blobs)
da.blobs = None
if hasattr(da, 'flush'):
da.flush()
assert not da.blobs
@pytest.mark.parametrize('da', da_and_dam())
def test_texts_getter_da(da):
assert len(da.texts) == 100
assert da.texts == da[:, 'text']
texts = ['text' for _ in range(100)]
da.texts = texts
assert da.texts == texts
for x, doc in zip(texts, da):
assert x == doc.text
da.texts = None
if hasattr(da, 'flush'):
da.flush()
# unfortunately protobuf does not distinguish between None and '' for string
# fields, so an unset str field in protobuf is ''
assert not da.texts
@pytest.mark.parametrize('da', da_and_dam())
def test_texts_wrong_len(da):
texts = ['hello']
with pytest.raises(ValueError):
da.texts = texts
@pytest.mark.parametrize('da', da_and_dam())
def test_blobs_wrong_len(da):
blobs = np.ones((2, 10, 10))
with pytest.raises(ValueError):
da.blobs = blobs
@pytest.mark.parametrize('da', da_and_dam())
def test_buffers_getter_setter(da):
with pytest.raises(ValueError):
da.buffers = [b'cc', b'bb', b'aa', b'dd']
da.buffers = [b'aa'] * len(da)
assert da.buffers == [b'aa'] * len(da)
da.buffers = None
if hasattr(da, 'flush'):
da.flush()
# unfortunately protobuf does not distinguish between None and '' for string
# fields, so an unset str field in protobuf is ''
assert not da.buffers
def test_zero_embeddings():
a = np.zeros([10, 6])
da = DocumentArray.empty(10)
# all zero, dense
da.embeddings = a
np.testing.assert_almost_equal(da.embeddings, a)
for d in da:
assert d.embedding.shape == (6,)
# all zero, sparse
sp_a = scipy.sparse.coo_matrix(a)
da.embeddings = sp_a
np.testing.assert_almost_equal(da.embeddings.todense(), sp_a.todense())
for d in da:
# scipy sparse row-vector can only be a (1, m) not squeezible
assert d.embedding.shape == (1, 6)
# near zero, sparse
a = np.random.random([10, 6])
a[a > 0.1] = 0
sp_a = scipy.sparse.coo_matrix(a)
da.embeddings = sp_a
np.testing.assert_almost_equal(da.embeddings.todense(), sp_a.todense())
for d in da:
# scipy sparse row-vector can only be a (1, m) not squeezible
assert d.embedding.shape == (1, 6)
|
the-stack_0_25789
|
import numbers
from typing import Callable, Union, Any, Optional
from ignite.metrics import Metric
from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
from ignite.exceptions import NotComputableError
import torch
__all__ = ["VariableAccumulation", "GeometricAverage", "Average"]
class VariableAccumulation(Metric):
"""Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.
- `update` must receive output of the form `x`.
- `x` can be a number or `torch.Tensor`.
Note:
The class stores input into two public variables: `accumulator` and `num_examples`.
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
Args:
op (callable): a callable to update accumulator. Method's signature is `(accumulator, output)`.
For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.Engine`'s `process_function`'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device (str or torch.device, optional): device specification in case of distributed computation usage.
In most of the cases, it can be defined as "cuda:local_rank" or "cuda"
if `torch.cuda.set_device(local_rank)` has already been called. By default, if a distributed process group is
initialized and available, device is set to `cuda`.
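Examples:
A minimal usage sketch (added here for illustration, not part of the original
docstring), accumulating a running sum of plain numbers outside of an engine:
.. code-block:: python
acc = VariableAccumulation(op=lambda a, x: a + x)
acc.update(2.0)
acc.update(4.0)
accumulator, num_examples = acc.compute()
# accumulator -> tensor(6., dtype=torch.float64), num_examples -> tensor(2)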
"""
_required_output_keys = None
def __init__(
self, op: Callable, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None
):
if not callable(op):
raise TypeError("Argument op should be a callable, but given {}".format(type(op)))
self.accumulator = None
self.num_examples = None
self._op = op
super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self.num_examples = torch.tensor(0.0, dtype=torch.long, device=self._device)
def _check_output_type(self, output: Union[Any, torch.Tensor, numbers.Number]) -> None:
if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)):
raise TypeError("Output should be a number or torch.Tensor, but given {}".format(type(output)))
@reinit__is_reduced
def update(self, output: Union[Any, torch.Tensor, numbers.Number]) -> None:
self._check_output_type(output)
if self._device is not None:
# Put output to the metric's device
if isinstance(output, torch.Tensor) and (output.device != self._device):
output = output.to(self._device)
self.accumulator = self._op(self.accumulator, output)
if hasattr(output, "shape"):
self.num_examples += output.shape[0] if len(output.shape) > 1 else 1
else:
self.num_examples += 1
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> list:
return [self.accumulator, self.num_examples]
class Average(VariableAccumulation):
"""Helper class to compute arithmetic average of a single variable.
- `update` must receive output of the form `x`.
- `x` can be a number or `torch.Tensor`.
Note:
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`
Examples:
.. code-block:: python
evaluator = ...
custom_var_mean = Average(output_transform=lambda output: output['custom_var'])
custom_var_mean.attach(evaluator, 'mean_custom_var')
state = evaluator.run(dataset)
# state.metrics['mean_custom_var'] -> average of output['custom_var']
Args:
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.Engine`'s `process_function`'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device (str or torch.device, optional): device specification in case of distributed computation usage.
In most of the cases, it should be defined as "cuda:local_rank".
"""
def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None):
def _mean_op(a, x):
if isinstance(x, torch.Tensor) and x.ndim > 1:
x = x.sum(dim=0)
return a + x
super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[Any, torch.Tensor, numbers.Number]:
if self.num_examples < 1:
raise NotComputableError(
"{} must have at least one example before" " it can be computed.".format(self.__class__.__name__)
)
return self.accumulator / self.num_examples
class GeometricAverage(VariableAccumulation):
"""Helper class to compute geometric average of a single variable.
- `update` must receive output of the form `x`.
- `x` can be a positive number or a positive `torch.Tensor`, such that `torch.log(x)` is not `nan`.
Note:
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`
Args:
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.Engine`'s `process_function`'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device (str or torch.device, optional): device specification in case of distributed computation usage.
In most of the cases, it should be defined as "cuda:local_rank".
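Examples:
An illustrative sketch (added here, not part of the original docstring), mirroring
the `Average` example above; `output['custom_var']` is assumed to be positive:
.. code-block:: python
evaluator = ...
custom_var_gmean = GeometricAverage(output_transform=lambda output: output['custom_var'])
custom_var_gmean.attach(evaluator, 'gmean_custom_var')
state = evaluator.run(dataset)
# state.metrics['gmean_custom_var'] -> geometric average of output['custom_var']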
"""
def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None):
def _geom_op(a: torch.Tensor, x: Union[Any, numbers.Number, torch.Tensor]) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
x = torch.log(x)
if x.ndim > 1:
x = x.sum(dim=0)
return a + x
super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> torch.Tensor:
if self.num_examples < 1:
raise NotComputableError(
"{} must have at least one example before" " it can be computed.".format(self.__class__.__name__)
)
return torch.exp(self.accumulator / self.num_examples)
|
the-stack_0_25790
|
"""Implementation of the intermediary layer between Shapely and GEOS
This is layer number 2 from the list below.
1) geometric objects: the Python OO API.
2) implementation map: an abstraction that permits different backends.
3) backend: callable objects that take Shapely geometric objects as arguments
and, with GEOS as a backend, translate them to C data structures.
4) GEOS library: algorithms implemented in C++.
Shapely 1.2 includes a GEOS backend and it is the default.
"""
from functools import wraps
from shapely.algorithms import cga
from shapely.coords import BoundsOp
from shapely.geos import lgeos
from shapely.linref import ProjectOp, InterpolateOp
from shapely.predicates import BinaryPredicate, UnaryPredicate
from shapely.topology import BinaryRealProperty, BinaryTopologicalOp
from shapely.topology import UnaryRealProperty, UnaryTopologicalOp
class ImplementationError(
AttributeError, KeyError, NotImplementedError):
"""To be raised when the registered implementation does not
support the requested method."""
def delegated(func):
"""A delegated method raises AttributeError in the absence of backend
support."""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyError:
raise ImplementationError(
"Method '%s' not provided by registered "
"implementation '%s'" % (func.__name__, args[0].impl))
return wrapper
# Map geometry methods to their GEOS delegates
class BaseImpl(object):
"""Base class for registrable implementations."""
def __init__(self, values):
self.map = dict(values)
def update(self, values):
self.map.update(values)
def __getitem__(self, key):
try:
return self.map[key]
except KeyError:
raise ImplementationError(
"Method '%s' not provided by registered "
"implementation '%s'" % (key, self.map))
def __contains__(self, key):
return key in self.map
class GEOSImpl(BaseImpl):
"""GEOS implementation"""
def __repr__(self):
return '<GEOSImpl object: GEOS C API version %s>' % (
lgeos.geos_capi_version,)
IMPL300 = {
'area': (UnaryRealProperty, 'area'),
'distance': (BinaryRealProperty, 'distance'),
'length': (UnaryRealProperty, 'length'),
#
'boundary': (UnaryTopologicalOp, 'boundary'),
'bounds': (BoundsOp, None),
'centroid': (UnaryTopologicalOp, 'centroid'),
'representative_point': (UnaryTopologicalOp, 'representative_point'),
'envelope': (UnaryTopologicalOp, 'envelope'),
'convex_hull': (UnaryTopologicalOp, 'convex_hull'),
'buffer': (UnaryTopologicalOp, 'buffer'),
#
'difference': (BinaryTopologicalOp, 'difference'),
'intersection': (BinaryTopologicalOp, 'intersection'),
'symmetric_difference': (BinaryTopologicalOp, 'symmetric_difference'),
'union': (BinaryTopologicalOp, 'union'),
#
'has_z': (UnaryPredicate, 'has_z'),
'is_empty': (UnaryPredicate, 'is_empty'),
'is_ring': (UnaryPredicate, 'is_ring'),
'is_simple': (UnaryPredicate, 'is_simple'),
'is_valid': (UnaryPredicate, 'is_valid'),
#
'relate': (BinaryPredicate, 'relate'),
'contains': (BinaryPredicate, 'contains'),
'crosses': (BinaryPredicate, 'crosses'),
'disjoint': (BinaryPredicate, 'disjoint'),
'equals': (BinaryPredicate, 'equals'),
'intersects': (BinaryPredicate, 'intersects'),
'overlaps': (BinaryPredicate, 'overlaps'),
'touches': (BinaryPredicate, 'touches'),
'within': (BinaryPredicate, 'within'),
'covers': (BinaryPredicate, 'covers'),
'equals_exact': (BinaryPredicate, 'equals_exact'),
'relate_pattern': (BinaryPredicate, 'relate_pattern'),
# First pure Python implementation
'is_ccw': (cga.is_ccw_impl, 'is_ccw'),
}
IMPL310 = {
'simplify': (UnaryTopologicalOp, 'simplify'),
'topology_preserve_simplify':
(UnaryTopologicalOp, 'topology_preserve_simplify'),
'prepared_disjoint': (BinaryPredicate, 'prepared_disjoint'),
'prepared_touches': (BinaryPredicate, 'prepared_touches'),
'prepared_crosses': (BinaryPredicate, 'prepared_crosses'),
'prepared_within': (BinaryPredicate, 'prepared_within'),
'prepared_overlaps': (BinaryPredicate, 'prepared_overlaps'),
'prepared_intersects': (BinaryPredicate, 'prepared_intersects'),
'prepared_contains': (BinaryPredicate, 'prepared_contains'),
'prepared_contains_properly':
(BinaryPredicate, 'prepared_contains_properly'),
'prepared_covers': (BinaryPredicate, 'prepared_covers'),
}
IMPL311 = {
}
IMPL320 = {
'parallel_offset': (UnaryTopologicalOp, 'parallel_offset'),
'project_normalized': (ProjectOp, 'project_normalized'),
'project': (ProjectOp, 'project'),
'interpolate_normalized': (InterpolateOp, 'interpolate_normalized'),
'interpolate': (InterpolateOp, 'interpolate'),
'buffer_with_style': (UnaryTopologicalOp, 'buffer_with_style'),
'hausdorff_distance': (BinaryRealProperty, 'hausdorff_distance'),
}
IMPL330 = {
'is_closed': (UnaryPredicate, 'is_closed'),
'buffer_with_params': (UnaryTopologicalOp, 'buffer_with_params'),
'covered_by': (BinaryPredicate, 'covered_by')
}
IMPL360 = {
'minimum_clearance': (UnaryRealProperty, 'minimum_clearance')
}
def impl_items(defs):
return [(k, v[0](v[1])) for k, v in list(defs.items())]
imp = GEOSImpl(dict(impl_items(IMPL300)))
if lgeos.geos_version >= (3, 1, 0):
imp.update(impl_items(IMPL310))
if lgeos.geos_version >= (3, 1, 1):
imp.update(impl_items(IMPL311))
if lgeos.geos_version >= (3, 2, 0):
imp.update(impl_items(IMPL320))
if lgeos.geos_version >= (3, 3, 0):
imp.update(impl_items(IMPL330))
if lgeos.geos_version >= (3, 6, 0):
imp.update(impl_items(IMPL360))
DefaultImplementation = imp
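# An illustrative sketch (added for clarity, not part of the original module):
# geometry objects are expected to resolve their operations through this map,
# roughly as `geom.impl['area'](geom)`. `geom` below is a hypothetical Shapely
# geometry; looking up a method that is not registered for the linked GEOS
# version raises ImplementationError.
def _example_delegate_lookup(geom):
    area_op = DefaultImplementation['area']  # a UnaryRealProperty bound to GEOS 'area'
    return area_op(geom)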
|
the-stack_0_25793
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# wagtail-personalisation documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 19 15:12:32 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'wagtail-personalisation'
copyright = '2017, Lab Digital BV'
author = 'Lab Digital BV'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.11.3'
# The full version, including alpha/beta/rc tags.
release = '0.11.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'github_user': 'LabD',
'github_banner': True,
'github_repo': 'wagtail-personalisation',
'travis_button': True,
'codecov_button': True,
'analytics_id': 'UA-100203499-2',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'wagtail-personalisationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'wagtail-personalisation.tex', 'wagtail-personalisation Documentation',
'Lab Digital BV', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wagtail-personalisation', 'wagtail-personalisation Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'wagtail-personalisation', 'wagtail-personalisation Documentation',
author, 'wagtail-personalisation', 'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_25795
|
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Step 2 to load ZetaSQL dependencies. """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
# Followup from zetasql_deps_step_1.bzl
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
def _load_deps_from_step_1():
rules_foreign_cc_dependencies()
def zetasql_deps_step_2():
"""Step 2 macro to include ZetaSQL's critical dependencies in a WORKSPACE.
"""
# Followup from zetasql_deps_step_1.bzl
_load_deps_from_step_1()
if not native.existing_rule("com_googleapis_googleapis"):
# Very rarely updated, but just in case, here's how:
# COMMIT=<paste commit hex>
# PREFIX=googleapis-
# REPO=https://github.com/googleapis/googleapis/archive
# URL=${REPO}/${COMMIT}.tar.gz
# wget $URL
# SHA256=$(sha256sum ${COMMIT}.tar.gz | cut -f1 -d' ')
# rm ${COMMIT}.tar.gz
# echo url = \"$URL\",
# echo sha256 = \"$SHA256\",
# echo strip_prefix = \"${PREFIX}${COMMIT}\",
http_archive(
name = "com_googleapis_googleapis",
url = "https://github.com/googleapis/googleapis/archive/common-protos-1_3_1.tar.gz",
sha256 = "9584b7ac21de5b31832faf827f898671cdcb034bd557a36ea3e7fc07e6571dcb",
strip_prefix = "googleapis-common-protos-1_3_1",
build_file_content = """
proto_library(
name = "date_proto",
visibility = ["//visibility:public"],
srcs = ["google/type/date.proto"])
cc_proto_library(
name = "date_cc_proto",
visibility = ["//visibility:public"],
deps = [":date_proto"])
proto_library(
name = "latlng_proto",
visibility = ["//visibility:public"],
srcs = ["google/type/latlng.proto"])
cc_proto_library(
name = "latlng_cc_proto",
visibility = ["//visibility:public"],
deps = [":latlng_proto"])
proto_library(
name = "timeofday_proto",
visibility = ["//visibility:public"],
srcs = ["google/type/timeofday.proto"])
cc_proto_library(
name = "timeofday_cc_proto",
visibility = ["//visibility:public"],
deps = [":timeofday_proto"])
""",
)
# Abseil
if not native.existing_rule("com_google_absl"):
# How to update:
# Abseil generally just does daily (or even subdaily) releases. None are
# special, so just periodically update as necessary.
#
# https://github.com/abseil/abseil-cpp/commits/master
# pick a recent release.
# Hit the 'clipboard with a left arrow' icon to copy the commit hex
# COMMIT=<paste commit hex>
# PREFIX=abseil-cpp-
# REPO=https://github.com/abseil/abseil-cpp/archive
# URL=${REPO}/${COMMIT}.tar.gz
# wget $URL
# SHA256=$(sha256sum ${COMMIT}.tar.gz | cut -f1 -d' ')
# rm ${COMMIT}.tar.gz
# echo \# Commit from $(date --iso-8601=date)
# echo url = \"$URL\",
# echo sha256 = \"$SHA256\",
# echo strip_prefix = \"${PREFIX}${COMMIT}\",
#
http_archive(
name = "com_google_absl",
# Commit from 2021-02-23
url = "https://github.com/abseil/abseil-cpp/archive/a50ae369a30f99f79d7559002aba3413dac1bd48.tar.gz",
sha256 = "be2a9d7ea7ee15f9317b57beff37e8ffb67418fb0df64592366b04c8618c2584",
strip_prefix = "abseil-cpp-a50ae369a30f99f79d7559002aba3413dac1bd48",
)
# Abseil (Python)
if not native.existing_rule("com_google_absl_py"):
# How to update:
# Abseil generally just does daily (or even subdaily) releases. None are
# special, so just periodically update as necessary.
#
# https://github.com/abseil/abseil-py
# navigate to "commits"
# pick a recent release.
# Hit the 'clipboard with a left arrow' icon to copy the commit hex
#
# COMMITHEX=<commit hex>
# URL=https://github.com/abseil/abseil-py/archive/${COMMITHEX}.tar.gz
# wget -O absl.tar.gz $URL
# sha256sum absl.tar.gz # Spits out checksum of tarball
#
# update urls with $URL
# update sha256 with result of sha256sum
# update strip_prefix with COMMITHEX
http_archive(
name = "io_abseil_py",
# Non-release commit from April 18, 2018
urls = [
"https://github.com/abseil/abseil-py/archive/bd4d245ac1e36439cb44e7ac46cd1b3e48d8edfa.tar.gz",
],
sha256 = "62a536b13840dc7e3adec333c1ea4c483628ce39a9fdd41e7b3e027f961eb371",
strip_prefix = "abseil-py-bd4d245ac1e36439cb44e7ac46cd1b3e48d8edfa",
)
# Riegeli
if not native.existing_rule("com_google_riegeli"):
# How to update:
# Riegeli is generally tracked at head; just periodically update to a recent
# commit as necessary.
#
# https://github.com/google/riegeli/commits/master
# pick a recent commit.
# Hit the 'clipboard with a left arrow' icon to copy the commit hex
# COMMIT=<paste commit hex>
# PREFIX=riegeli-
# REPO=https://github.com/google/riegeli/archive
# URL=${REPO}/${COMMIT}.tar.gz
# wget $URL
# SHA256=$(sha256sum ${COMMIT}.tar.gz | cut -f1 -d' ')
# rm ${COMMIT}.tar.gz
# echo \# Commit from $(date --iso-8601=date)
# echo url = \"$URL\",
# echo sha256 = \"$SHA256\",
# echo strip_prefix = \"${PREFIX}${COMMIT}\",
#
http_archive(
name = "com_google_riegeli",
# Commit from 2021-06-01
url = "https://github.com/google/riegeli/archive/baf6376f694d401932cf1b9d34e79a0fae50e7c4.tar.gz",
sha256 = "15b6da71683520b8c2eadf11eb8180eed567568740562c88ead69a560b8cd219",
strip_prefix = "riegeli-baf6376f694d401932cf1b9d34e79a0fae50e7c4",
)
# Differential Privacy
if not native.existing_rule("com_google_differential_privacy"):
http_archive(
name = "com_google_differential_privacy",
# Commit from 2021-01-21
url = "https://github.com/google/differential-privacy/archive/de8460c9791de4c89a9dbb906b11a8f62e045f7b.tar.gz",
sha256 = "e9f01b00e760724909a7ff7acf26855a802417a23bde54d6baec9168cfbe1dc4",
strip_prefix = "differential-privacy-de8460c9791de4c89a9dbb906b11a8f62e045f7b",
)
# Differential Privacy - cc
if not native.existing_rule("com_google_cc_differential_privacy"):
http_archive(
name = "com_google_cc_differential_privacy",
# Commit from 2021-01-21
url = "https://github.com/google/differential-privacy/archive/de8460c9791de4c89a9dbb906b11a8f62e045f7b.tar.gz",
sha256 = "e9f01b00e760724909a7ff7acf26855a802417a23bde54d6baec9168cfbe1dc4",
strip_prefix = "differential-privacy-de8460c9791de4c89a9dbb906b11a8f62e045f7b/cc",
)
# Boringssl
if not native.existing_rule("boringssl"):
http_archive(
name = "boringssl",
# Commit from 2019 October 29
urls = [
"https://github.com/google/boringssl/archive/9e18928936ccb882192e9779b0fd355bec739bdd.tar.gz",
],
sha256 = "19a951d1706a67be480809f6a6231675d29841be5682a7fe40bbcdf1e16f0147",
strip_prefix = "boringssl-9e18928936ccb882192e9779b0fd355bec739bdd",
)
# Farmhash
if not native.existing_rule("com_google_farmhash"):
http_archive(
name = "com_google_farmhash",
build_file = "@com_google_zetasql//bazel:farmhash.BUILD",
url = "https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
)
# required by protobuf_python
if not native.existing_rule("six_archive"):
http_archive(
name = "six_archive",
build_file = "@com_google_protobuf//:six.BUILD",
# Release 1.10.0
url = "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
)
native.bind(
name = "six",
actual = "@six_archive//:six",
)
# Protobuf
if not native.existing_rule("com_google_protobuf"):
http_archive(
name = "com_google_protobuf",
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.6.1.3.tar.gz"],
sha256 = "73fdad358857e120fd0fa19e071a96e15c0f23bb25f85d3f7009abfd4f264a2a",
strip_prefix = "protobuf-3.6.1.3",
patches = ["@com_google_zetasql//bazel:protobuf-v3.6.1.3.patch"],
)
# Required by gRPC
if not native.existing_rule("build_bazel_rules_apple"):
http_archive(
name = "build_bazel_rules_apple",
urls = ["https://github.com/bazelbuild/rules_apple/archive/0.18.0.tar.gz"],
sha256 = "53a8f9590b4026fbcfefd02c868e48683b44a314338d03debfb8e8f6c50d1239",
strip_prefix = "rules_apple-0.18.0",
)
# Required by gRPC
if not native.existing_rule("build_bazel_apple_support"):
http_archive(
name = "build_bazel_apple_support",
urls = ["https://github.com/bazelbuild/apple_support/archive/0.7.1.tar.gz"],
sha256 = "140fa73e1c712900097aabdb846172ffa0a5e9523b87d6c564c13116a6180a62",
strip_prefix = "apple_support-0.7.1",
)
if not native.existing_rule("com_google_file_based_test_driver"):
http_archive(
name = "com_google_file_based_test_driver",
# Commit from 2020-11-24
url = "https://github.com/google/file-based-test-driver/archive/77e24638ad40ec67dcbf6e37fd57e20c5d98976e.tar.gz",
sha256 = "fdb5d0138cc013b8b8d21b0d1827a1296621f1bfa599ef889a69eeed73a6f24b",
strip_prefix = "file-based-test-driver-77e24638ad40ec67dcbf6e37fd57e20c5d98976e",
)
# gRPC
if not native.existing_rule("com_github_grpc_grpc"):
http_archive(
name = "com_github_grpc_grpc",
urls = ["https://github.com/grpc/grpc/archive/v1.24.2.tar.gz"],
sha256 = "fd040f5238ff1e32b468d9d38e50f0d7f8da0828019948c9001e9a03093e1d8f",
strip_prefix = "grpc-1.24.2",
)
# gRPC Java
if not native.existing_rule("io_grpc_grpc_java"):
http_archive(
name = "io_grpc_grpc_java",
# Release 1.22.1
url = "https://github.com/grpc/grpc-java/archive/v1.22.1.tar.gz",
strip_prefix = "grpc-java-1.22.1",
sha256 = "6e63bd6f5a82de0b84c802390adb8661013bad9ebf910ad7e1f3f72b5f798832",
)
if not native.existing_rule("com_google_code_findbugs_jsr305"):
jvm_maven_import_external(
name = "com_google_code_findbugs_jsr305",
artifact = "com.google.code.findbugs:jsr305:3.0.2",
tags = ["maven_coordinates=com.google.code.findbugs:jsr305:3.0.2"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "766ad2a0783f2687962c8ad74ceecc38a28b9f72a2d085ee438b7813e928d0c7",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("com_google_errorprone_error_prone_annotations"):
jvm_maven_import_external(
name = "com_google_errorprone_error_prone_annotations",
artifact = "com.google.errorprone:error_prone_annotations:2.3.2",
tags = ["maven_coordinates=com.google.errorprone:error_prone_annotations:2.3.2"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "357cd6cfb067c969226c442451502aee13800a24e950fdfde77bcdb4565a668d",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("com_google_j2objc_j2objc_annotations"):
jvm_maven_import_external(
name = "com_google_j2objc_j2objc_annotations",
artifact = "com.google.j2objc:j2objc-annotations:1.1",
tags = ["maven_coordinates=com.google.j2objc:j2objc-annotations:1.1"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "2994a7eb78f2710bd3d3bfb639b2c94e219cedac0d4d084d516e78c16dddecf6",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("org_codehaus_mojo_animal_sniffer_annotations"):
jvm_maven_import_external(
name = "org_codehaus_mojo_animal_sniffer_annotations",
artifact = "org.codehaus.mojo:animal-sniffer-annotations:1.17",
tags = ["maven_coordinates=org.codehaus.mojo:animal-sniffer-annotations:1.17"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "92654f493ecfec52082e76354f0ebf87648dc3d5cec2e3c3cdb947c016747a53",
licenses = ["notice"], # MIT
)
if not native.existing_rule("com_google_guava_guava"):
jvm_maven_import_external(
name = "com_google_guava_guava",
artifact = "com.google.guava:guava:29.0-jre",
tags = ["maven_coordinates=com.google.guava:guava:29.0-jre"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "b22c5fb66d61e7b9522531d04b2f915b5158e80aa0b40ee7282c8bfb07b0da25",
licenses = ["notice"], # Apache 2.0
)
native.bind(
name = "guava",
actual = "@com_google_guava_guava//jar",
)
if not native.existing_rule("com_google_guava_testlib"):
jvm_maven_import_external(
name = "com_google_guava_testlib",
artifact = "com.google.guava:guava-testlib:29.0-jre",
tags = ["maven_coordinates=com.google.guava:testlib:29.0-jre"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "f15908de80ff261adde7823d1df599b7447de4863f7c521068ecf881cb1fc79b",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("com_google_code_gson_gson"):
jvm_maven_import_external(
name = "com_google_code_gson_gson",
artifact = "com.google.code.gson:gson:jar:2.7",
tags = ["maven_coordinates=com.google.code.gson:gson:jar:2.7"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "2d43eb5ea9e133d2ee2405cc14f5ee08951b8361302fdd93494a3a997b508d32",
licenses = ["notice"], # Apache 2.0
)
native.bind(
name = "gson",
actual = "@com_google_code_gson_gson//jar",
)
if not native.existing_rule("com_google_truth_truth"):
jvm_maven_import_external(
name = "com_google_truth_truth",
testonly = 1,
artifact = "com.google.truth:truth:0.44",
tags = ["maven_coordinates=com.google.truth:truth:0.44"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "a9e6796786c9c77a5fe19b08e72fe0a620d53166df423d8861af9ebef4dc4247",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("com_google_truth_proto_extension"):
jvm_maven_import_external(
name = "com_google_truth_proto_extension",
testonly = 1,
artifact = "com.google.truth.extensions:truth-proto-extension:0.44",
tags = ["maven_coordinates=com.google.truth.extensions:truth-proto-extension:0.44"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "d964495cee74d6933512c7b414c8723285a6413a4e3f46f558fbaf624dfd7c9f",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_buffer"):
jvm_maven_import_external(
name = "io_netty_netty_buffer",
artifact = "io.netty:netty-buffer:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-buffer:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "39dfe88df8505fd01fbf9c1dbb6b6fa9b0297e453c3dc4ce039ea578aea2eaa3",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_codec"):
jvm_maven_import_external(
name = "io_netty_netty_codec",
artifact = "io.netty:netty-codec:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-codec:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "52e9eeb3638a8ed0911c72a508c05fa4f9d3391125eae46f287d3a8a0776211d",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_codec_http"):
jvm_maven_import_external(
name = "io_netty_netty_codec_http",
artifact = "io.netty:netty-codec-http:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-codec-http:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "5df5556ef6b0e7ce7c72a359e4ca774fcdf8d8fe12f0b6332715eaa44cfe41f8",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_codec_http2"):
jvm_maven_import_external(
name = "io_netty_netty_codec_http2",
artifact = "io.netty:netty-codec-http2:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-codec-http2:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "319f66f3ab0d3aac3477febf19c259990ee8c639fc7da8822dfa58e7dab1bdcf",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_common"):
jvm_maven_import_external(
name = "io_netty_netty_common",
artifact = "io.netty:netty-common:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-common:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "122931117eacf370b054d0e8a2411efa81de4956a6c3f938b0f0eb915969a425",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_handler"):
jvm_maven_import_external(
name = "io_netty_netty_handler",
artifact = "io.netty:netty-handler:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-handler:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "035616801fe9894ca2490832cf9976536dac740f41e90de1cdd4ba46f04263d1",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_resolver"):
jvm_maven_import_external(
name = "io_netty_netty_resolver",
artifact = "io.netty:netty-resolver:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-resolver:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "774221ed4c130b532865770b10630bc12d0d400127da617ee0ac8de2a7ac2097",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_netty_netty_transport"):
jvm_maven_import_external(
name = "io_netty_netty_transport",
artifact = "io.netty:netty-transport:4.1.34.Final",
tags = ["maven_coordinates=io.netty:netty-transport:4.1.34.Final"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "2b3f7d3a595101def7d411793a675bf2a325964475fd7bdbbe448e908de09445",
exports = ["@io_netty_netty_common//jar"],
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("junit_junit"):
jvm_maven_import_external(
name = "junit_junit",
artifact = "junit:junit:4.13",
tags = ["maven_coordinates=junit:junit:4.13"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "4b8532f63bdc0e0661507f947eb324a954d1dbac631ad19c8aa9a00feed1d863",
licenses = ["notice"], # EPL 1.0
)
if not native.existing_rule("com_google_api_grpc_proto_google_common_protos"):
jvm_maven_import_external(
name = "com_google_api_grpc_proto_google_common_protos",
artifact = "com.google.api.grpc:proto-google-common-protos:1.12.0",
tags = ["maven_coordinates=com.google.api.grpc:proto-google-common-protos:1.12.0"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "bd60cd7a423b00fb824c27bdd0293aaf4781be1daba6ed256311103fb4b84108",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_grpc_grpc_context"):
jvm_maven_import_external(
name = "io_grpc_grpc_context",
artifact = "io.grpc:grpc-context:1.18.0",
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "12bc83b9fa3aa7550d75c4515b8ae74f124ba14d3692a5ef4737a2e855cbca2f",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_grpc_grpc_core"):
jvm_maven_import_external(
name = "io_grpc_grpc_core",
artifact = "io.grpc:grpc-core:1.18.0",
server_urls = ["https://repo1.maven.org/maven2"],
licenses = ["notice"], # Apache 2.0
runtime_deps = [
"@io_opencensus_opencensus_api//jar",
"@io_opencensus_opencensus_contrib_grpc_metrics//jar",
],
artifact_sha256 = "fcc02e49bb54771af51470e85611067a8b6718d0126af09da34bbb1e12096f5f",
)
if not native.existing_rule("io_grpc_grpc_netty"):
jvm_maven_import_external(
name = "io_grpc_grpc_netty",
artifact = "io.grpc:grpc-netty:1.18.0",
server_urls = ["https://repo1.maven.org/maven2"],
licenses = ["notice"], # Apache 2.0
runtime_deps = [
"@io_netty_netty_buffer//jar",
"@io_netty_netty_codec//jar",
"@io_netty_netty_codec_http//jar",
"@io_netty_netty_handler//jar",
"@io_netty_netty_resolver//jar",
"@io_netty_netty_transport//jar",
],
artifact_sha256 = "9954db681d8a80c143603712bcf85ab9c76284fb5817b0253bba9ea773bb6803",
deps = [
"@io_netty_netty_codec_http2//jar",
],
)
if not native.existing_rule("io_grpc_grpc_stub"):
jvm_maven_import_external(
name = "io_grpc_grpc_stub",
artifact = "io.grpc:grpc-stub:1.18.0",
server_urls = ["https://repo1.maven.org/maven2"],
licenses = ["notice"], # Apache 2.0
artifact_sha256 = "6509fbbcf953f9c426f891021279b2fb5fb21a27c38d9d9ef85fc081714c2450",
)
if not native.existing_rule("io_grpc_grpc_protobuf"):
jvm_maven_import_external(
name = "io_grpc_grpc_protobuf",
artifact = "io.grpc:grpc-protobuf:1.18.0",
artifact_sha256 = "ab714cf4fec2c588f9d8582c2844485c287afa2a3a8da280c62404e312b2d2b1",
server_urls = ["https://repo1.maven.org/maven2"],
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_grpc_grpc_protobuf_lite"):
jvm_maven_import_external(
name = "io_grpc_grpc_protobuf_lite",
artifact = "io.grpc:grpc-protobuf-lite:1.18.0",
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "108a16c2b70df636ee78976916d6de0b8f393b2b45b5b62909fc03c1a928ea9b",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("javax_annotation_javax_annotation_api"):
jvm_maven_import_external(
name = "javax_annotation_javax_annotation_api",
artifact = "javax.annotation:javax.annotation-api:1.2",
tags = ["maven_coordinates=javax.annotation:javax.annotation-api:1.2"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "5909b396ca3a2be10d0eea32c74ef78d816e1b4ead21de1d78de1f890d033e04",
licenses = ["reciprocal"], # CDDL License
)
if not native.existing_rule("io_opencensus_opencensus_api"):
jvm_maven_import_external(
name = "io_opencensus_opencensus_api",
artifact = "io.opencensus:opencensus-api:0.21.0",
tags = ["maven_coordinates=io.opencensus:opencensus-api:0.21.0"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "8e2cb0f6391d8eb0a1bcd01e7748883f0033b1941754f4ed3f19d2c3e4276fc8",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("io_opencensus_opencensus_contrib_grpc_metrics"):
jvm_maven_import_external(
name = "io_opencensus_opencensus_contrib_grpc_metrics",
artifact = "io.opencensus:opencensus-contrib-grpc-metrics:0.21.0",
tags = ["maven_coordinates=io.opencensus:opencensus-contrib-grpc-metrics:0.21.0"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "29fc79401082301542cab89d7054d2f0825f184492654c950020553ef4ff0ef8",
licenses = ["notice"], # Apache 2.0
)
# Auto common
if not native.existing_rule("com_google_auto_common"):
java_import_external(
name = "com_google_auto_common",
jar_sha256 = "b876b5fddaceeba7d359667f6c4fb8c6f8658da1ab902ffb79ec9a415deede5f",
jar_urls = [
"https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/auto-common/0.10/auto-common-0.10.jar",
"https://repo1.maven.org/maven2/com/google/auto/auto-common/0.10/auto-common-0.10.jar",
],
licenses = ["notice"], # Apache 2.0
deps = ["@com_google_guava_guava//jar"],
)
# Auto service
if not native.existing_rule("com_google_auto_service"):
java_import_external(
name = "com_google_auto_service",
jar_sha256 = "46808c92276b4c19e05781963432e6ab3e920b305c0e6df621517d3624a35d71",
jar_urls = [
"https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/service/auto-service/1.0-rc2/auto-service-1.0-rc2.jar",
"https://repo1.maven.org/maven2/com/google/auto/service/auto-service/1.0-rc2/auto-service-1.0-rc2.jar",
],
licenses = ["notice"], # Apache 2.0
neverlink = True,
generated_rule_name = "compile",
generated_linkable_rule_name = "processor",
deps = [
"@com_google_auto_common",
"@com_google_guava_guava//jar",
],
extra_build_file_content = "\n".join([
"java_plugin(",
" name = \"AutoServiceProcessor\",",
" output_licenses = [\"unencumbered\"],",
" processor_class = \"com.google.auto.service.processor.AutoServiceProcessor\",",
" deps = [\":processor\"],",
")",
"",
"java_library(",
" name = \"com_google_auto_service\",",
" exported_plugins = [\":AutoServiceProcessor\"],",
" exports = [\":compile\"],",
")",
]),
)
# Auto value
if not native.existing_rule("com_google_auto_value"):
# AutoValue 1.6+ shades Guava, Auto Common, and JavaPoet. That's OK
# because none of these jars become runtime dependencies.
java_import_external(
name = "com_google_auto_value",
jar_sha256 = "8320edb037b62d45bc05ae4e1e21941255ef489e950519ef14d636d66870da64",
jar_urls = [
"https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/value/auto-value/1.7.4/auto-value-1.7.4.jar",
"https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.7.4/auto-value-1.7.4.jar",
],
licenses = ["notice"], # Apache 2.0
generated_rule_name = "processor",
exports = ["@com_google_auto_value_annotations"],
extra_build_file_content = "\n".join([
"java_plugin(",
" name = \"AutoAnnotationProcessor\",",
" output_licenses = [\"unencumbered\"],",
" processor_class = \"com.google.auto.value.processor.AutoAnnotationProcessor\",",
" tags = [\"annotation=com.google.auto.value.AutoAnnotation;genclass=${package}.AutoAnnotation_${outerclasses}${classname}_${methodname}\"],",
" deps = [\":processor\"],",
")",
"",
"java_plugin(",
" name = \"AutoOneOfProcessor\",",
" output_licenses = [\"unencumbered\"],",
" processor_class = \"com.google.auto.value.processor.AutoOneOfProcessor\",",
" tags = [\"annotation=com.google.auto.value.AutoValue;genclass=${package}.AutoOneOf_${outerclasses}${classname}\"],",
" deps = [\":processor\"],",
")",
"",
"java_plugin(",
" name = \"AutoValueProcessor\",",
" output_licenses = [\"unencumbered\"],",
" processor_class = \"com.google.auto.value.processor.AutoValueProcessor\",",
" tags = [\"annotation=com.google.auto.value.AutoValue;genclass=${package}.AutoValue_${outerclasses}${classname}\"],",
" deps = [\":processor\"],",
")",
"",
"java_library(",
" name = \"com_google_auto_value\",",
" exported_plugins = [",
" \":AutoAnnotationProcessor\",",
" \":AutoOneOfProcessor\",",
" \":AutoValueProcessor\",",
" ],",
" exports = [\"@com_google_auto_value_annotations\"],",
")",
]),
)
# Auto value annotations
if not native.existing_rule("com_google_auto_value_annotations"):
java_import_external(
name = "com_google_auto_value_annotations",
jar_sha256 = "fedd59b0b4986c342f6ab2d182f2a4ee9fceb2c7e2d5bdc4dc764c92394a23d3",
jar_urls = [
"https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.6/auto-value-annotations-1.7.4.jar",
"https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.7.4/auto-value-annotations-1.7.4.jar",
],
licenses = ["notice"], # Apache 2.0
neverlink = True,
default_visibility = ["@com_google_auto_value//:__pkg__"],
)
# Joda Time
if not native.existing_rule("joda_time"):
jvm_maven_import_external(
name = "joda_time",
artifact = "joda-time:joda-time:2.3",
tags = ["maven_coordinates=joda-time:joda-time:2.3"],
server_urls = ["https://repo1.maven.org/maven2"],
artifact_sha256 = "602fd8006641f8b3afd589acbd9c9b356712bdcf0f9323557ec8648cd234983b",
licenses = ["notice"], # Apache 2.0
)
if not native.existing_rule("native_utils"):
http_archive(
name = "native_utils",
url = "https://github.com/adamheinrich/native-utils/archive/e6a39489662846a77504634b6fafa4995ede3b1d.tar.gz",
sha256 = "6013c0988ba40600e238e47088580fd562dcecd4afd3fcf26130efe7cb1620de",
strip_prefix = "native-utils-e6a39489662846a77504634b6fafa4995ede3b1d",
build_file_content = """licenses(["notice"]) # MIT
java_library(
name = "native_utils",
visibility = ["//visibility:public"],
srcs = glob(["src/main/java/cz/adamh/utils/*.java"]),
)""",
)
# GoogleTest/GoogleMock framework. Used by most unit-tests.
if not native.existing_rule("com_google_googletest"):
# How to update:
# Googletest generally just does daily (or even subdaily) releases along
# with occasional numbered releases.
#
# https://github.com/google/googletest/commits/master
# pick a recent release.
# Hit the 'clipboard with a left arrow' icon to copy the commit hex
# COMMIT=<paste commit hex>
# PREFIX=googletest-
# REPO=https://github.com/google/googletest/archive
# URL=${REPO}/${COMMIT}.tar.gz
# wget $URL
# SHA256=$(sha256sum ${COMMIT}.tar.gz | cut -f1 -d' ')
# rm ${COMMIT}.tar.gz
# echo \# Commit from $(date --iso-8601=date)
# echo url = \"$URL\",
# echo sha256 = \"$SHA256\",
# echo strip_prefix = \"${PREFIX}${COMMIT}\",
#
http_archive(
name = "com_google_googletest",
# Commit from 2020-02-21
url = "https://github.com/google/googletest/archive//6f5fd0d7199b9a19faa9f499ecc266e6ae0329e7.tar.gz",
sha256 = "51e6c4b4449aab8f31e69d0ff89565f49a1f3628a42e24f214e8b02b3526e3bc",
strip_prefix = "googletest-6f5fd0d7199b9a19faa9f499ecc266e6ae0329e7",
)
# Google Benchmark framework. Used by benchmark tests.
if not native.existing_rule("com_github_google_benchmark"):
http_archive(
name = "com_github_google_benchmark",
url = "https://github.com/google/benchmark/archive/v1.5.1.tar.gz",
sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2",
strip_prefix = "benchmark-1.5.1",
)
# RE2 Regex Framework, mostly used in unit tests.
if not native.existing_rule("com_googlesource_code_re2"):
http_archive(
name = "com_googlesource_code_re2",
urls = [
"https://github.com/google/re2/archive/d1394506654e0a19a92f3d8921e26f7c3f4de969.tar.gz",
],
sha256 = "ac855fb93dfa6878f88bc1c399b9a2743fdfcb3dc24b94ea9a568a1c990b1212",
strip_prefix = "re2-d1394506654e0a19a92f3d8921e26f7c3f4de969",
)
# Jinja2.
if not native.existing_rule("jinja"):
http_archive(
name = "jinja",
# Jinja release 2.10
url = "https://github.com/pallets/jinja/archive/2.10.tar.gz",
strip_prefix = "jinja-2.10",
sha256 = "0d31d3466c313a9ca014a2d904fed18cdac873a5ba1f7b70b8fd8b206cd860d6",
build_file_content = """py_library(
name = "jinja2",
visibility = ["//visibility:public"],
srcs = glob(["jinja2/*.py"]),
deps = ["@markupsafe//:markupsafe"],
)""",
)
# Json.
if not native.existing_rule("json"):
http_archive(
name = "json",
# JSON for Modern C++
url = "https://github.com/nlohmann/json/archive/v3.7.3.zip",
strip_prefix = "json-3.7.3",
sha256 = "e109cd4a9d1d463a62f0a81d7c6719ecd780a52fb80a22b901ed5b6fe43fb45b",
build_file_content = """cc_library(
name="json",
visibility=["//visibility:public"],
hdrs=["single_include/nlohmann/json.hpp"]
)""",
)
if not native.existing_rule("markupsafe"):
http_archive(
name = "markupsafe",
urls = [
"https://github.com/pallets/markupsafe/archive/1.0.tar.gz",
],
sha256 = "dc3938045d9407a73cf9fdd709e2b1defd0588d50ffc85eb0786c095ec846f15",
strip_prefix = "markupsafe-1.0/markupsafe",
build_file_content = """py_library(
name = "markupsafe",
visibility = ["//visibility:public"],
srcs = glob(["*.py"])
)""",
)
if not native.existing_rule("google_bazel_common"):
http_archive(
name = "google_bazel_common",
strip_prefix = "bazel-common-e768dbfea5bac239734b3f59b2a1d7464c6dbd26",
urls = ["https://github.com/google/bazel-common/archive/e768dbfea5bac239734b3f59b2a1d7464c6dbd26.zip"],
sha256 = "17f66ba76073a290add024a4ce7f5f92883832b7da85ffd7677e1f5de9a36153",
)
if not native.existing_rule("org_publicsuffix"):
http_archive(
name = "org_publicsuffix",
strip_prefix = "list-d111481d5931f704c1d9d3a50af19e4e34fc5ba3",
urls = ["https://github.com/publicsuffix/list/archive/d111481d5931f704c1d9d3a50af19e4e34fc5ba3.zip"],
sha256 = "2f84929af28e2b712a235ab544fbb4dd7bd5d075ac351de0723915e528c99a38",
build_file_content = """licenses(["reciprocal"])
exports_files([
"LICENSE",
"public_suffix_list.dat",
"tests/test_psl.txt",
],
visibility = ["//visibility:public"]
)
alias(
name = "test_psl.txt",
actual = "tests/test_psl.txt",
visibility = ["//visibility:public"]
)
""",
)
##########################################################################
# Rules which depend on rules_foreign_cc
#
# These require a "./configure && make" style build and depend on an
# experimental project to allow building from source with non-bazel
# build systems.
#
# All of these archives basically just create filegroups and separate
# BUILD files introduce the relevant rules.
##########################################################################
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
bison_build_file_content = all_content + """
filegroup(
name = "bison_runtime_data",
srcs = glob(["data/**/*"]),
output_licenses = ["unencumbered"],
path = "data",
visibility = ["//visibility:public"],
)
exports_files(["data"])
"""
http_archive(
name = "bison",
build_file_content = bison_build_file_content,
strip_prefix = "bison-3.6.2",
sha256 = "e28ed3aad934de2d1df68be209ac0b454f7b6d3c3d6d01126e5cd2cbadba089a",
urls = [
"https://ftp.gnu.org/gnu/bison/bison-3.6.2.tar.gz",
"https://mirrors.kernel.org/gnu/bison/bison-3.6.2.tar.gz",
],
)
http_archive(
name = "flex",
build_file_content = all_content,
strip_prefix = "flex-2.6.4",
sha256 = "e87aae032bf07c26f85ac0ed3250998c37621d95f8bd748b31f15b33c45ee995",
urls = ["https://github.com/westes/flex/releases/download/v2.6.4/flex-2.6.4.tar.gz"],
patches = ["@com_google_zetasql//bazel:flex.patch"],
)
if not native.existing_rule("m4"):
http_archive(
name = "m4",
build_file_content = all_content,
strip_prefix = "m4-1.4.18",
sha256 = "ab2633921a5cd38e48797bf5521ad259bdc4b979078034a3b790d7fec5493fab",
urls = [
"https://ftp.gnu.org/gnu/m4/m4-1.4.18.tar.gz",
"https://mirrors.kernel.org/gnu/m4/m4-1.4.18.tar.gz",
],
patches = ["@com_google_zetasql//bazel:m4.patch"],
)
http_archive(
name = "icu",
build_file = "@com_google_zetasql//bazel:icu.BUILD",
strip_prefix = "icu",
sha256 = "53e37466b3d6d6d01ead029e3567d873a43a5d1c668ed2278e253b683136d948",
urls = ["https://github.com/unicode-org/icu/releases/download/release-65-1/icu4c-65_1-src.tgz"],
patches = ["@com_google_zetasql//bazel:icu4c-64_2.patch"],
)
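    # Usage note (illustrative): archives declared above with `build_file_content`
    # expose their targets under the external repository name, so a BUILD file in
    # this workspace could reference them roughly as follows (the target names
    # below are placeholders, not rules defined anywhere in this repository):
    #
    #   py_binary(
    #       name = "render_templates",   # hypothetical target
    #       srcs = ["render_templates.py"],
    #       deps = ["@jinja//:jinja2"],
    #   )
    #
    #   cc_library(
    #       name = "uses_json",          # hypothetical target
    #       srcs = ["uses_json.cc"],
    #       deps = ["@json//:json"],
    #   )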
|
the-stack_0_25797
|
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
import os
import sys
import json
import asyncio
import socks
import shutil
import argparse
from telethon import TelegramClient, events
from telethon.tl.functions.messages import GetMessagesRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.functions.photos import GetUserPhotosRequest
from telethon.tl.types import MessageService, MessageEmpty
from telethon.tl.types import PeerUser, PeerChat
from telethon.errors.rpcerrorlist import AccessTokenExpiredError, RpcCallFailError
from telethon.tl.types import MessageMediaGeo, MessageMediaPhoto, MessageMediaDocument, MessageMediaContact
from telethon.tl.types import DocumentAttributeFilename, DocumentAttributeAudio, DocumentAttributeVideo, MessageActionChatEditPhoto
API_ID = 0
API_HASH = ''
# number of messages requested per cycle; this value works well in practice
HISTORY_DUMP_STEP = 200
# lookahead counter, useful when the history appears to be incomplete;
# increase it to skip over longer runs of empty (deleted) messages
LOOKAHEAD_STEP_COUNT = 0
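# API_ID and API_HASH must be replaced with your own Telegram application
# credentials (obtainable at https://my.telegram.org); the placeholder values
# above will not work.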
def print_bot_info(bot_info):
print(f"ID: {bot_info.id}")
print(f"Name: {bot_info.first_name}")
print(f"Username: @{bot_info.username} - https://t.me/{bot_info.username}")
def print_user_info(user_info):
print("="*20 + f"\nNEW USER DETECTED: {user_info.id}")
print(f"First name: {user_info.first_name}")
print(f"Last name: {user_info.last_name}")
if user_info.username:
print(f"Username: @{user_info.username} - https://t.me/{user_info.username}")
else:
print("User has no username")
def save_user_info(user):
user_id = str(user.id)
user_dir = os.path.join(base_path, user_id)
if not os.path.exists(user_dir):
os.mkdir(user_dir)
user_media_dir = os.path.join(base_path, user_id, 'media')
if not os.path.exists(user_media_dir):
os.mkdir(user_media_dir)
json.dump(user.to_dict(), open(os.path.join(user_dir, f'{user_id}.json'), 'w'))
async def safe_api_request(coroutine, comment):
result = None
try:
result = await coroutine
except RpcCallFailError as e:
print(f"Telegram API error, {comment}: {str(e)}")
except Exception as e:
print(f"Some error, {comment}: {str(e)}")
return result
#TODO: save group photos
async def save_user_photos(user):
user_id = str(user.id)
user_dir = os.path.join(base_path, user_id)
result = await safe_api_request(bot(GetUserPhotosRequest(user_id=user.id,offset=0,max_id=0,limit=100)), 'get user photos')
if not result:
return
for photo in result.photos:
print(f"Saving photo {photo.id}...")
await safe_api_request(bot.download_file(photo, os.path.join(user_dir, f'{photo.id}.jpg')), 'download user photo')
async def save_media_photo(chat_id, photo):
user_media_dir = os.path.join(base_path, chat_id, 'media')
await safe_api_request(bot.download_file(photo, os.path.join(user_media_dir, f'{photo.id}.jpg')), 'download media photo')
def get_document_filename(document):
for attr in document.attributes:
if isinstance(attr, DocumentAttributeFilename):
return attr.file_name
# voice & round video
if isinstance(attr, DocumentAttributeAudio) or isinstance(attr, DocumentAttributeVideo):
return f'{document.id}.{document.mime_type.split("/")[1]}'
async def save_media_document(chat_id, document):
user_media_dir = os.path.join(base_path, chat_id, 'media')
filename = os.path.join(user_media_dir, get_document_filename(document))
if os.path.exists(filename):
old_filename, extension = os.path.splitext(filename)
filename = f'{old_filename}_{document.id}{extension}'
await safe_api_request(bot.download_file(document, filename), 'download file')
return filename
def remove_old_text_history(chat_id):
user_dir = os.path.join(base_path, str(chat_id))
history_filename = os.path.join(user_dir, f'{chat_id}_history.txt')
if os.path.exists(history_filename):
print(f"Removing old history of {chat_id}...")
os.remove(history_filename)
def save_text_history(chat_id, messages):
user_dir = os.path.join(base_path, str(chat_id))
if not os.path.exists(user_dir):
os.mkdir(user_dir)
history_filename = os.path.join(user_dir, f'{chat_id}_history.txt')
with open(history_filename, 'a', encoding='utf-8') as text_file:
text_file.write('\n'.join(messages)+'\n')
def save_chats_text_history():
for m_chat_id, messages_dict in messages_by_chat.items():
print(f"Saving history of {m_chat_id} as a text...")
new_messages = messages_dict['buf']
save_text_history(m_chat_id, new_messages)
messages_by_chat[m_chat_id]['history'] += new_messages
messages_by_chat[m_chat_id]['buf'] = []
async def get_chat_history(from_id=0, to_id=0, chat_id=None, lookahead=0):
print(f'Dumping history from {from_id} to {to_id}...')
messages = await bot(GetMessagesRequest(range(to_id, from_id)))
empty_message_counter = 0
history_tail = True
for m in messages.messages:
if isinstance(m.to_id, PeerUser):
m_chat_id = str(m.to_id.user_id) if int(m.from_id) == int(bot_id) else str(m.from_id)
elif isinstance(m.to_id, PeerChat):
m_chat_id = str(m.to_id.chat_id)
if isinstance(m, MessageEmpty):
empty_message_counter += 1
continue
elif empty_message_counter:
print(f'Empty messages x{empty_message_counter}')
empty_message_counter = 0
history_tail = False
message_text = ''
if m.media:
if isinstance(m.media, MessageMediaGeo):
message_text = f'Geoposition: {m.media.geo.long}, {m.media.geo.lat}'
elif isinstance(m.media, MessageMediaPhoto):
await save_media_photo(m_chat_id, m.media.photo)
message_text = f'Photo: media/{m.media.photo.id}.jpg'
elif isinstance(m.media, MessageMediaContact):
message_text = f'Vcard: phone {m.media.phone_number}, {m.media.first_name} {m.media.last_name}, rawdata {m.media.vcard}'
elif isinstance(m.media, MessageMediaDocument):
full_filename = await save_media_document(m_chat_id, m.media.document)
filename = os.path.split(full_filename)[-1]
message_text = f'Document: media/{filename}'
else:
print(m.media)
#TODO: add other media description
else:
if isinstance(m.action, MessageActionChatEditPhoto):
await save_media_photo(m_chat_id, m.action.photo)
message_text = f'Photo of chat was changed: media/{m.action.photo.id}.jpg'
elif m.action:
message_text = str(m.action)
if isinstance(m, MessageService):
#TODO: add text
pass
if m.message:
message_text = '\n'.join([message_text, m.message]).strip()
text = f'[{m.id}][{m.from_id}][{m.date}] {message_text}'
print(text)
if not m_chat_id in messages_by_chat:
messages_by_chat[m_chat_id] = {'buf': [], 'history': []}
messages_by_chat[m_chat_id]['buf'].append(text)
if m.from_id not in all_users:
full_user = await bot(GetFullUserRequest(m.from_id))
user = full_user.user
print_user_info(user)
save_user_info(user)
remove_old_text_history(m.from_id)
await save_user_photos(user)
all_users[m.from_id] = user
if empty_message_counter:
print(f'Empty messages x{empty_message_counter}')
history_tail = True
save_chats_text_history()
if not history_tail:
return await get_chat_history(from_id+HISTORY_DUMP_STEP, to_id+HISTORY_DUMP_STEP, chat_id, lookahead)
else:
if lookahead:
return await get_chat_history(from_id+HISTORY_DUMP_STEP, to_id+HISTORY_DUMP_STEP, chat_id, lookahead-1)
else:
print('History was fully dumped.')
print('Press Ctrl+C to stop live waiting for new messages...')
return None
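# Note on the dump loop above: each call fetches the message ids in
# range(to_id, from_id) (ids 0..HISTORY_DUMP_STEP-1 on the first call) and then
# recurses with both bounds shifted by HISTORY_DUMP_STEP. A window that ends in
# empty message ids is treated as the history tail; `lookahead` extra windows
# are still checked after that, so gaps of deleted messages do not end the dump
# prematurely.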
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--token", help="Telegram bot token to check")
parser.add_argument("--lookahead", help="Additional cycles to skip empty messages",
default=LOOKAHEAD_STEP_COUNT, type=int)
parser.add_argument("--tor", help="enable Tor socks proxy", action="store_true")
args = parser.parse_args()
proxy = (socks.SOCKS5, '127.0.0.1', 9050) if args.tor else None
all_chats = {}
all_users = {}
messages_by_chat = {}
    bot_token = input("Enter bot token: ") if not args.token else args.token
bot_id = bot_token.split(':')[0]
base_path = bot_id
if os.path.exists(base_path):
print(f"Bot {bot_id} info was dumped earlier, will be rewrited!")
first_launch = False
else:
os.mkdir(base_path)
first_launch = True
    # advantages of using Telethon for bots: https://github.com/telegram-mtproto/botapi-comparison
try:
bot = TelegramClient(os.path.join(base_path, bot_id), API_ID, API_HASH, proxy=proxy).start(bot_token=bot_token)
except AccessTokenExpiredError as e:
print("Token has expired!")
if first_launch:
shutil.rmtree(base_path)
sys.exit()
@bot.on(events.NewMessage)
async def save_new_user_history(event):
#TODO: old messages processing
user = event.message.sender
chat_id = event.message.chat_id
if not chat_id in all_chats:
all_chats[chat_id] = event.message.input_chat
messages_by_chat[chat_id] = {'history': [], 'buf': []}
#TODO: chat name display
print('='*20 + f'\nNEW CHAT DETECTED: {chat_id}')
if user.id not in all_users:
print_user_info(user)
save_user_info(user)
await save_user_photos(user)
print(event.message)
# TODO: new messages saving
loop = asyncio.get_event_loop()
me = loop.run_until_complete(bot.get_me())
print_bot_info(me)
user = loop.run_until_complete(bot(GetFullUserRequest(me)))
all_users[me.id] = user
user_info = user.user.to_dict()
user_info['token'] = bot_token
with open(os.path.join(base_path, 'bot.json'), 'w') as bot_info_file:
json.dump(user_info, bot_info_file)
loop.run_until_complete(get_chat_history(from_id=HISTORY_DUMP_STEP, to_id=0, lookahead=args.lookahead))
bot.run_until_disconnected()
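# Example invocation (the token below is a made-up placeholder):
#   python3 <this_script>.py --token 123456789:AAExampleBotTokenValue --lookahead 2 --tor
# Output is written to a directory named after the bot id (the part of the token
# before the colon), with a per-user subdirectory containing media and a text
# history file.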
|
the-stack_0_25800
|
from paymentrails.configuration import Configuration
from paymentrails.gateway import Gateway
class RecipientAccount:
"""
A class that facilitates Client requests to
the PaymentRails API in regards to Recipient Accounts.
"""
_attributes = {
"id": "",
"primary": "",
"currency": "",
"recipientAccountId": "",
"routeType": "",
"recipientFees": "",
"emailAddress": "",
"country": "",
"type": "",
"iban": "",
"accountNum": "",
"accountHolderName": "",
"swiftBic": "",
"branchId": "",
"bankId": "",
"bankName": "",
"bankAddress": "",
"bankCity": "",
"bankRegionCode": "",
"bankPostalCode": ""
}
@staticmethod
def findAll(recipient_id):
"""
Retrieve all the recipient accounts
A recipient_id is required::
RecipientAccount.findAll('R-fjeracjmuflh')
"""
config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)
return Gateway(config).recipient_account.findAll(recipient_id)
@staticmethod
def find(recipient_id, recipient_account_id):
"""
Retrieve a recipient account
A recipient_id and recipient_account_id are required::
RecipientAccount.find('R-fjeracjmuflh','A-2DQMpN4jurTFn9gRxobx4C')
"""
config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)
return Gateway(config).recipient_account.find(recipient_id, recipient_account_id)
@staticmethod
def create(recipient_id, body):
"""
Create a recipient account
A recipient_id and body are required::
RecipientAccount.create('R-4625iLug2GKqKZG2WzAf3e','payload')
"""
config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)
return Gateway(config).recipient_account.create(recipient_id, body)
@staticmethod
def update(recipient_id, recipient_account_id, body):
"""
Update a recipient account
A recipient_id, recipient_account_id, and body are required::
RecipientAccount.update('R-fjeracjmuflh','A-2DQMpN4jurTFn9gRxobx4C',
{"accountHolderName": "Acer Philips"})
"""
config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)
return Gateway(config).recipient_account.update(recipient_id,
recipient_account_id, body)
@staticmethod
def delete(recipient_id, recipient_account_id):
"""
Delete a recipient account
A recipient_id and recipient_account_id are required::
RecipientAccount.delete('R-fjeracjmuflh','A-2DQMpN4jurTFn9gRxobx4C')
"""
config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)
return Gateway(config).recipient_account.delete(recipient_id, recipient_account_id)
@staticmethod
def _initialize(attributes):
fields = [
"id",
"primary",
"currency",
"recipientAccountId",
"routeType",
"recipientFees",
"emailAddress",
"country",
"type",
"iban",
"accountNum",
"accountHolderName",
"swiftBic",
"branchId",
"bankId",
"bankName",
"bankAddress",
"bankCity",
"bankRegionCode",
"bankPostalCode",
]
for field in fields:
if attributes.get('account') is None:
RecipientAccount._attributes[field] = attributes.get(field)
elif attributes.get('account') is not None:
RecipientAccount._attributes[field] = attributes['account'].get(field)
return RecipientAccount._attributes
@staticmethod
def factory(attributes):
instance = RecipientAccount._initialize(attributes)
return instance
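# Usage sketch (all keys and ids below are placeholders; the recipient ids follow
# the examples in the docstrings above, and setting the Configuration class
# attributes directly is an assumption based on how they are read in this module):
#
#   from paymentrails.configuration import Configuration
#   Configuration.public_key = "YOUR_PUBLIC_KEY"
#   Configuration.private_key = "YOUR_PRIVATE_KEY"
#   Configuration.enviroment = "production"  # attribute name spelled as in this library
#
#   accounts = RecipientAccount.findAll("R-fjeracjmuflh")
#   account = RecipientAccount.find("R-fjeracjmuflh", "A-2DQMpN4jurTFn9gRxobx4C")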
|
the-stack_0_25801
|
import numpy as np
import cv2
from demo_helpers import config, capture_image, get_detection, calculate_frame_speed, decode_mobilenet_ssd, show_mobilenet_ssd
from time import time, sleep, monotonic
import os
import depthai
print('Using depthai module from: ', depthai.__file__)
# Create a list of enabled streams
stream_names = ['metaout', 'previewout']
device = depthai.Device('', False)
# create the pipeline; this is the first connection with the device
p = device.create_pipeline(config=config)
if p is None:
print('Pipeline is not created.')
exit(3)
while True:
    # retrieve data from the device;
    # data is stored in packets. nnet (Neural NETwork) packets have additional functions for interpreting the NNet results
nnet_packets, data_packets = p.get_available_nnet_and_data_packets(True)
ret, frame = capture_image(data_packets)
nnet_prev = get_detection(nnet_packets)
if ret:
frame_count = calculate_frame_speed()
nn_frame = show_mobilenet_ssd(nnet_prev["entries_prev"]['rgb'], frame, is_depth=0)
cv2.putText(nn_frame, "fps: " + str(frame_count), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 50, 50),2)
cv2.imshow("Mask detection", nn_frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
del p  # to stop the pipeline, the pipeline object should be deleted, otherwise the device will continue working. This is only required if you are going to add code after the main loop; otherwise you can omit it.
device.deinit_device()
print('py: DONE.')
|
the-stack_0_25803
|
"""
Main API for Vega-lite spec generation.
DSL mapping Vega types to IPython traitlets.
"""
import json

import jsonschema
import six
import pandas as pd
from .schema import core, channels, Undefined
from .data import data_transformers, pipe
from ... import utils
from .display import renderers
SCHEMA_URL = "https://vega.github.io/schema/vega-lite/v1.json"
def _get_channels_mapping():
mapping = {}
for attr in dir(channels):
cls = getattr(channels, attr)
if isinstance(cls, type) and issubclass(cls, core.SchemaBase):
mapping[cls] = attr.lower()
return mapping
#*************************************************************************
# Formula wrapper
# - makes field a required first argument of initialization
# - allows expr trait to be an Expression and processes it properly
#*************************************************************************
class Formula(core.Formula):
def __init__(self, field, expr=Undefined, **kwargs):
super(Formula, self).__init__(field=field, expr=expr, **kwargs)
#*************************************************************************
# Top-level Objects
#*************************************************************************
class TopLevelMixin(object):
_default_spec_values = {"width": 400, "height": 300}
_class_is_valid_at_instantiation = False
def _prepare_data(self):
if isinstance(self.data, (dict, core.Data)):
pass
elif isinstance(self.data, pd.DataFrame):
self.data = pipe(self.data, data_transformers.get())
elif isinstance(self.data, six.string_types):
self.data = core.Data(url=self.data)
def to_dict(self, *args, **kwargs):
copy = self.copy()
original_data = getattr(copy, 'data', Undefined)
copy._prepare_data()
# We make use of two context markers:
# - 'data' points to the data that should be referenced for column type
# inference.
# - 'top_level' is a boolean flag that is assumed to be true; if it's
# true then a "$schema" arg is added to the dict.
context = kwargs.get('context', {}).copy()
is_top_level = context.get('top_level', True)
context['top_level'] = False
if original_data is not Undefined:
context['data'] = original_data
kwargs['context'] = context
try:
dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
except jsonschema.ValidationError:
dct = None
# If we hit an error, then re-convert with validate='deep' to get
# a more useful traceback. We don't do this by default because it's
# much slower in the case that there are no errors.
if dct is None:
kwargs['validate'] = 'deep'
dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
if is_top_level:
# since this is top-level we add $schema if it's missing
if '$schema' not in dct:
dct['$schema'] = SCHEMA_URL
# add default values if present
if copy._default_spec_values:
dct = utils.update_nested(copy._default_spec_values, dct, copy=True)
return dct
def savechart(self, fp, format=None, **kwargs):
"""Save a chart to file in a variety of formats
Supported formats are json, html, png, svg
Parameters
----------
fp : string filename or file-like object
file in which to write the chart.
format : string (optional)
            the format to write: one of ['json', 'html', 'png', 'svg'].
If not specified, the format will be determined from the filename.
**kwargs :
Additional keyword arguments are passed to the output method
associated with the specified format.
"""
if isinstance(fp, six.string_types):
format = fp.split('.')[-1]
if format is None:
raise ValueError("must specify file format: "
"['png', 'eps', 'html', 'json']")
elif format == 'json':
utils.write_file_or_filename(fp, self.to_json(**kwargs), mode='w')
elif format == 'html':
from .html import HTML_TEMPLATE
opt = dict(renderer=kwargs.pop('renderer', 'canvas'),
actions=kwargs.pop('actions', False))
if opt['renderer'] not in ('canvas', 'svg'):
raise ValueError("renderer must be 'canvas' or 'svg'")
spec_html = HTML_TEMPLATE.format(spec=self.to_json(**kwargs),
opt=json.dumps(opt))
utils.write_file_or_filename(fp, spec_html, mode='w')
elif format in ['png', 'svg']:
raise NotImplementedError("saving of v1 specs")
utils.save_spec(self.to_dict(), fp, format=format, **kwargs)
else:
raise ValueError("unrecognized format: '{0}'".format(format))
# transform method
@utils.use_signature(core.Transform)
def transform_data(self, **kwargs):
"""Set the data transform by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, 'transform', **kwargs)
# Configuration methods
@utils.use_signature(core.Config)
def configure(self, **kwargs):
"""Set chart configuration"""
copy = self.copy()
return utils.update_subtraits(copy, 'config', **kwargs)
@utils.use_signature(core.AxisConfig)
def configure_axis(self, **kwargs):
"""Configure the chart's axes by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'axis'), **kwargs)
@utils.use_signature(core.CellConfig)
def configure_cell(self, **kwargs):
"""Configure the chart's cell's by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'cell'), **kwargs)
@utils.use_signature(core.LegendConfig)
def configure_legend(self, **kwargs):
"""Configure the chart's legend by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'legend'), **kwargs)
@utils.use_signature(core.OverlayConfig)
def configure_overlay(self, **kwargs):
"""Configure the chart's overlay by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'overlay'), **kwargs)
@utils.use_signature(core.MarkConfig)
def configure_mark(self, **kwargs):
"""Configure the chart's marks by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'mark'), **kwargs)
@utils.use_signature(core.ScaleConfig)
def configure_scale(self, **kwargs):
"""Configure the chart's scales by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'scale'), **kwargs)
@utils.use_signature(core.FacetConfig)
def configure_facet(self, **kwargs):
"""Configure the chart's scales by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'facet'), **kwargs)
@utils.use_signature(core.AxisConfig)
def configure_facet_axis(self, **kwargs):
"""Configure the facet's axes by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'facet', 'axis'),
**kwargs)
@utils.use_signature(core.CellConfig)
def configure_facet_cell(self, **kwargs):
"""Configure the facet's cells by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'facet', 'cell'),
**kwargs)
@utils.use_signature(core.FacetGridConfig)
def configure_facet_grid(self, **kwargs):
"""Configure the facet's grid by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'facet', 'grid'),
**kwargs)
@utils.use_signature(core.FacetScaleConfig)
def configure_facet_scale(self, **kwargs):
"""Configure the facet's scales by keyword args."""
copy = self.copy()
return utils.update_subtraits(copy, ('config', 'facet', 'scale'),
**kwargs)
# Display related methods
def _repr_mimebundle_(self, include, exclude):
"""Return a MIME bundle for display in Jupyter frontends."""
# Catch errors explicitly to get around issues in Jupyter frontend
# see https://github.com/ipython/ipython/issues/11038
try:
dct = self.to_dict()
except Exception:
utils.display_traceback(in_ipython=True)
return {}
else:
return renderers.get()(dct)
class Chart(TopLevelMixin, core.ExtendedUnitSpec):
def __init__(self, data=Undefined, encoding=Undefined, mark=Undefined,
width=400, height=300, **kwargs):
super(Chart, self).__init__(data=data, encoding=encoding, mark=mark,
width=width, height=height, **kwargs)
@utils.use_signature(core.MarkConfig)
def mark_area(self, **kwargs):
"""Set the mark to 'area' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'area'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_bar(self, **kwargs):
"""Set the mark to 'bar' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'bar'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_errorBar(self, **kwargs):
"""Set the mark to 'errorBar' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'errorBar'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_line(self, **kwargs):
"""Set the mark to 'line' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'line'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_point(self, **kwargs):
"""Set the mark to 'point' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'point'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_rule(self, **kwargs):
"""Set the mark to 'rule' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'rule'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_text(self, **kwargs):
"""Set the mark to 'text' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'text'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_tick(self, **kwargs):
"""Set the mark to 'tick' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'tick'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_circle(self, **kwargs):
"""Set the mark to 'circle' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'circle'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.MarkConfig)
def mark_square(self, **kwargs):
"""Set the mark to 'square' and optionally specify mark properties"""
copy = self.copy()
copy.mark = 'square'
return copy.configure_mark(**kwargs)
@utils.use_signature(core.Encoding)
def encode(self, *args, **kwargs):
"""Define the encoding for the Chart."""
if args:
mapping = _get_channels_mapping()
for arg in args:
encoding = mapping.get(type(arg), None)
if encoding is None:
raise NotImplementedError("non-keyword arg of type {0}"
"".format(type(arg)))
if encoding in kwargs:
raise ValueError("encode: encoding {0} specified twice"
"".format(encoding))
kwargs[encoding] = arg
for prop, field in list(kwargs.items()):
if not isinstance(field, core.SchemaBase):
cls = getattr(channels, prop.title())
# Don't validate now, because field will be computed
# as part of the to_dict() call.
kwargs[prop] = cls.from_dict(field, validate=False)
return utils.update_subtraits(self, 'encoding', **kwargs)
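# Example (illustrative sketch; the DataFrame and column names are placeholders):
#
#   import pandas as pd
#   df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
#   chart = Chart(df).mark_point().encode(x='x', y='y')
#   chart.savechart('chart.html')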
|
the-stack_0_25804
|
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
import json
import os
import os.path
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestSince(WatchmanTestCase.WatchmanTestCase):
def test_sinceIssue1(self):
root = self.mkdtemp()
self.touchRelative(root, "111")
self.touchRelative(root, "222")
self.watchmanCommand("watch", root)
self.assertFileList(root, ["111", "222"])
# Create a cursor for this state
self.watchmanCommand("since", root, "n:foo")
bar_dir = os.path.join(root, "bar")
os.mkdir(bar_dir)
self.touchRelative(bar_dir, "333")
self.waitForSync(root)
# We should not observe 111 or 222
self.assertFileList(root, cursor="n:foo", files=["bar", "bar/333"])
def test_sinceIssue2(self):
root = self.mkdtemp()
watch = self.watchmanCommand("watch", root)
self.assertFileList(root, files=[])
foo_dir = os.path.join(root, "foo")
os.mkdir(foo_dir)
self.touchRelative(foo_dir, "111")
self.waitForSync(root)
self.assertFileList(root, cursor="n:foo", files=["foo", "foo/111"])
bar_dir = os.path.join(foo_dir, "bar")
os.mkdir(bar_dir)
self.touchRelative(bar_dir, "222")
# wait until we observe all the files
self.assertFileList(root, files=["foo", "foo/111", "foo/bar", "foo/bar/222"])
# now check the delta for the since
expected = ["foo/bar", "foo/bar/222"]
files = self.getFileList(root, cursor="n:foo")
if watch["watcher"] in ("portfs", "kqueue", "dirfsevents", "kqueue+fsevents"):
# These systems also show the containing dir as modified
expected.append("foo")
elif watch["watcher"] == "win32":
# the containing directory sometimes(!) shows as modified
# on win32, but the important thing is that the other files
# show up in the list
files = [f for f in files if f != "foo"]
self.assertFileListsEqual(files, expected)
def test_sinceRelativeRoot(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
clock = self.watchmanCommand("clock", root)["clock"]
self.touchRelative(root, "a")
os.mkdir(os.path.join(root, "subdir"))
self.touchRelative(os.path.join(root, "subdir"), "foo")
self.assertFileList(root, files=["a", "subdir", "subdir/foo"])
res = self.watchmanCommand(
"query",
root,
{"since": clock, "relative_root": "subdir", "fields": ["name"]},
)
self.assertFileListsEqual(res["files"], ["foo"])
# touch a file outside the relative root
self.touchRelative(root, "b")
self.assertFileList(root, files=["a", "b", "subdir", "subdir/foo"])
res = self.watchmanCommand(
"query",
root,
{"since": res["clock"], "relative_root": "subdir", "fields": ["name"]},
)
expect = []
# Filter out 'foo' as some operating systems may report
# it and others may not. We're not interested in it here.
self.assertFileListsEqual(filter(lambda x: x != "foo", res["files"]), expect)
# touching just the subdir shouldn't cause anything to show up
self.touchRelative(root, "subdir")
self.waitForSync(root)
res = self.watchmanCommand(
"query",
root,
{"since": res["clock"], "relative_root": "subdir", "fields": ["name"]},
)
self.assertFileListsEqual(res["files"], [])
# touching a new file inside the subdir should cause it to show up
dir2 = os.path.join(root, "subdir", "dir2")
os.mkdir(dir2)
self.touchRelative(dir2, "bar")
self.waitForSync(root)
res = self.watchmanCommand(
"query",
root,
{"since": res["clock"], "relative_root": "subdir", "fields": ["name"]},
)
self.assertFileListsEqual(res["files"], ["dir2", "dir2/bar"])
def assertFreshInstanceForSince(self, root, cursor, empty=False):
res = self.watchmanCommand(
"query",
root,
{"since": cursor, "fields": ["name"], "empty_on_fresh_instance": empty},
)
self.assertTrue(res["is_fresh_instance"])
if empty:
self.assertFileListsEqual(res["files"], [])
else:
self.assertFileListsEqual(res["files"], ["111"])
def test_sinceFreshInstance(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
self.assertFileList(root, [])
self.touchRelative(root, "111")
res = self.watchmanCommand("query", root, {"fields": ["name"]})
self.assertTrue(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["111"])
# relative clock value, fresh instance
self.assertFreshInstanceForSince(root, "c:0:1:0:1", False)
        # old-style clock value (implies fresh instance, even if the
# pid is the same)
pid = self.watchmanCommand("get-pid")["pid"]
self.assertFreshInstanceForSince(root, "c:%s:1" % pid, False)
# -- decompose clock and replace elements one by one
clock = self.watchmanCommand("clock", root)["clock"]
p = clock.split(":")
# ['c', startTime, pid, rootNum, ticks]
self.assertEqual(len(p), 5)
# replace start time
self.assertFreshInstanceForSince(
root, ":".join(["c", "0", p[2], p[3], p[4]]), False
)
# replace pid
self.assertFreshInstanceForSince(
root, ":".join(["c", p[1], "1", p[3], p[4]]), False
)
# replace root number (also try empty_on_fresh_instance)
self.assertFreshInstanceForSince(
root, ":".join(["c", p[1], p[2], "0", p[4]]), True
)
# empty_on_fresh_instance, not a fresh instance
self.touchRelative(root, "222")
res = self.watchmanCommand(
"query",
root,
{"since": clock, "fields": ["name"], "empty_on_fresh_instance": True},
)
self.assertFalse(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["222"])
# fresh instance results should omit deleted files
os.unlink(os.path.join(root, "111"))
res = self.watchmanCommand(
"query", root, {"since": "c:0:1:0:1", "fields": ["name"]}
)
self.assertTrue(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["222"])
def test_reAddWatchFreshInstance(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
self.assertFileList(root, [])
self.touchRelative(root, "111")
res = self.watchmanCommand("query", root, {"fields": ["name"]})
self.assertTrue(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["111"])
clock = res["clock"]
os.unlink(os.path.join(root, "111"))
self.watchmanCommand("watch-del", root)
self.watchmanCommand("watch", root)
self.touchRelative(root, "222")
# wait for touch to be observed
self.assertFileList(root, ["222"])
# ensure that our since query is a fresh instance
res = self.watchmanCommand("query", root, {"since": clock, "fields": ["name"]})
self.assertTrue(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["222"])
def test_recrawlFreshInstance(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
self.touchRelative(root, "111")
self.assertFileList(root, ["111"])
res = self.watchmanCommand("query", root, {"fields": ["name"]})
self.assertTrue(res["is_fresh_instance"])
clock = res["clock"]
os.unlink(os.path.join(root, "111"))
self.watchmanCommand("debug-recrawl", root)
self.touchRelative(root, "222")
res = self.watchmanCommand("query", root, {"since": clock, "fields": ["name"]})
# In earlier versions of the server, the recrawl would always
# generate a fresh instance result set. This is no longer true.
self.assertFalse(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["111", "222"])
self.assertRegex(res["warning"], "Recrawled this watch")
def test_recrawlFreshInstanceWarningSuppressed(self):
root = self.mkdtemp()
with open(os.path.join(root, ".watchmanconfig"), "w") as f:
f.write(json.dumps({"suppress_recrawl_warnings": True}))
self.watchmanCommand("watch", root)
self.touchRelative(root, "111")
self.assertFileList(root, [".watchmanconfig", "111"])
res = self.watchmanCommand("query", root, {"fields": ["name"]})
self.assertTrue(res["is_fresh_instance"])
clock = res["clock"]
os.unlink(os.path.join(root, "111"))
self.watchmanCommand("debug-recrawl", root)
self.touchRelative(root, "222")
res = self.watchmanCommand("query", root, {"since": clock, "fields": ["name"]})
# In earlier versions of the server, the recrawl would always
# generate a fresh instance result set. This is no longer true.
self.assertFalse(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["111", "222"])
self.assertTrue("warning" not in res)
|
the-stack_0_25805
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 19:57:02 2015
@author: hoseung
"""
import load
import numpy as np
import utils.sampling as smp
import tree.halomodule as hmo
#wdir = './'
#wdir = '/home/hoseung/Work/data/AGN2/'
wdir = '/home/hoseung/Work/data/Aquarius/'
rscale = 5.0
lmax = 10
#nout = int(input("NOUT? \n"))
nout=193
s = load.sim.Sim()
s.setup(nout, wdir)
hh = hmo.Halo(base=wdir, nout=nout, halofinder='HM', info=s.info)
hh.load()
#%%
i_center = np.where(hh.data['np'] == max(hh.data['np']))[0]
import draw
import utils.util
utils.util.reimport(draw)
#region = smp.set_region_multi(xc=hh.data.x[i_center],
# yc=hh.data.y[i_center],
# zc=hh.data.z[i_center],
# radius = hh.data.rvir * rscale)
region=smp.set_region()
zmi = region["ranges"][2][0]
zma = region["ranges"][2][1]
#n_slices = 100
n_slices = (zma - zmi) * s.info.pboxsize / 0.05 # one slice = 50kpc
dz = (zma - zmi) / n_slices
depth_in_cm = dz * s.info.pboxsize * 3.084e24 # 1Mpc = 3.084e24 cm
npix=2000
ptype = 'gas_den'
import matplotlib.pyplot as plt
import numpy as np
import utils.prettyplot as ptt
from matplotlib.colors import LogNorm
vmax = 1e-1
vmin = 1e-7
nticks = 5
dpi = 400
#cname ='gist_ncar'
cname ='gist_rainbow'
show=False
#%%
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
# By Jake VanderPlas
# License: BSD-style
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return base.from_list(cmap_name, color_list, N)
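# Example: discrete_cmap(14, 'gist_rainbow') returns a colormap with 14 discrete
# color bins sampled evenly from 'gist_rainbow'; this is what gets passed to
# imshow() below via the `cmap` argument.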
N_color = 14
import utils.util
utils.util.reimport(draw.pp)
column = False
#import pyximport; pyximport.install()
for i in range(int(np.ceil(n_slices))):
#for i in range(51, 52):
region["ranges"][2]=[zmi + i * dz, zmi + (i+1) * dz]
s.set_ranges(region["ranges"])
#s.show_cpus()
s.add_hydro()
s.hydro.amr2cell(lmax=lmax)
field = draw.pp.pp_cell(s.hydro.cell, npix, s.info, verbose=False, column=column)
if column:
field /= depth_in_cm
for j in range(2):
save = wdir+str(i).zfill(3) + '_' + str(j) + 'hydro.png'
plt.ioff()
fig = plt.figure()
axes = fig.add_subplot(111)
if j == 0:
p = axes.imshow(field, cmap=discrete_cmap(N_color, cname),
norm=LogNorm(vmin=vmin, vmax=vmax))
if j == 1:
p = axes.imshow(field, cmap='gist_ncar',
norm=LogNorm(vmin=vmin, vmax=vmax))
xr = np.asarray(s.ranges[0])
yr = np.asarray(s.ranges[1])
x_old, x_new = ptt.tickInPboxInt(xr * s.info.pboxsize, field.shape[0], nticks=nticks)
y_old, y_new = ptt.tickInPboxInt(yr * s.info.pboxsize, field.shape[1], nticks=nticks)
fig.suptitle("Gas density averaged along the z-direction")
plt.xticks(x_old * field.shape[0], x_new)
plt.yticks(y_old * field.shape[1], y_new) # xticks, instead of set_xticks!
#axes.set_xticks(x_old * field.shape[0], x_new)
# axes.set_yticks(y_old * field.shape[1], y_new)
# if zposition:
annotation = 'z-range[Mpc]: {:.3f} - {:.3f}'.format(
region["ranges"][2][0]*s.info.pboxsize
,region["ranges"][2][1]*s.info.pboxsize)
axes.text(0.05, 1.02, annotation, transform = axes.transAxes, ha='left', fontsize=12)
cbar = plt.colorbar(p)
cbar.set_label(r'Hydrogen/$cm^{3}$', rotation=270, labelpad=10)
if show:
plt.show()
if save:
plt.savefig(save, dpi=dpi)
plt.close()
|
the-stack_0_25809
|
import numpy as np
import math
import os
import cv2
import torch
import random
import shutil
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def alpha2rot_y(alpha, x, cx, fx):
"""
Get rotation_y by alpha + theta - 180
alpha : Observation angle of object, ranging [-pi..pi]
x : Object center x to the camera center (x-W/2), in pixels
rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
"""
rot_y = alpha + np.arctan2(x - cx, fx)
if rot_y > np.pi:
rot_y -= 2 * np.pi
if rot_y < -np.pi:
rot_y += 2 * np.pi
return rot_y
def unproject_2d_to_3d1(pt_2d, depth, P):
# pts_2d: 2
# depth: 1
# P: 3 x 4
# return: 3
z = depth - P[2, 3]
x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0]
y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1]
pt_3d = np.array([x, y, z], dtype=np.float32)
return pt_3d
def unproject_2d_to_3d(pt_2d, depth, P):
# pts_2d: 2
# depth: 1
# P: 3 x 4
# return: 3
z = depth + P[2, 3]
x=(pt_2d[0]*z-P[0, 3]-depth*P[0,2])/P[0,0]
y = (pt_2d[1] * z - P[1, 3] - depth * P[1, 2]) / P[1, 1]
pt_3d = np.array([x, y, z], dtype=np.float32)
return pt_3d
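# Background for the two unprojection helpers above, assuming P has the usual
# KITTI form with P[2, :3] = [0, 0, 1]: the 3x4 matrix P maps a camera-frame
# point [X, Y, Z, 1]^T to pixels via  z_c * [u, v, 1]^T = P @ [X, Y, Z, 1]^T, so
#   u * z_c = P[0, 0] * X + P[0, 2] * Z + P[0, 3]
#   v * z_c = P[1, 1] * Y + P[1, 2] * Z + P[1, 3]
#   z_c     = Z + P[2, 3]
# unproject_2d_to_3d1 treats `depth` as z_c (and solves for Z first), while
# unproject_2d_to_3d treats `depth` as Z (and recovers z_c = depth + P[2, 3]).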
if __name__ == '__main__':
img2_path='/home/lipeixuan/3ddetectionkitti/data_object_image_2/training/image_2/'
img3_path = '/home/lipeixuan/3ddetectionkitti/data_object_image_3/training/image_3/'
calib_path = '/home/lipeixuan/3ddetectionkitti/data_object_calib/training/calib/'
label_path='/home/lipeixuan/3ddetectionkitti/label/training/label_2/'
outp='/home/lipeixuan/3ddetectionkitti/kitti_format/'
test_path='/home/lipeixuan/3ddetectionkitti/data_object_image_2/testing/image_2/'
image_target_path = outp + 'data/kitti/image/'
if not os.path.exists(image_target_path):
os.makedirs(image_target_path)
test_target_path = outp + 'data/kitti/test/'
if not os.path.exists(test_target_path):
os.makedirs(test_target_path)
calib_target_path = outp + 'data/kitti/calib/'
if not os.path.exists(calib_target_path):
os.makedirs(calib_target_path)
label_target_path = outp + 'data/kitti/label/'
if not os.path.exists(label_target_path):
os.makedirs(label_target_path)
images = os.listdir(test_path)
for idx in images:
img_name = os.path.join(test_path, idx)
shutil.copyfile(img_name, test_target_path + idx)
images=os.listdir(img2_path)
for idx in images:
img_name=os.path.join(img2_path,idx)
shutil.copyfile(img_name,image_target_path+idx)
images = os.listdir(img3_path)
for idx in images:
img_name = os.path.join(img3_path, idx)
idx_tar="{:06d}".format( int(float(idx[:6])+7481) )+'.png'
shutil.copyfile(img_name, image_target_path + idx_tar)
calibes = os.listdir(calib_path)
for idx in calibes:
img_name = os.path.join(calib_path, idx)
shutil.copyfile(img_name, calib_target_path + idx)
    calibes = os.listdir(calib_path)
for idx in calibes:
img_name = os.path.join(calib_path, idx)
idx_tar = "{:06d}".format( int(float(idx[:6])+7481) ) + '.txt'
shutil.copyfile(img_name, calib_target_path + idx_tar)
labeles = os.listdir(label_path)
for idx in labeles:
img_name = os.path.join(label_path, idx)
shutil.copyfile(img_name, label_target_path + idx)
calibes = os.listdir(calib_path)
labeles = os.listdir(label_path)
for idx in labeles:
img_name = os.path.join(label_path, idx)
idx_tar = "{:06d}".format( int(float(idx[:6])+7481) ) + '.txt'
shutil.copyfile(img_name, label_target_path + idx_tar)
# src=os.path.join(data_path,idx)
# num=int(float(idx[:-4]))
# num+=num_f
# frame_id = get_image_index_str(num)+'.png'
# dis=os.path.join(dis_image,frame_id)
# os.rename(src,dis)
# data_path = path + 'velodyne_points/data/'
# images = os.listdir(data_path)
# for idx in images:
# # shutil.copyfile(calib,calib_dis+idx[:-4]+'.txt')
|
the-stack_0_25810
|
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2022/5/4 1:14 PM
# Description : Actor and Critic network definitions for a discrete-action policy-gradient agent
"""
import torch.nn as nn
from torch.distributions.categorical import Categorical
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, hidden_dim):
super(Actor, self).__init__()
self.actor = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim),
nn.ReLU(),
nn.Softmax(dim=-1),
)
def forward(self, state):
dist = self.actor(state)
dist = Categorical(dist)
return dist
class Critic(nn.Module):
def __init__(self, state_dim, hidden_dim):
super(Critic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self, state):
value = self.critic(state)
return value
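
# Minimal usage sketch for the networks above; the dimensions below are
# arbitrary placeholders, not values required by any particular environment.
if __name__ == "__main__":
    import torch

    state_dim, action_dim, hidden_dim = 4, 2, 64
    actor = Actor(state_dim, action_dim, hidden_dim)
    critic = Critic(state_dim, hidden_dim)

    state = torch.randn(1, state_dim)
    dist = actor(state)               # Categorical distribution over actions
    action = dist.sample()            # sampled action index
    log_prob = dist.log_prob(action)  # log-probability of that action
    value = critic(state)             # state-value estimate
    print(action.item(), log_prob.item(), value.item())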
|
the-stack_0_25811
|
import pyautogui
import time
from datetime import date, datetime
data = date.today()
data_atual = data.strftime('%d/%m/%Y')
pyautogui.FAILSAFE = False
pyautogui.click(517, 453)
time.sleep(2)
pyautogui.write(data_atual)
time.sleep(2)
pyautogui.press("tab")
time.sleep(2)
pyautogui.write(data_atual)
#Query \/
time.sleep(5)
pyautogui.press("Enter")
time.sleep(10)
#Generate 'xls' \/
pyautogui.click(1432, 866)
time.sleep(15)
#'Yes' option \/
pyautogui.press(["left", "Enter"])
time.sleep(15)
#Save the file \/
pyautogui.hotkey("f12")
time.sleep(1)
#File name \/
pyautogui.write("teste")
time.sleep(2)
#Save as 'csv' \/
pyautogui.press(["tab", "down","up","up","up", "enter", "enter"])
time.sleep(2)
#Close the files \/
pyautogui.click(1895, 11)
pyautogui.click(1430, 192)
|
the-stack_0_25812
|
import argparse
import logging
import sys
import traceback
import stanza
from tqdm import tqdm
from tuw_nlp.grammar.ud_fl import UD_FL
from tuw_nlp.graph.fourlang import FourLang
from tuw_nlp.graph.lexical import LexGraphs
from tuw_nlp.graph.utils import graph_to_pn, pn_to_graph
from tuw_nlp.text.dictionary import Dictionary
from tuw_nlp.text.pipeline import CachedStanzaPipeline, CustomStanzaPipeline
from tuw_nlp.text.preprocessor import Preprocessor
class TextTo4lang():
def __init__(self, lang, nlp_cache, cache_dir=None):
if lang == 'de':
nlp = CustomStanzaPipeline(
processors='tokenize,mwt,pos,lemma,depparse')
elif lang == 'en':
nlp = stanza.Pipeline(
'en', processors='tokenize,mwt,pos,lemma,depparse')
elif lang == 'en_bio':
nlp = stanza.Pipeline(
'en', package="craft")
assert lang, "TextTo4lang does not have lang set"
self.lang = lang
self.nlp = CachedStanzaPipeline(nlp, nlp_cache)
self.ud_fl = UD_FL(cache_dir=cache_dir, lang=lang)
self.lexicon = Dictionary(lang)
self.graph_lexical = LexGraphs()
def add_definition(self, graph, node, definition, substitute, strategy):
sen = self.nlp(definition).sentences[0]
def_graph, root = self.parse(sen)
fourlang_graph = FourLang(def_graph, root, self.graph_lexical)
if len(def_graph.nodes()) > 0:
if strategy == "None":
graph.merge_definition_graph(
fourlang_graph, node, substitute)
elif strategy == "whitelisting":
fourlang_graph.whitelisting()
graph.merge_definition_graph(
fourlang_graph, node, substitute)
return [node[1]["name"] for node in fourlang_graph.G.nodes(data=True)]
def expand(self, graph, depth=1, substitute=False, expand_set=set(), strategy="None"):
if depth == 0:
return
if not expand_set:
nodes = [node for node in graph.G.nodes(data=True)]
else:
nodes = [node for node in graph.G.nodes(
data=True) if node[1]["name"] in expand_set]
for d_node, node_data in nodes:
if all(
elem not in node_data
for elem in ["expanded", "substituted"]):
node = graph.d_clean(node_data["name"]).split('_')[0]
if(node not in self.lexicon.stopwords or d_node == graph.root):
definition = self.lexicon.get_definition(node)
if definition:
definition_nodes = self.add_definition(
graph, d_node, definition, substitute, strategy)
if expand_set:
expand_set |= set(definition_nodes)
self.expand(graph, depth-1, substitute=substitute,
expand_set=expand_set, strategy=strategy)
def parse(self, sen):
fl = self.ud_fl.parse(sen, 'ud', "fl", 'amr-sgraph-src')
graph, root = pn_to_graph(fl)
relabeled_graph = self.graph_lexical.from_plain(graph)
return relabeled_graph, self.graph_lexical.vocab.get_id(
graph.nodes[root]["name"])
def __call__(self, text, depth=0, substitute=False, expand_set=set(), strategy="None"):
for sen in self.nlp(text).sentences:
graph, root = self.parse(sen)
fourlang = FourLang(graph, root, self.graph_lexical)
self.expand(fourlang, depth=depth, substitute=substitute, expand_set=expand_set, strategy=strategy)
yield fourlang.G
def __enter__(self):
self.nlp.__enter__()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.nlp.__exit__(exc_type, exc_value, exc_traceback)
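# Usage sketch (the cache paths below are placeholders): the class is a context
# manager so that the cached Stanza pipeline is opened and closed cleanly,
# mirroring what main() does below.
#
#   with TextTo4lang("en", "nlp_cache", "cache_dir") as tfl:
#       for graph in tfl("Dogs bark loudly.", depth=1):
#           print(graph_to_pn(graph))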
def get_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument("-cd", "--cache-dir", default=None, type=str)
parser.add_argument("-cn", "--nlp-cache", default=None, type=str)
parser.add_argument("-l", "--lang", default=None, type=str)
parser.add_argument("-d", "--depth", default=0, type=int)
parser.add_argument("-s", "--substitute", default=False, type=bool)
parser.add_argument("-p", "--preprocessor", default=None, type=str)
return parser.parse_args()
def main():
logging.basicConfig(
format="%(asctime)s : " +
"%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
logging.getLogger().setLevel(logging.WARNING)
args = get_args()
preproc = Preprocessor(args.preprocessor)
with TextTo4lang(args.lang, args.nlp_cache, args.cache_dir) as tfl:
for i, line in tqdm(enumerate(sys.stdin)):
try:
fl_graphs = list(tfl(preproc(line.strip())))
except (TypeError, IndexError, KeyError):
traceback.print_exc()
sys.stderr.write(f'error on line {i}: {line}')
print('ERROR')
continue
# sys.exit(-1)
print("\t".join(graph_to_pn(fl) for fl in fl_graphs))
if __name__ == "__main__":
main()
|
the-stack_0_25815
|
from warnings import warn
import numpy as np
import pandas as pd
def multilabel_sample(y, size=1000, min_count=5, seed=None):
""" Takes a matrix of binary labels `y` and returns
the indices for a sample of size `size` if
`size` > 1 or `size` * len(y) if size =< 1.
The sample is guaranteed to have > `min_count` of
each label.
"""
try:
if (np.unique(y).astype(int) != np.array([0, 1])).any():
raise ValueError()
except (TypeError, ValueError):
raise ValueError('multilabel_sample only works with binary indicator matrices')
if (y.sum(axis=0) < min_count).any():
raise ValueError('Some classes do not have enough examples. Change min_count if necessary.')
if size <= 1:
size = np.floor(y.shape[0] * size)
if y.shape[1] * min_count > size:
msg = "Size less than number of columns * min_count, returning {} items instead of {}."
warn(msg.format(y.shape[1] * min_count, size))
size = y.shape[1] * min_count
rng = np.random.RandomState(seed if seed is not None else np.random.randint(1))
if isinstance(y, pd.DataFrame):
choices = y.index
y = y.values
else:
choices = np.arange(y.shape[0])
sample_idxs = np.array([], dtype=choices.dtype)
# first, guarantee > min_count of each label
for j in range(y.shape[1]):
label_choices = choices[y[:, j] == 1]
label_idxs_sampled = rng.choice(label_choices, size=min_count, replace=False)
sample_idxs = np.concatenate([label_idxs_sampled, sample_idxs])
sample_idxs = np.unique(sample_idxs)
# now that we have at least min_count of each, we can just random sample
sample_count = int(size - sample_idxs.shape[0])
# get sample_count indices from remaining choices
remaining_choices = np.setdiff1d(choices, sample_idxs)
remaining_sampled = rng.choice(remaining_choices,
size=sample_count,
replace=False)
return np.concatenate([sample_idxs, remaining_sampled])
def multilabel_sample_dataframe(df, labels, size, min_count=5, seed=None):
""" Takes a dataframe `df` and returns a sample of size `size` where all
classes in the binary matrix `labels` are represented at
least `min_count` times.
"""
idxs = multilabel_sample(labels, size=size, min_count=min_count, seed=seed)
return df.loc[idxs]
def multilabel_train_test_split(X, Y, size, min_count=5, seed=None):
""" Takes a features matrix `X` and a label matrix `Y` and
returns (X_train, X_test, Y_train, Y_test) where all
classes in Y are represented at least `min_count` times.
"""
index = Y.index if isinstance(Y, pd.DataFrame) else np.arange(Y.shape[0])
test_set_idxs = multilabel_sample(Y, size=size, min_count=min_count, seed=seed)
train_set_idxs = np.setdiff1d(index, test_set_idxs)
test_set_mask = index.isin(test_set_idxs)
train_set_mask = ~test_set_mask
return (X[train_set_mask], X[test_set_mask], Y[train_set_mask], Y[test_set_mask])
|
the-stack_0_25816
|
import inspect
from fastapi import APIRouter, FastAPI
method_names = ["get", "put", "post", "delete", "options", "head", "patch", "trace"]
def test_signatures_consistency():
base_sig = inspect.signature(APIRouter.get)
for method_name in method_names:
router_method = getattr(APIRouter, method_name)
app_method = getattr(FastAPI, method_name)
router_sig = inspect.signature(router_method)
app_sig = inspect.signature(app_method)
param: inspect.Parameter
for key, param in base_sig.parameters.items():
router_param: inspect.Parameter = router_sig.parameters[key]
app_param: inspect.Parameter = app_sig.parameters[key]
assert param.annotation == router_param.annotation
assert param.annotation == app_param.annotation
assert param.default == router_param.default
assert param.default == app_param.default
|
the-stack_0_25818
|
#! /usr/local/bin/python3
# ----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ----------------------------------------------------------------------------
# This script is used to troubleshoot the process of sending CEF messages to the
# workspace.
# In this script we check the configuration of the daemon and the OMS linux agent.
# We send mock data to validate correctness of the pipeline
# Supported OS:
# 64-bit
# CentOS 6 and 7
# Amazon Linux 2017.09
# Oracle Linux 6 and 7
# Red Hat Enterprise Linux Server 6 and 7
# Debian GNU/Linux 8 and 9
# Ubuntu Linux 14.04 LTS, 16.04 LTS and 18.04 LTS
# SUSE Linux Enterprise Server 12
# 32-bit
# CentOS 6
# Oracle Linux 6
# Red Hat Enterprise Linux Server 6
# Debian GNU/Linux 8 and 9
# Ubuntu Linux 14.04 LTS and 16.04 LTS
# For more information please check the OMS-Agent-for-Linux documentation.
#
# Daemon versions:
# Syslog-ng: 2.1 - 3.22.1
# Rsyslog: v8
import sys
import select
import subprocess
import time
daemon_port = "514"
agent_port = "25226"
rsyslog_security_config_omsagent_conf_content_tokens = ["if", "contains", "then", "@127.0.0.1:25226", "CEF:", "ASA-"]
rh_firewalld_agent_exception_tokens = ["INPUT", "tcp", "--dport", "25226", "ACCEPT"]
syslog_ng_security_config_omsagent_conf_content_tokens = ["f_oms_filter", "oms_destination", "port(25226)", "tcp",
"source", "s_src", "oms_destination"]
oms_agent_configuration_content_tokens = [daemon_port, "127.0.0.1"]
oms_agent_process_name = "opt/microsoft/omsagent"
oms_agent_plugin_securiy_config = '/opt/microsoft/omsagent/plugin/security_lib.rb'
oms_agent_field_mapping_configuration = '/opt/microsoft/omsagent/plugin/filter_syslog_security.rb'
oms_agent_selinux_documentation = "https://docs.microsoft.com/azure/azure-monitor/platform/agent-linux"
syslog_log_dir = ["/var/log/syslog", "/var/log/messages"]
red_hat_rsyslog_security_enhanced_linux_documentation = "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/using_selinux/index"
red_hat_security_enhanced_permanent_documentation = "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/using_selinux/changing-selinux-states-and-modes_using-selinux#changing-selinux-modes_changing-selinux-states-and-modes"
rsyslog_daemon_forwarding_configuration_path = "/etc/rsyslog.d/security-config-omsagent.conf"
syslog_ng_daemon_forwarding_configuration_path = "/etc/syslog-ng/conf.d/security-config-omsagent.conf"
rsyslog_daemon_forwarding_configuration_dir_path = "/etc/rsyslog.d/"
syslog_ng_daemon_forwarding_configuration_dir_path = "/etc/syslog-ng/conf.d/"
rsyslog_daemon_name = "rsyslog.d"
rsyslog_process_name = "rsyslogd"
syslog_ng_process_name = "syslog-ng"
syslog_ng_default_config_path = "/etc/syslog-ng/syslog-ng.conf"
syslog_ng_documantation_path = "https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.26/administration-guide/34#TOPIC-1431029"
rsyslog_documantation_path = "https://www.rsyslog.com/doc/master/configuration/actions.html"
tcpdump_time_restriction = 60
def print_error(input_str):
print("\033[1;31;40m" + input_str + "\033[0m")
def print_ok(input_str):
print("\033[1;32;40m" + input_str + "\033[0m")
def print_warning(input_str):
print("\033[1;33;40m" + input_str + "\033[0m")
def print_notice(input_str):
print("\033[0;30;47m" + input_str + "\033[0m")
def print_command_response(input_str):
print("\033[1;34;40m" + input_str + "\033[0m")
def get_mock_message(index):
fixed_message = "0|TestCommonEventFormat|MOCK|common=event-format-test|end|TRAFFIC|1|rt=$common=event-formatted-receive_time deviceExternalId=0002D01655 src=1.1.1.1 dst=2.2.2.2 sourceTranslatedAddress=1.1.1.1 destinationTranslatedAddress=3.3.3.3 cs1Label=Rule cs1=CEF_TEST_InternetDNS "
return fixed_message + "|data" + str(index) + "=example"
def send_cef_message_local(port, amount):
try:
for index in range(0, amount):
message_to_send = get_mock_message(index)
command_tokens = ["logger", "-p", "local4.warn", "-t", "CEF:", message_to_send, "-P", str(port), "-n",
"127.0.0.1"]
logger = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)
o, e = logger.communicate()
if e is not None:
print_error("Error could not send cef mock message")
return
except OSError:
print_warning(
"Warning: Could not execute \'logger\' command which is a part of the syslog daemon this means that no mock message was sent to your workspace.")
def check_red_hat_firewall_issue():
'''
    Check whether firewalld is running on the device. If it is, check whether
    the agent port was added as an exception; if not, suggest how to add it and
    reload the firewall.
:return:
'''
print_notice("Checking if firewalld is installed.")
print_notice("systemctl status firewalld")
firewall_status = subprocess.Popen(["systemctl", "status", "firewalld"], stdout=subprocess.PIPE)
o, e = firewall_status.communicate()
if e is not None:
print_error("Error: could not check CentOS / RHEL 7 firewalld status.")
else:
if "running" in str(o):
print_warning(
"Warning: you have a firewall running on your linux machine this can prevent communication between the syslog daemon and the omsagent.")
print_notice("Checking if firewall has exception for omsagent port [" + agent_port + "]")
if validate_rh_firewall_exception():
print_ok("Found exception in the firewalld for the omsagent port.[" + agent_port + "]")
else:
print_warning("Warning: no exception found for omsagent in the firewall")
print_warning(
"You can add exception for the agent port[" + agent_port + "] by using the following commands:")
print_warning("Add exception:")
print_notice("sudo firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 -p tcp --dport " + agent_port + " -j ACCEPT")
print_warning("Reload the firewall:")
print_notice("sudo firewall-cmd --reload")
print_warning("Validate the exception was added in the configuration:")
print_notice("sudo firewall-cmd --direct --get-rules ipv4 filter INPUT")
print_warning("You can disable your firewall by using this command - not recommended:")
print_notice("sudo systemctl stop firewalld")
def validate_rh_firewall_exception():
'''
Validating that a firewall rule with the agents port as an exception exists
:return: True if exception was found, False otherwise
'''
iptables = subprocess.Popen(["sudo", "iptables-save"], stdout=subprocess.PIPE)
grep = subprocess.Popen(["sudo", "grep", agent_port], stdin=iptables.stdout, stdout=subprocess.PIPE)
o, e = grep.communicate()
if e is not None or o is None:
# either an error running the command or no rules exist
return False
else:
content = o.decode(encoding='UTF-8')
rules = content.split('\n')
for rule in rules:
# reviewing all rules until a match is found
is_exception = True
for token in rh_firewalld_agent_exception_tokens:
# comparing expected rule tokens with existing rule key words
if token not in rule:
# not an exception- exit loop and move to next rule
is_exception = False
break
if is_exception:
return True
return False
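# Illustrative sketch (assumption, not from the original script): the matching above
# treats a saved rule as the omsagent exception only if every expected token appears
# in it. With hypothetical tokens and a hypothetical port 25226:
#
#   example_tokens = ["--dport", "25226", "ACCEPT"]
#   example_rule = "-A INPUT -p tcp -m tcp --dport 25226 -j ACCEPT"
#   assert all(token in example_rule for token in example_tokens)
#
# The real token list comes from rh_firewalld_agent_exception_tokens defined earlier.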
def restart_red_hat_firewall_d():
'''
Method for restarting the firewall_d
:return:
'''
print("Trying to restart firewall_d")
print_notice("sudo firewall-cmd --reload")
restart = subprocess.Popen(["sudo", "firewall-cmd", "--reload"], stdout=subprocess.PIPE)
o, e = restart.communicate()
time.sleep(2)
if e is not None:
print_error("Error: could not get /etc/firewalld/zones/public.xml file holding firewall exceptions.")
else:
print_ok("Restarted firewalld.")
def security_enhanced_linux_enabled():
print("Checking if security enhanced linux is enabled")
print_notice("getenforce")
command_tokens = ["sudo", "getenforce"]
getenforce_command = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)
o, e = getenforce_command.communicate()
if e is not None or getenforce_command.returncode != 0:
print_error("Could not execute \'getenforce\' to check if security enhanced linux is enabled")
print_notice("please install \'policycoreutils\' package and run the troubleshoot script again")
else:
if o is not None and o.decode(encoding='UTF-8').strip() == 'Enforcing':
return True
return False
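# Note (assumption): `getenforce` prints one of "Enforcing", "Permissive" or "Disabled";
# only "Enforcing" is treated as a problem by the caller below.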
def security_enhanced_linux():
if security_enhanced_linux_enabled() is True:
print_warning(
"Security enhanced linux is in Enforcing mode.\n"
"This is not supported by the OMS Agent and can harm the communication with it.\n"
"For more information: " + oms_agent_selinux_documentation)
print_notice("To set SELinux to Permissive mode use elevated privileges to perform the following:")
print_notice("Run the following command to temporarily change SELinux to permissive mode: \"setenforce 0\"")
print_notice("Please restart the syslog daemon running on your machine")
print_notice("In order to make changes permanent please visit: " + red_hat_security_enhanced_permanent_documentation)
print_notice("For more information on SELinux: " + red_hat_rsyslog_security_enhanced_linux_documentation)
else:
pass
def rsyslog_get_cef_log_counter():
'''
Count, using tac, grep and wc -l, the number of CEF\ASA messages that have arrived
in the syslog file; the caller compares two counts to verify the number is increasing
:return:
'''
print("Validating the CEF\\ASA logs are received and are in the correct format when received by syslog daemon")
print_notice("sudo tac /var/log/syslog")
tac = subprocess.Popen(["sudo", "tac", syslog_log_dir[0]], stdout=subprocess.PIPE)
grep = subprocess.Popen(["grep", "-E", "CEF\|ASA"], stdin=tac.stdout, stdout=subprocess.PIPE)
count_lines = subprocess.Popen(["wc", "-l"], stdin=grep.stdout, stdout=subprocess.PIPE)
o, e = count_lines.communicate()
output = o.decode(encoding='UTF-8')
if e is None:
print("Located " + str(output) + " CEF\\ASA messages")
return int(output)
elif "No such file or directory" in output:
print("Validating the CEF\\ASA logs are received and are in the correct format when received by syslog daemon")
print_notice("sudo tac /var/log/messages")
tac = subprocess.Popen(["sudo", "tac", syslog_log_dir[1]], stdout=subprocess.PIPE)
grep = subprocess.Popen(["grep", "-E", "CEF\|ASA"], stdin=tac.stdout, stdout=subprocess.PIPE)
count_lines = subprocess.Popen(["wc", "-l"], stdin=grep.stdout, stdout=subprocess.PIPE)
o, e = count_lines.communicate()
output = o.decode(encoding='UTF-8')
if e is None:
print("Located " + str(output) + " CEF messages")
return int(output)
print_error("Error: could not find CEF\\ASA logs.")
print_notice("Notice: execute \"sudo tac /var/log/syslog or /var/log/messages | grep -E \"CEF|ASA\" -m 10\" manually.")
return 0
def rsyslog_cef_logs_received_in_correct_format():
print("Fetching CEF messages from daemon files.")
print("Taking 2 snapshots in 5 seconds diff and compering the amount of CEF messages.")
print("If found increasing CEF messages daemon is receiving CEF messages.")
start_amount = rsyslog_get_cef_log_counter()
time.sleep(5)
end_amount = rsyslog_get_cef_log_counter()
if end_amount > start_amount:
print_ok("Received CEF messages by the daemon")
else:
print_warning(
"Error: no CEF messages received by the daemon.\nPlease validate that you do send CEF messages to agent.")
def handle_tcpdump_line(line, incoming_port, ok_message):
if "CEF" in line or "ASA" in line:
print_ok(ok_message)
print_notice(
"Notice: To tcp dump manually execute the following command - \'tcpdump -A -ni any port " + incoming_port + " -vv\'")
time.sleep(1)
return True
# Handle command not found
elif "command not found" in line:
print_error(
"Notice that \'tcpdump\' is not installed in your linux machine.\nWe cannot monitor traffic without it.\nPlease install \'tcpdump\'.")
return False
else:
# print the output
print_command_response(line.rstrip())
return False
def incoming_logs_validations(incoming_port, ok_message, mock_message=False):
'''
Validate that there is incoming traffic of CEF messages to the given port
:param mock_message: whether to send mock messages to the monitored port while capturing, to validate it
:param incoming_port: port to validate
:param ok_message: message printed if found CEF messages
:return:
'''
start_seconds = int(round(time.time()))
end_seconds = int(round(time.time()))
print("This will take " + str(tcpdump_time_restriction) + " seconds.")
command_tokens = ["sudo", "tcpdump", "-A", "-ni", "any", "port", incoming_port, "-vv"]
print_notice(" ".join(command_tokens))
tcp_dump = subprocess.Popen(command_tokens, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
poll_obj = select.poll()
poll_obj.register(tcp_dump.stdout, select.POLLIN)
while (end_seconds - start_seconds) < tcpdump_time_restriction:
if mock_message is True:
# Sending mock messages
send_cef_message_local(daemon_port, 1)
poll_result = poll_obj.poll(0)
if poll_result:
line = str(tcp_dump.stdout.readline())
if handle_tcpdump_line(line, incoming_port, ok_message):
return True
end_seconds = int(round(time.time()))
print_error("Could not locate \"CEF\" message in tcpdump")
return False
def netstat_open_port(in_port, ok_message, error_message):
netstat = subprocess.Popen(["sudo", "netstat", "-an"], stdout=subprocess.PIPE)
print("Incoming port grep: " + in_port)
grep = subprocess.Popen(["grep", in_port], stdin=netstat.stdout, stdout=subprocess.PIPE)
o, e = grep.communicate()
output = o.decode(encoding='UTF-8')
print(output)
if e is None and in_port in output:
print_ok(ok_message)
return True
print_error(error_message)
return False
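# Illustrative note (assumption, example port only): a matching line in `netstat -an`
# output looks roughly like
#   tcp        0      0 0.0.0.0:25226           0.0.0.0:*               LISTEN
# so grepping for the port confirms that something is bound to it.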
def check_file_in_directory(file_name, path):
'''
Check if the given file is found in the given directory.
:param path:
:param file_name:
:return: True if it is found, False otherwise
'''
current_dir = subprocess.Popen(["ls", "-ltrh", path], stdout=subprocess.PIPE)
grep = subprocess.Popen(["grep", "-i", file_name], stdin=current_dir.stdout, stdout=subprocess.PIPE)
o, e = grep.communicate()
output = o.decode(encoding='UTF-8')
if e is None and file_name in output:
return True
return False
def locate_check(process_name):
'''
Check if the process_name is installed using the locate command
:param process_name:
:return: True if locate has returned a valid value else False
'''
try:
print("Trying to use the \'locate\' command to locate " + process_name)
locate = subprocess.Popen(["locate", process_name], stdout=subprocess.PIPE)
o, e = locate.communicate()
response = o.decode(encoding='UTF-8')
if e is not None:
print_warning("Warning: Could not execute \'locate\' command.")
print_notice(
"Notice: To install locate command - \"sudo yum install mlocate[On CentOS/RHEL]\" or \"sudo apt"
" install mlocate[On Debian/Ubuntu] \"")
elif response == "":
print_error("Error: Could not locate \'omsagent\' trying to validate by checking the process.\n")
return False
else:
print_ok("Located \'omsagent\'")
return True
except OSError:
print_warning("Warning: Could not execute \'locate\' command.")
print_notice("Notice: To install locate command - \"sudo yum install mlocate[On CentOS/RHEL]\" or \"sudo apt"
" install mlocate[On Debian/Ubuntu] \"")
def omsagent_process_check(oms_process_name):
tokens = process_check(oms_process_name)
if len(tokens) > 0:
for single_token in tokens:
if oms_agent_process_name in single_token:
print_ok("Found omsagent process running on this machine.")
return True
print_error("Error: Could not find omsagent process running on this machine.")
return False
def process_check(process_name):
'''
Check, using the ps -ef command, whether 'process_name' is running
:param process_name:
:return: list of matching process lines; empty if the process is not running
'''
p1 = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "-i", process_name], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["grep", "-v", "grep"], stdin=p2.stdout, stdout=subprocess.PIPE)
o, e = p3.communicate()
tokens = o.decode(encoding='UTF-8').split('\n')
tokens.remove('')
return tokens
def check_oms_agent_status():
'''
Checking if the OMS agent is installed and running. This is done by:
1. using the locate command, if one is installed
2. using ps -ef, to check if the agent process is running
:return: True if the agent is installed and/or running, False otherwise
'''
agent_name = "omsagent"
is_located = locate_check(agent_name)
is_process_running = process_check(agent_name)
if not is_located and not is_process_running:
print_error("Error: Oms agent is not installed or running on this machine")
return False
else:
return True
def check_omsagent_cisco_asa_configuration(workspace_id):
'''
Checking if the OMS agent is able to parse Cisco ASA:
:return: True if the configuration is updated, false otherwise
'''
grep = subprocess.Popen(["grep", "-i", "return ident if ident.include?('%ASA')",
oms_agent_plugin_securiy_config], stdout=subprocess.PIPE)
o, e = grep.communicate()
if not o:
print_warning("Warning: Current content of the omsagent security configuration doesn't support"
" Cisco ASA parsing.\nTo enable Cisco ASA firewall events parsing run the following: \n"
"\"sed -i \"s|return \'%ASA\' if ident.include?(\'%ASA\')"
"|return ident if ident.include?(\'%ASA\')|g\" " + oms_agent_plugin_securiy_config +
" && sudo /opt/microsoft/omsagent/bin/service_control restart " + workspace_id + "\"\n")
return False
else:
print_ok("omsagent security configuration supports Cisco ASA parsing \n")
return True
def check_syslog_computer_field_mapping(workspace_id):
'''
Checking if the OMS agent maps the Computer field correctly:
:return: True if the mapping configuration is correct, false otherwise
'''
grep = subprocess.Popen(["grep", "-i", "'Host' => record\['host'\]",
oms_agent_field_mapping_configuration], stdout=subprocess.PIPE)
o, e = grep.communicate()
if not o:
print_warning("Warning: Current content of the omsagent syslog filter mapping configuration doesn't map the"
" Computer field from your hostname.\nTo enable the Computer field mapping, please run: \n"
"\"sed -i -e \"/'Severity' => tags\[tags.size - 1\]/ a \ \\t 'Host' => record['host']\""
" -e \"s/'Severity' => tags\[tags.size - 1\]/&,/\" " + oms_agent_field_mapping_configuration +
" && sudo /opt/microsoft/omsagent/bin/service_control restart " + workspace_id + "\"")
return False
else:
print_ok("OMS Agent syslog field mapping is correct \n")
return True
def file_contains_string(file_tokens, file_path):
print_notice(file_path)
content = open(file_path).read()
print_command_response("Current content of the daemon configuration is:\n" + content)
return all(check_token(token, content) for token in file_tokens)
def sudo_read_file_contains_string(file_tokens, file_path):
restart = subprocess.Popen(["sudo", "cat", file_path], stdout=subprocess.PIPE)
o, e = restart.communicate()
if e is not None:
print_error("Error: could not load " + file_path)
return False
else:
content = o.decode(encoding='UTF-8')
print_command_response("Current content of the daemon configuration is:\n" + content)
return all(check_token(token, content) for token in file_tokens)
def check_token(tokens, file_content):
splited_tokens = tokens.split("|")
return any(token in file_content for token in splited_tokens)
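# Illustrative note (assumption): a token such as "CEF|ASA-" acts as an alternation,
# so the daemon configuration is accepted if its content contains either part:
#
#   assert check_token("CEF|ASA-", 'if $rawmsg contains "CEF:" then @@127.0.0.1:25226')
#
# 25226 is only an example port here; the real values come from the token lists
# defined earlier in this script.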
def test_daemon_configuration(daemon_name):
'''
Checking if the daemon configuration file and folder exists
:param daemon_name:
:return: True if exists
'''
print("Testing if the daemon configuration folder exists")
is_daemon_dir_exists = check_file_in_directory(daemon_name, "/etc/")
if not is_daemon_dir_exists:
print_error("Could not locate " + daemon_name + " directory.[under \'/etc/\']")
return False
print_ok("Located /etc/" + daemon_name + " directory.")
print("Checking omsagent configuration under the name of: \'security-config-omsagent.conf\'")
config_exists = check_file_in_directory("security-config-omsagent.conf",
rsyslog_daemon_forwarding_configuration_dir_path if daemon_name == rsyslog_daemon_name else syslog_ng_daemon_forwarding_configuration_dir_path)
if not config_exists:
print_error("security-config-omsagent.conf does not exists in " + daemon_name + " directory")
return False
else:
print_ok("Located security-config-omsagent.conf")
return True
def validate_daemon_configuration_content(daemon_name, valid_content_tokens_arr):
print("Trying to validate the content of daemon configuration.")
print_notice(
"For extra verification please make sure the configuration content is as defined in the documentation.")
# set path according to the daemon
path = rsyslog_daemon_forwarding_configuration_path if daemon_name == rsyslog_daemon_name else syslog_ng_daemon_forwarding_configuration_path
if not file_contains_string(valid_content_tokens_arr, path):
return False
else:
return True
def security_config_omsagent_test(workspace_id):
path = "/etc/opt/microsoft/omsagent/" + workspace_id + "/conf/omsagent.d/"
is_security_config_omsagent_dir_exists = check_file_in_directory("security_events.conf", path)
if not is_security_config_omsagent_dir_exists:
print_error(
"Error: Could not locate security_events.conf which configures the OMS agent to listen on port " + agent_port)
return False
else:
print_ok("Located security_events.conf")
return True
def omsagent_security_event_conf_validation(workspace_id):
path = "/etc/opt/microsoft/omsagent/" + workspace_id + "/conf/omsagent.d/security_events.conf"
print_notice("Validating " + path + " content.")
if not sudo_read_file_contains_string(file_tokens=oms_agent_configuration_content_tokens, file_path=path):
print_error("Could not locate necessary port and ip in the agent's configuration.\npath:" + path)
else:
print_ok("Omsagent event configuration content is valid")
def check_daemon(daemon_name):
tokens = process_check(daemon_name)
print(tokens)
if len(tokens) > 0:
for single_token in tokens:
if "/usr/sbin/" + daemon_name in single_token:
print_ok("Found " + daemon_name + " process running on this machine.")
return True
elif check_file_in_directory(daemon_name, "/etc/"):
print_notice("Notice: " + daemon_name + " is not running but found configuration directory for it.")
return False
return False
def restart_daemon(daemon_name):
print("Restarting " + daemon_name + " daemon - \'sudo service rsyslog restart\'")
restart = subprocess.Popen(["sudo", "service", daemon_name, "restart"], stdout=subprocess.PIPE)
o, e = restart.communicate()
if e is not None:
print_error("Error: could not restart " + daemon_name + "syslog daemon")
return
else:
print_ok("" + daemon_name + " daemon restarted.")
print("This will take a few seconds.")
time.sleep(8)
def restart_omsagent(workspace_id):
restart = subprocess.Popen(["sudo", "/opt/microsoft/omsagent/bin/service_control", "restart", workspace_id],
stdout=subprocess.PIPE)
o, e = restart.communicate()
if e is not None:
print_error("Error: could not restart omsagent")
return
else:
print_ok("Omsagent restarted.")
print("This will take a few seconds.")
time.sleep(8)
def check_rsyslog_configuration():
udp = False
tcp = False
if check_file_in_directory("rsyslog.conf", "/etc/"):
content = open("/etc/rsyslog.conf").read()
lines = content.split("\n")
print("Checking daemon incoming connection for tcp and udp")
for line in lines:
# second part is for red hat [DPServerRun]
if "imudp" in line or "DPServerRun" in line:
if "#" in line:
udp = False
else:
udp = True
# second part is for red hat [InputTCPServerRun]
if "imtcp" in line or "InputTCPServerRun" in line:
if "#" in line:
tcp = False
else:
tcp = True
if not udp:
print_warning("Warning: udp communication is not enabled to the daemon.")
if not tcp:
print_warning("Warning: tcp communication is not enabled to the daemon.")
return udp or tcp
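# Illustrative example (assumption): the substring checks above match both the current
# and the legacy rsyslog syntax in /etc/rsyslog.conf, for example:
#
#   module(load="imtcp")      -> tcp enabled
#   #module(load="imudp")     -> udp present but commented out, treated as disabled
#   $InputTCPServerRun 514    -> legacy tcp directive, matched via "InputTCPServerRun"
#   $UDPServerRun 514         -> legacy udp directive, matched via "DPServerRun"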
def handle_syslog_ng(workspace_id):
print("Checking syslog-ng:")
if test_daemon_configuration("syslog-ng"):
daemon_config_valid = validate_daemon_configuration_content("syslog-ng",
syslog_ng_security_config_omsagent_conf_content_tokens)
if daemon_config_valid:
print_ok("Syslog-ng daemon configuration was found valid.")
print("Trying to restart syslog daemon")
restart_daemon("syslog-ng")
restart_omsagent(workspace_id)
netstat_open_port("0.0.0.0:" + daemon_port, "Daemon incoming port " + daemon_port + " is open",
"Error: daemon incoming port is not open, please check that the process is up and running and the port is configured correctly.")
netstat_open_port(agent_port, "Omsagent is listening to incoming port " + agent_port,
"Error: agent is not listening to incoming port " + agent_port + " please check that the process is up and running and the port is configured correctly.[Use netstat -an | grep [daemon port] to validate the connection or re-run ths script]")
print("Validating CEF into syslog-ng daemon")
time.sleep(1)
incoming_logs_validations(daemon_port,
"Received CEF message in daemon incoming port.[" + daemon_port + "]", mock_message=False)
time.sleep(1)
incoming_logs_validations(agent_port,
"Received CEF message in agent incoming port.[" + agent_port + "]", mock_message=False)
else:
print_error("Error: syslog-ng daemon configuration was found invalid.")
print_notice("Notice: please make sure:")
print_notice("\t/etc/syslog-ng/conf.d/security-config-omsagent.conf file exists")
def handle_rsyslog(workspace_id):
print("Checking rsyslog daemon:")
if test_daemon_configuration("rsyslog.d"):
print_ok(
"rsyslog daemon found, checking daemon configuration content - forwarding all data to port " + daemon_port)
daemon_config_valid = validate_daemon_configuration_content("rsyslog.d",
rsyslog_security_config_omsagent_conf_content_tokens)
if not daemon_config_valid:
print_error("Error: found an outdated rsyslog daemon configuration file: " + rsyslog_daemon_forwarding_configuration_path)
print_notice("The updated file should contain the following configuration: \'if $rawmsg contains \"CEF:\""
" or $rawmsg contains \"ASA-\" then @@127.0.0.1:" + agent_port + "\'")
print_notice("Notice: Please run the following command to update the configuration and restart the rsyslog daemon:")
print_notice("\"echo \'if $rawmsg contains \"CEF:\" or $rawmsg contains \"ASA-\" then @@127.0.0.1:" + agent_port +
"\' > /etc/rsyslog.d/security-config-omsagent.conf && service rsyslog restart\"")
else:
print_ok("rsyslog daemon configuration was found valid.")
print("Trying to restart syslog daemon")
restart_daemon("rsyslog")
restart_omsagent(workspace_id)
netstat_open_port("0.0.0.0:" + daemon_port, "Daemon incoming port " + daemon_port + " is open",
"Error: daemon incoming port is not open, please check that the process is up and running and the port is configured correctly.\nAction: enable ports in \'/etc/rsyslog.conf\' file which contains daemon incoming ports.")
netstat_open_port(agent_port, "Omsagent is listening to incoming port " + agent_port,
"Error: agent is not listening to incoming port " + agent_port + " please check that the process is up and running and the port is configured correctly.[Use netstat -an | grep [daemon port] to validate the connection or re-run ths script]")
print("Validating CEF\\ASA into rsyslog daemon - port " + daemon_port)
time.sleep(1)
incoming_logs_validations(daemon_port,
"Received CEF\\ASA message in daemon incoming port.[" + daemon_port + "]", mock_message=False)
time.sleep(1)
rsyslog_cef_logs_received_in_correct_format()
# after validating logs are arriving validation that the daemon will accept them
if check_rsyslog_configuration():
incoming_logs_validations(agent_port,
"Received CEF message in agent incoming port.[" + agent_port + "]", mock_message=False)
time.sleep(1)
def print_full_disk_warning():
warn_message = "Warning: please make sure your logging daemon configuration does not store unnecessary logs. " \
"This may cause a full disk on your machine, which will disrupt the function of the oms agent installed." \
" For more information:"
if check_daemon(rsyslog_process_name):
if check_daemon(syslog_ng_process_name):
print_warning(warn_message + '\n' + rsyslog_documantation_path + '\n' + syslog_ng_documantation_path)
else:
print_warning(warn_message + '\n' + rsyslog_documantation_path)
elif check_daemon(syslog_ng_process_name):
print_warning(warn_message + '\n' + syslog_ng_documantation_path)
else:
print_warning("No daemon was found on the machine")
def main():
print_notice("Note this script should be run in elevated privileges")
print_notice("Please validate you are sending CEF messages to agent machine.")
if len(sys.argv) != 2:
print_error("The installation script is expecting 1 arguments:")
print_error("\t1) workspace id")
return
else:
workspace_id = sys.argv[1]
# test the oms agent is installed
check_oms_agent_status()
# test oms agent configuration
security_config_omsagent_test(workspace_id=workspace_id)
omsagent_security_event_conf_validation(workspace_id=workspace_id)
check_omsagent_cisco_asa_configuration(workspace_id=workspace_id)
check_syslog_computer_field_mapping(workspace_id=workspace_id)
# validate firewalld
check_red_hat_firewall_issue()
# Check issue regarding security enhanced linux
security_enhanced_linux()
# testing that the daemon is running
if check_daemon("rsyslog"):
handle_rsyslog(workspace_id)
elif check_daemon("syslog-ng"):
handle_syslog_ng(workspace_id)
print("Simulating mock data which you can find in your workspace")
# we always simulate to the daemon port
incoming_logs_validations(agent_port, "Mock messages sent and received in daemon incoming port [" + daemon_port + "] and to the omsagent port [" + agent_port + "].", mock_message=True)
print_full_disk_warning()
print_ok("Completed troubleshooting.")
print(
"Please check Log Analytics to see if your logs are arriving. All events streamed from these appliances appear in raw form in Log Analytics under CommonSecurityLog type")
print("Notice: If no logs appear in workspace try looking at omsagent logs:")
print_notice("tail -f /var/opt/microsoft/omsagent/" + workspace_id + "/log/omsagent.log")
print_warning("Warning: Make sure that the logs you send comply with RFC 5424.")
main()
|
the-stack_0_25821
|
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.shell.command import map_parameters
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.systeminfo import os_is_windows
from cloudmesh.common.util import banner
from cloudmesh.common.Shell import Shell
from cloudmesh.common.Printer import Printer
from cloudmesh.common.Host import Host
from cloudmesh.common.console import Console
from cloudmesh.slurm.slurm import Slurm
import subprocess
class SlurmCommand(PluginCommand):
# noinspection PyUnusedLocal
@command
def do_slurm(self, args, arguments):
"""
::
Usage:
slurm pi install [--workers=WORKERS] [--partition=pis]
slurm pi install as host [--os=OS] [--hosts=HOSTS] [--partition=pis]
slurm pi example --n=NUMBER
slurm pi sbatch initialize [--hosts=HOSTS]
This command installs slurm on the current PI and also worker nodes if you specify them.
The manager can also be a worker by using the single-node method. For example, red can be
a manager and worker, simultaneously, by issuing
cms slurm pi install as host --hosts=red,red
Arguments:
COMMAND the slurm command to be executed [default: salloc]
Options:
--partition=pis specifies the name of the slurm partition to be created [default: pis]
Description:
Install:
pip install cloudmesh-slurm
cms help
cms slurm pi install
Example:
cms slurm pi example --n=4 [COMMAND]
MODE is one of salloc, srun, sbatch
will run the command
salloc -N 4 mpiexec python -m mpi4py.bench helloworld
API:
from cloudmesh.slurm.slurm import Slurm
from cloudmesh.slurm import Slurm
Slurm.install()
in case you use an instance instead of the class Slurm:
slurm = Slurm()
slurm.install()
"""
map_parameters(arguments,
"hosts", "workers", "partition")
VERBOSE(arguments)
if arguments["as"] and arguments.host and arguments.pi and arguments.install:
" slurm pi install as host [--workers=WORKERS] [--partition=PARTITION]"
from cloudmesh.slurm.workflow import Workflow
VERBOSE(arguments)
thelambdafunction = lambda: Slurm.install(workers=workers, is_host_install=True,
input_manager=manager, hosts=arguments.hosts,
partition=arguments.partition)
steps = [
thelambdafunction, thelambdafunction, thelambdafunction, thelambdafunction
]
manager = arguments.hosts[:arguments.hosts.index(",")]
workers = (arguments.hosts.split(",",1)[1])
# workers = Parameter.expand(arguments.hosts)[1:]
# manager = Parameter.expand(arguments.hosts)[0]
'''
step0 = Slurm.install(workers=workers, is_host_install=True, input_manager=manager, hosts=arguments.hosts)
step1 = Slurm.install(workers=workers, is_host_install=True, input_manager=manager, hosts=arguments.hosts)
step2 = Slurm.install(workers=workers, is_host_install=True, input_manager=manager,
mount=arguments.mount, hosts=arguments.hosts)
step3 = Slurm.install(workers=workers, is_host_install=True, input_manager=manager, hosts=arguments.hosts)
step4 = Slurm.install(workers=workers, is_host_install=True, input_manager=manager, hosts=arguments.hosts)
'''
w = Workflow(arguments.hosts,trials=1,delay=1)
w.run(steps=steps)
'''
workers = Parameter.expand(arguments.hosts)[1:]
manager = Parameter.expand(arguments.hosts)[0]
Slurm.install(is_host_install=True, input_manager=manager, hosts=arguments.hosts)
'''
elif arguments.install and arguments.pi and not arguments["as"]:
# slurm pi install [--interactive] [--os=OS] [--workers=WORKERS]
# arguments.workers = Parameter.expand(arguments.workers)
Slurm.install(workers=arguments.workers, partition=arguments.partition)
elif arguments.pi and arguments.example:
# slurm pi example --n=NUMBER [COMMAND]
# salloc -N 4 mpiexec python -m mpi4py.bench helloworld
number_nodes = arguments["--n"]
command = f"salloc -N {number_nodes} mpiexec python -m mpi4py.bench helloworld"
try:
r = Shell.run(command)
print(r)
except subprocess.CalledProcessError as e:
if os_is_windows():
banner('You may have run the command on host by mistake. Please run '
'this command on the Pi.')
elif arguments.sbatch and arguments.initialize:
if not arguments.hosts:
Console.error("Please supply the hostnames of the Pis to initialize sbatch on.")
return ""
manager = arguments.hosts[:arguments.hosts.index(",")]
workers = (arguments.hosts.split(",", 1)[1])
results = Host.ssh(hosts=arguments.hosts,
command="mkdir ~/cm")
print(Printer.write(results))
results = Host.ssh(hosts=arguments.hosts,
command="cd ~/cm ; git clone https://github.com/cloudmesh/cloudmesh-mpi.git")
print(Printer.write(results))
results = Host.ssh(hosts=manager,
command="cd ~/cm/cloudmesh-mpi/doc/chapters/slurm/configs ; sbatch helloworld.slurm")
print(Printer.write(results))
return ""
|
the-stack_0_25822
|
import collections
import json
import logging
import irc
import regex as re
import requests
from pajbot.managers.schedule import ScheduleManager
from pajbot.modules.ascii import AsciiProtectionModule
log = logging.getLogger(__name__)
class ActionParser:
bot = None
def parse(raw_data=None, data=None, command=''):
try:
from pajbot.userdispatch import UserDispatch
Dispatch = UserDispatch
except ImportError:
from pajbot.dispatch import Dispatch
except:
from pajbot.dispatch import Dispatch
log.exception('Something went wrong while attempting to import UserDispatch')
if not data:
data = json.loads(raw_data)
if data['type'] == 'say':
action = SayAction(data['message'], ActionParser.bot)
elif data['type'] == 'me':
action = MeAction(data['message'], ActionParser.bot)
elif data['type'] == 'whisper':
action = WhisperAction(data['message'], ActionParser.bot)
elif data['type'] == 'reply':
action = ReplyAction(data['message'], ActionParser.bot)
elif data['type'] == 'func':
try:
action = FuncAction(getattr(Dispatch, data['cb']))
except AttributeError as e:
log.error('AttributeError caught when parsing action for action "{}": {}'.format(command, e))
return None
elif data['type'] == 'multi':
action = MultiAction(data['args'], data['default'])
else:
raise Exception('Unknown action type: {0}'.format(data['type']))
return action
def apply_substitutions(text, substitutions, bot, extra):
for needle, sub in substitutions.items():
if sub.key and sub.argument:
param = sub.key
extra['argument'] = MessageAction.get_argument_value(extra['message'], sub.argument - 1)
elif sub.key:
param = sub.key
elif sub.argument:
param = MessageAction.get_argument_value(extra['message'], sub.argument - 1)
else:
log.error('Unknown param for response.')
continue
value = sub.cb(param, extra)
try:
for filter in sub.filters:
value = bot.apply_filter(value, filter)
except:
log.exception('Exception caught in filter application')
if value is None:
return None
text = text.replace(needle, str(value))
return text
class IfSubstitution:
def __call__(self, key, extra={}):
if self.sub.key is None:
msg = MessageAction.get_argument_value(extra.get('message', ''), self.sub.argument - 1)
if msg:
return self.get_true_response(extra)
else:
return self.get_false_response(extra)
else:
res = self.sub.cb(self.sub.key, extra)
if res:
return self.get_true_response(extra)
else:
return self.get_false_response(extra)
def get_true_response(self, extra):
return apply_substitutions(self.true_response, self.true_subs, self.bot, extra)
def get_false_response(self, extra):
return apply_substitutions(self.false_response, self.false_subs, self.bot, extra)
def __init__(self, key, arguments, bot):
self.bot = bot
subs = get_substitutions(key, bot)
if len(subs) == 1:
self.sub = list(subs.values())[0]
else:
subs = get_argument_substitutions(key)
if len(subs) == 1:
self.sub = subs[0]
else:
self.sub = None
self.true_response = arguments[0][2:-1] if len(arguments) > 0 else 'Yes'
self.false_response = arguments[1][2:-1] if len(arguments) > 1 else 'No'
self.true_subs = get_substitutions(self.true_response, bot)
self.false_subs = get_substitutions(self.false_response, bot)
class Substitution:
argument_substitution_regex = re.compile(r'\$\((\d+)\)')
substitution_regex = re.compile(r'\$\(([a-z_]+)(\;[0-9]+)?(\:[\w\.\/ -]+|\:\$\([\w_:;\._\/ -]+\))?(\|[\w]+(\([\w%:/ +-]+\))?)*(\,[\'"]{1}[\w \|$;_\-:()\.]+[\'"]{1}){0,2}\)')
urlfetch_substitution_regex = re.compile(r'\$\(urlfetch ([\w-:/&=.,/? ()_]+)\)')
urlfetch_substitution_regex_all = re.compile(r'\$\(urlfetch (.+?)\)')
def __init__(self, cb, needle, key=None, argument=None, filters=[]):
self.cb = cb
self.key = key
self.argument = argument
self.filters = filters
self.needle = needle
class SubstitutionFilter:
def __init__(self, name, arguments):
self.name = name
self.arguments = arguments
class BaseAction:
type = None
subtype = None
def reset(self):
pass
class MultiAction(BaseAction):
type = 'multi'
def __init__(self, args, default=None, fallback=None):
from pajbot.models.command import Command
self.commands = {}
self.default = default
self.fallback = fallback
for command in args:
cmd = Command.from_json(command)
for alias in command['command'].split('|'):
if alias not in self.commands:
self.commands[alias] = cmd
else:
log.error('Alias {0} for this multiaction is already in use.'.format(alias))
import copy
self.original_commands = copy.copy(self.commands)
def reset(self):
import copy
self.commands = copy.copy(self.original_commands)
def __iadd__(self, other):
if other is not None and other.type == 'multi':
self.commands.update(other.commands)
return self
@classmethod
def ready_built(cls, commands, default=None, fallback=None):
""" Useful if you already have a dictionary
with commands pre-built.
"""
multiaction = cls(args=[], default=default, fallback=fallback)
multiaction.commands = commands
import copy
multiaction.original_commands = copy.copy(commands)
return multiaction
def run(self, bot, source, message, event={}, args={}):
""" If there is more text sent to the multicommand after the
initial alias, we _ALWAYS_ assume it's trying the subaction command.
If the extra text was not a valid command, we try to run the fallback command.
In case there's no extra text sent, we will try to run the default command.
"""
cmd = None
if message and len(message) > 0:
msg_lower_parts = message.lower().split(' ')
command = msg_lower_parts[0]
cmd = self.commands.get(command, None)
extra_msg = ' '.join(message.split(' ')[1:])
if cmd is None and self.fallback:
cmd = self.commands.get(self.fallback, None)
extra_msg = message
elif self.default:
command = self.default
cmd = self.commands.get(command, None)
extra_msg = None
if cmd:
if source.level >= cmd.level:
return cmd.run(bot, source, extra_msg, event, args)
else:
log.info('User {0} tried running a sub-command he had no access to ({1}).'.format(source.username, command))
class FuncAction(BaseAction):
type = 'func'
def __init__(self, cb):
self.cb = cb
def run(self, bot, source, message, event={}, args={}):
try:
return self.cb(bot, source, message, event, args)
except:
log.exception('Uncaught exception in FuncAction')
class RawFuncAction(BaseAction):
type = 'rawfunc'
def __init__(self, cb):
self.cb = cb
def run(self, bot, source, message, event={}, args={}):
return self.cb(bot=bot, source=source, message=message, event=event, args=args)
def get_argument_substitutions(string):
"""
Returns a list of `Substitution` objects that are found in the passed `string`.
Will not return multiple `Substitution` objects for the same number.
This means string "$(1) $(1) $(2)" will only return two Substitutions.
"""
argument_substitutions = []
for sub_key in Substitution.argument_substitution_regex.finditer(string):
needle = sub_key.group(0)
argument_num = int(sub_key.group(1))
found = False
for sub in argument_substitutions:
if sub.argument == argument_num:
# We already matched this argument variable
found = True
break
if found:
continue
argument_substitutions.append(Substitution(None, needle=needle, argument=argument_num))
return argument_substitutions
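# Illustrative sketch (assumption, not part of the original module): duplicates are
# collapsed per argument number, so "$(1) $(1) $(2)" yields exactly two substitutions:
#
#   subs = get_argument_substitutions('$(1) $(1) $(2)')
#   assert sorted(sub.argument for sub in subs) == [1, 2]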
def get_substitution_arguments(sub_key):
sub_string = sub_key.group(0)
path = sub_key.group(1)
argument = sub_key.group(2)
if argument is not None:
argument = int(argument[1:])
key = sub_key.group(3)
if key is not None:
key = key[1:]
matched_filters = sub_key.captures(4)
matched_filter_arguments = sub_key.captures(5)
filters = []
filter_argument_index = 0
for filter in matched_filters:
filter = filter[1:]
filter_arguments = []
if '(' in filter:
filter = filter[:-len(matched_filter_arguments[filter_argument_index])]
filter_arguments = [matched_filter_arguments[filter_argument_index][1:-1]]
filter_argument_index += 1
filter = SubstitutionFilter(filter, filter_arguments)
filters.append(filter)
if_arguments = sub_key.captures(6)
return sub_string, path, argument, key, filters, if_arguments
def get_substitutions(string, bot):
"""
Returns a dictionary of `Substitution` objects that are found in the passed `string`.
Will not return multiple `Substitution` objects for the same string.
This means "You have $(source:points) points xD $(source:points)" only returns one Substitution.
"""
substitutions = collections.OrderedDict()
for sub_key in Substitution.substitution_regex.finditer(string):
sub_string, path, argument, key, filters, if_arguments = get_substitution_arguments(sub_key)
if sub_string in substitutions:
# We already matched this variable
continue
try:
if path == 'if':
if len(if_arguments) > 0:
if_substitution = IfSubstitution(key, if_arguments, bot)
if if_substitution.sub is None:
continue
sub = Substitution(if_substitution, needle=sub_string, key=key, argument=argument, filters=filters)
substitutions[sub_string] = sub
except:
log.exception('BabyRage')
method_mapping = {}
try:
method_mapping['kvi'] = bot.get_kvi_value
method_mapping['tb'] = bot.get_value
method_mapping['lasttweet'] = bot.get_last_tweet
method_mapping['etm'] = bot.get_emote_tm
method_mapping['ecount'] = bot.get_emote_count
method_mapping['etmrecord'] = bot.get_emote_tm_record
method_mapping['source'] = bot.get_source_value
method_mapping['user'] = bot.get_user_value
method_mapping['usersource'] = bot.get_usersource_value
method_mapping['time'] = bot.get_time_value
method_mapping['curdeck'] = bot.decks.action_get_curdeck
method_mapping['current_stream'] = bot.stream_manager.get_current_stream_value
method_mapping['last_stream'] = bot.stream_manager.get_last_stream_value
method_mapping['current_song'] = bot.get_current_song_value
method_mapping['args'] = bot.get_args_value
method_mapping['strictargs'] = bot.get_strictargs_value
method_mapping['notify'] = bot.get_notify_value
method_mapping['command'] = bot.get_command_value
except AttributeError:
pass
for sub_key in Substitution.substitution_regex.finditer(string):
sub_string, path, argument, key, filters, if_arguments = get_substitution_arguments(sub_key)
if sub_string in substitutions:
# We already matched this variable
continue
if path in method_mapping:
sub = Substitution(method_mapping[path], needle=sub_string, key=key, argument=argument, filters=filters)
substitutions[sub_string] = sub
return substitutions
def get_urlfetch_substitutions(string, all=False):
substitutions = {}
if all:
r = Substitution.urlfetch_substitution_regex_all
else:
r = Substitution.urlfetch_substitution_regex
for sub_key in r.finditer(string):
substitutions[sub_key.group(0)] = sub_key.group(1)
return substitutions
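# Illustrative sketch (assumption): with all=True the permissive regex is used, so
#
#   get_urlfetch_substitutions('Now: $(urlfetch https://example.com/time)', all=True)
#
# returns {'$(urlfetch https://example.com/time)': 'https://example.com/time'}.
# urlfetch_msg later fetches each URL with requests.get and replaces the needle
# with the (truncated) response body.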
class MessageAction(BaseAction):
type = 'message'
def __init__(self, response, bot):
self.response = response
if bot:
self.argument_subs = get_argument_substitutions(self.response)
self.subs = get_substitutions(self.response, bot)
self.num_urlfetch_subs = len(get_urlfetch_substitutions(self.response, all=True))
else:
self.argument_subs = []
self.subs = {}
self.num_urlfetch_subs = 0
def get_argument_value(message, index):
if not message:
return ''
msg_parts = message.split(' ')
try:
return msg_parts[index]
except:
pass
return ''
def get_response(self, bot, extra):
resp = self.response
resp = apply_substitutions(resp, self.subs, bot, extra)
if resp is None:
return None
for sub in self.argument_subs:
needle = sub.needle
value = str(MessageAction.get_argument_value(extra['message'], sub.argument - 1))
resp = resp.replace(needle, value)
log.debug('Replacing {0} with {1}'.format(needle, value))
if 'command' in extra and 'source' in extra:
if extra['command'].run_through_banphrases is True:
checks = {
'banphrase': (bot.banphrase_manager.check_message, [resp, extra['source']]),
'ascii': (AsciiProtectionModule.check_message, [resp]),
}
# Check banphrases
for check in checks:
# Make sure the module is enabled
if check in bot.module_manager:
res = checks[check][0](*checks[check][1])
if res is not False:
return None
return resp
def get_extra_data(self, source, message, args):
ret = {
'user': source.username if source else None,
'source': source,
'message': message,
}
ret.update(args)
return ret
def run(self, bot, source, message, event={}, args={}):
raise NotImplementedError('Please implement the run method.')
def urlfetch_msg(method, message, num_urlfetch_subs, bot, extra={}, args=[], kwargs={}):
urlfetch_subs = get_urlfetch_substitutions(message)
if len(urlfetch_subs) > num_urlfetch_subs:
log.error('HIJACK ATTEMPT {}'.format(message))
return False
for needle, url in urlfetch_subs.items():
try:
r = requests.get(url)
r.raise_for_status()
value = r.text.strip().replace('\n', '').replace('\r', '')[:400]
except:
return False
message = message.replace(needle, value)
if 'command' in extra and 'source' in extra:
if extra['command'].run_through_banphrases is True:
checks = {
'banphrase': (bot.banphrase_manager.check_message, [message, extra['source']]),
'ascii': (AsciiProtectionModule.check_message, [message]),
}
# Check banphrases
for check in checks:
# Make sure the module is enabled
if check in bot.module_manager:
res = checks[check][0](*checks[check][1])
if res is not False:
return None
args.append(message)
method(*args, **kwargs)
class SayAction(MessageAction):
subtype = 'say'
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if self.num_urlfetch_subs == 0:
return bot.say(resp)
else:
return ScheduleManager.execute_now(urlfetch_msg,
args=[],
kwargs={
'args': [],
'kwargs': {},
'method': bot.say,
'bot': bot,
'extra': extra,
'message': resp,
'num_urlfetch_subs': self.num_urlfetch_subs,
})
class MeAction(MessageAction):
subtype = 'me'
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if self.num_urlfetch_subs == 0:
return bot.me(resp)
else:
return ScheduleManager.execute_now(urlfetch_msg,
args=[],
kwargs={
'args': [],
'kwargs': {},
'method': bot.me,
'bot': bot,
'extra': extra,
'message': resp,
'num_urlfetch_subs': self.num_urlfetch_subs,
})
class WhisperAction(MessageAction):
subtype = 'whisper'
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if self.num_urlfetch_subs == 0:
return bot.whisper(source.username, resp)
else:
return ScheduleManager.execute_now(urlfetch_msg,
args=[],
kwargs={
'args': [source.username],
'kwargs': {},
'method': bot.whisper,
'bot': bot,
'extra': extra,
'message': resp,
'num_urlfetch_subs': self.num_urlfetch_subs,
})
class ReplyAction(MessageAction):
subtype = 'reply'
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if irc.client.is_channel(event.target):
if self.num_urlfetch_subs == 0:
return bot.say(resp, channel=event.target)
else:
return ScheduleManager.execute_now(urlfetch_msg,
args=[],
kwargs={
'args': [],
'kwargs': {
'channel': event.target
},
'method': bot.say,
'bot': bot,
'extra': extra,
'message': resp,
'num_urlfetch_subs': self.num_urlfetch_subs,
})
else:
if self.num_urlfetch_subs == 0:
return bot.whisper(source.username, resp)
else:
return ScheduleManager.execute_now(urlfetch_msg,
args=[],
kwargs={
'args': [source.username],
'kwargs': {},
'method': bot.whisper,
'bot': bot,
'extra': extra,
'message': resp,
'num_urlfetch_subs': self.num_urlfetch_subs,
})
|
the-stack_0_25824
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViTMAE model. """
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST, to_2tuple
if is_vision_available():
from PIL import Image
from transformers import ViTFeatureExtractor
class ViTMAEModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ViTMAEConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = ViTMAEModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
image_size = to_2tuple(self.image_size)
patch_size = to_2tuple(self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
expected_seq_len = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
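# Worked example (assumption: the library default mask_ratio of 0.75 is in effect):
# with image_size=30 and patch_size=2, num_patches = (30 // 2) * (30 // 2) = 225,
# so expected_seq_len = ceil(0.25 * (225 + 1)) = ceil(56.5) = 57 tokens kept.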
def create_and_check_for_pretraining(self, config, pixel_values, labels):
model = ViTMAEForPreTraining(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected sequence length = num_patches
image_size = to_2tuple(self.image_size)
patch_size = to_2tuple(self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
expected_seq_len = num_patches
expected_num_channels = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, expected_seq_len, expected_num_channels))
# test greyscale images
config.num_channels = 1
model = ViTMAEForPreTraining(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values)
expected_num_channels = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, expected_seq_len, expected_num_channels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = ViTMAEModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
# ViTMAE does not use inputs_embeds
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in ViTMAE, the seq_len equals (number of patches + 1) * (1 - mask_ratio), rounded above
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# ViTMAE has a different seq_length
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise
# to generate masks during test
def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
# make masks reproducible
np.random.seed(2)
num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
pt_noise = torch.from_numpy(noise)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
pt_inputs_dict["noise"] = pt_noise
super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
model.to(torch_device)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# Make sure we don't have nans
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results."""
)
def test_determinism(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results."""
)
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results."""
)
def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""")
def test_model_outputs_equivalence(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTMAEModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def default_feature_extractor(self):
return ViTFeatureExtractor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
def test_inference_for_pretraining(self):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
noise = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
# verify the logits
expected_shape = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
|
the-stack_0_25825
|
from sys import argv
if len(argv) < 3:
print('Usage: python parseSNAPclusters.py <file-input-1> <file-output-1> [--sub0]')
exit(1)
fIn = ""
fOut = ""
sub = 0
for a in argv[1:]:  # skip the script name so it is not mistaken for the input file
if a == '--sub0':
sub = 0
elif fIn == '':
fIn = a
else:
fOut = a
def getIdx(s):
global sub
return int(s)-sub
clusters = []
m = 0
with open(fIn, 'r') as f:
while True:
idx = f.readline().split()
l = []
for i in idx:
try:
l.append(getIdx(i))
except Exception:
pass
if len(l) == 0:
break
        m = max(max(l), m)  # max(l), not max(*l), so single-id lines don't raise
if len(l) > 2:
clusters.append(l)
print('Clusters', len(clusters))
indexes = [[] for i in range(m+1)]
for ic, c in enumerate(clusters):
for i in c:
indexes[i].append(ic)
data = []
for idx in indexes:
#if len(idx) > 0:
data.append(' '.join([str(i) for i in idx]))
with open(fOut, 'w') as f:
f.write('\n'.join(data[1:]))
f.write('\n')
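# Worked example (hypothetical input): if the input file contains the two cluster lines
#   1 2 3
#   2 4 5 6
# then, with the default sub=0, node ids map straight to list indices and the output
# file gets one line of (0-based) cluster ids per node, for nodes 1..6:
#   0
#   0 1
#   0
#   1
#   1
#   1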
|
the-stack_0_25826
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Abstractions over S3's upload/download operations.
This module provides high level abstractions for efficient
uploads/downloads. It handles several things for the user:
* Automatically switching to multipart transfers when
a file is over a specific size threshold
* Uploading/downloading a file in parallel
* Throttling based on max bandwidth
* Progress callbacks to monitor transfers
* Retries. While botocore handles retries for streaming uploads,
it is not possible for it to handle retries for streaming
downloads. This module handles retries for both cases so
you don't need to implement any retry logic yourself.
This module has a reasonable set of defaults. It also allows you
to configure many aspects of the transfer process including:
* Multipart threshold size
* Max parallel downloads
* Max bandwidth
* Socket timeouts
* Retry amounts
There is no support for s3->s3 multipart copies at this
time.
.. _ref_s3transfer_usage:
Usage
=====
The simplest way to use this module is:
.. code-block:: python
client = boto3.client('s3', 'us-west-2')
transfer = S3Transfer(client)
# Upload /tmp/myfile to s3://bucket/key
transfer.upload_file('/tmp/myfile', 'bucket', 'key')
# Download s3://bucket/key to /tmp/myfile
transfer.download_file('bucket', 'key', '/tmp/myfile')
The ``upload_file`` and ``download_file`` methods also accept
``**kwargs``, which will be forwarded through to the corresponding
client operation. Here are a few examples using ``upload_file``::
# Making the object public
transfer.upload_file('/tmp/myfile', 'bucket', 'key',
extra_args={'ACL': 'public-read'})
# Setting metadata
transfer.upload_file('/tmp/myfile', 'bucket', 'key',
extra_args={'Metadata': {'a': 'b', 'c': 'd'}})
# Setting content type
transfer.upload_file('/tmp/myfile.json', 'bucket', 'key',
extra_args={'ContentType': "application/json"})
The ``S3Transfer`` class also supports progress callbacks so you can
provide transfer progress to users. Both the ``upload_file`` and
``download_file`` methods take an optional ``callback`` parameter.
Here's an example of how to print a simple progress percentage
to the user:
.. code-block:: python
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far,
self._size, percentage))
sys.stdout.flush()
transfer = S3Transfer(boto3.client('s3', 'us-west-2'))
# Upload /tmp/myfile to s3://bucket/key and print upload progress.
transfer.upload_file('/tmp/myfile', 'bucket', 'key',
callback=ProgressPercentage('/tmp/myfile'))
You can also provide a TransferConfig object to the S3Transfer
object that gives you more fine grained control over the
transfer. For example:
.. code-block:: python
client = boto3.client('s3', 'us-west-2')
config = TransferConfig(
multipart_threshold=8 * 1024 * 1024,
max_concurrency=10,
num_download_attempts=10,
)
transfer = S3Transfer(client, config)
transfer.upload_file('/tmp/foo', 'bucket', 'key')
"""
import concurrent.futures
import functools
import logging
import math
import os
import queue
import random
import socket
import string
import threading
from botocore.compat import six # noqa: F401
from botocore.exceptions import IncompleteReadError
from botocore.vendored.requests.packages.urllib3.exceptions import (
ReadTimeoutError,
)
import s3transfer.compat
from s3transfer.exceptions import RetriesExceededError, S3UploadFailedError
__author__ = 'Amazon Web Services'
__version__ = '0.5.1'
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
MB = 1024 * 1024
SHUTDOWN_SENTINEL = object()
def random_file_extension(num_digits=8):
return ''.join(random.choice(string.hexdigits) for _ in range(num_digits))
def disable_upload_callbacks(request, operation_name, **kwargs):
if operation_name in ['PutObject', 'UploadPart'] and hasattr(
request.body, 'disable_callback'
):
request.body.disable_callback()
def enable_upload_callbacks(request, operation_name, **kwargs):
if operation_name in ['PutObject', 'UploadPart'] and hasattr(
request.body, 'enable_callback'
):
request.body.enable_callback()
class QueueShutdownError(Exception):
pass
class ReadFileChunk:
def __init__(
self,
fileobj,
start_byte,
chunk_size,
full_file_size,
callback=None,
enable_callback=True,
):
"""
Given a file object shown below:
|___________________________________________________|
0 | | full_file_size
|----chunk_size---|
start_byte
:type fileobj: file
:param fileobj: File like object
:type start_byte: int
:param start_byte: The first byte from which to start reading.
:type chunk_size: int
:param chunk_size: The max chunk size to read. Trying to read
            past the end of the chunk size will behave like you've
reached the end of the file.
:type full_file_size: int
:param full_file_size: The entire content length associated
with ``fileobj``.
:type callback: function(amount_read)
:param callback: Called whenever data is read from this object.
"""
self._fileobj = fileobj
self._start_byte = start_byte
self._size = self._calculate_file_size(
self._fileobj,
requested_size=chunk_size,
start_byte=start_byte,
actual_file_size=full_file_size,
)
self._fileobj.seek(self._start_byte)
self._amount_read = 0
self._callback = callback
self._callback_enabled = enable_callback
@classmethod
def from_filename(
cls,
filename,
start_byte,
chunk_size,
callback=None,
enable_callback=True,
):
"""Convenience factory function to create from a filename.
:type start_byte: int
:param start_byte: The first byte from which to start reading.
:type chunk_size: int
:param chunk_size: The max chunk size to read. Trying to read
            past the end of the chunk size will behave like you've
reached the end of the file.
:type full_file_size: int
:param full_file_size: The entire content length associated
with ``fileobj``.
:type callback: function(amount_read)
:param callback: Called whenever data is read from this object.
:type enable_callback: bool
:param enable_callback: Indicate whether to invoke callback
during read() calls.
:rtype: ``ReadFileChunk``
:return: A new instance of ``ReadFileChunk``
"""
f = open(filename, 'rb')
file_size = os.fstat(f.fileno()).st_size
return cls(
f, start_byte, chunk_size, file_size, callback, enable_callback
)
def _calculate_file_size(
self, fileobj, requested_size, start_byte, actual_file_size
):
max_chunk_size = actual_file_size - start_byte
return min(max_chunk_size, requested_size)
def read(self, amount=None):
if amount is None:
amount_to_read = self._size - self._amount_read
else:
amount_to_read = min(self._size - self._amount_read, amount)
data = self._fileobj.read(amount_to_read)
self._amount_read += len(data)
if self._callback is not None and self._callback_enabled:
self._callback(len(data))
return data
def enable_callback(self):
self._callback_enabled = True
def disable_callback(self):
self._callback_enabled = False
def seek(self, where):
self._fileobj.seek(self._start_byte + where)
if self._callback is not None and self._callback_enabled:
# To also rewind the callback() for an accurate progress report
self._callback(where - self._amount_read)
self._amount_read = where
def close(self):
self._fileobj.close()
def tell(self):
return self._amount_read
def __len__(self):
# __len__ is defined because requests will try to determine the length
# of the stream to set a content length. In the normal case
# of the file it will just stat the file, but we need to change that
# behavior. By providing a __len__, requests will use that instead
# of stat'ing the file.
return self._size
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def __iter__(self):
# This is a workaround for http://bugs.python.org/issue17575
# Basically httplib will try to iterate over the contents, even
# if its a file like object. This wasn't noticed because we've
# already exhausted the stream so iterating over the file immediately
# stops, which is what we're simulating here.
return iter([])
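# A minimal usage sketch, not part of the original module: ReadFileChunk streams
# exactly one byte range of a larger file, which is how MultipartUploader feeds a
# single part to ``upload_part``. The path below is a hypothetical example file.
def _read_second_part_example(path='/tmp/myfile', part_size=8 * MB):
    # Stream only bytes [part_size, 2 * part_size) of the file; read() stops at
    # the chunk boundary even though the underlying file may be larger.
    with ReadFileChunk.from_filename(path, start_byte=part_size,
                                     chunk_size=part_size) as body:
        return body.read()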
class StreamReaderProgress:
"""Wrapper for a read only stream that adds progress callbacks."""
def __init__(self, stream, callback=None):
self._stream = stream
self._callback = callback
def read(self, *args, **kwargs):
value = self._stream.read(*args, **kwargs)
if self._callback is not None:
self._callback(len(value))
return value
class OSUtils:
def get_file_size(self, filename):
return os.path.getsize(filename)
def open_file_chunk_reader(self, filename, start_byte, size, callback):
return ReadFileChunk.from_filename(
filename, start_byte, size, callback, enable_callback=False
)
def open(self, filename, mode):
return open(filename, mode)
def remove_file(self, filename):
"""Remove a file, noop if file does not exist."""
# Unlike os.remove, if the file does not exist,
# then this method does nothing.
try:
os.remove(filename)
except OSError:
pass
def rename_file(self, current_filename, new_filename):
s3transfer.compat.rename_file(current_filename, new_filename)
class MultipartUploader:
# These are the extra_args that need to be forwarded onto
# subsequent upload_parts.
UPLOAD_PART_ARGS = [
'SSECustomerKey',
'SSECustomerAlgorithm',
'SSECustomerKeyMD5',
'RequestPayer',
]
def __init__(
self,
client,
config,
osutil,
executor_cls=concurrent.futures.ThreadPoolExecutor,
):
self._client = client
self._config = config
self._os = osutil
self._executor_cls = executor_cls
def _extra_upload_part_args(self, extra_args):
# Only the args in UPLOAD_PART_ARGS actually need to be passed
# onto the upload_part calls.
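        # For example (illustrative values): extra_args={'ACL': 'public-read',
        # 'SSECustomerKey': 'abc'} forwards only {'SSECustomerKey': 'abc'} to
        # upload_part, since 'ACL' applies to create_multipart_upload alone.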
upload_parts_args = {}
for key, value in extra_args.items():
if key in self.UPLOAD_PART_ARGS:
upload_parts_args[key] = value
return upload_parts_args
def upload_file(self, filename, bucket, key, callback, extra_args):
response = self._client.create_multipart_upload(
Bucket=bucket, Key=key, **extra_args
)
upload_id = response['UploadId']
try:
parts = self._upload_parts(
upload_id, filename, bucket, key, callback, extra_args
)
except Exception as e:
logger.debug(
"Exception raised while uploading parts, "
"aborting multipart upload.",
exc_info=True,
)
self._client.abort_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id
)
raise S3UploadFailedError(
"Failed to upload {} to {}: {}".format(
filename, '/'.join([bucket, key]), e
)
)
self._client.complete_multipart_upload(
Bucket=bucket,
Key=key,
UploadId=upload_id,
MultipartUpload={'Parts': parts},
)
def _upload_parts(
self, upload_id, filename, bucket, key, callback, extra_args
):
upload_parts_extra_args = self._extra_upload_part_args(extra_args)
parts = []
part_size = self._config.multipart_chunksize
num_parts = int(
math.ceil(self._os.get_file_size(filename) / float(part_size))
)
max_workers = self._config.max_concurrency
with self._executor_cls(max_workers=max_workers) as executor:
upload_partial = functools.partial(
self._upload_one_part,
filename,
bucket,
key,
upload_id,
part_size,
upload_parts_extra_args,
callback,
)
for part in executor.map(upload_partial, range(1, num_parts + 1)):
parts.append(part)
return parts
def _upload_one_part(
self,
filename,
bucket,
key,
upload_id,
part_size,
extra_args,
callback,
part_number,
):
open_chunk_reader = self._os.open_file_chunk_reader
with open_chunk_reader(
filename, part_size * (part_number - 1), part_size, callback
) as body:
response = self._client.upload_part(
Bucket=bucket,
Key=key,
UploadId=upload_id,
PartNumber=part_number,
Body=body,
**extra_args,
)
etag = response['ETag']
return {'ETag': etag, 'PartNumber': part_number}
class ShutdownQueue(queue.Queue):
"""A queue implementation that can be shutdown.
Shutting down a queue means that this class adds a
trigger_shutdown method that will trigger all subsequent
calls to put() to fail with a ``QueueShutdownError``.
It purposefully deviates from queue.Queue, and is *not* meant
to be a drop in replacement for ``queue.Queue``.
"""
def _init(self, maxsize):
self._shutdown = False
self._shutdown_lock = threading.Lock()
# queue.Queue is an old style class so we don't use super().
return queue.Queue._init(self, maxsize)
def trigger_shutdown(self):
with self._shutdown_lock:
self._shutdown = True
logger.debug("The IO queue is now shutdown.")
def put(self, item):
# Note: this is not sufficient, it's still possible to deadlock!
# Need to hook into the condition vars used by this class.
with self._shutdown_lock:
if self._shutdown:
raise QueueShutdownError(
"Cannot put item to queue when " "queue has been shutdown."
)
return queue.Queue.put(self, item)
class MultipartDownloader:
def __init__(
self,
client,
config,
osutil,
executor_cls=concurrent.futures.ThreadPoolExecutor,
):
self._client = client
self._config = config
self._os = osutil
self._executor_cls = executor_cls
self._ioqueue = ShutdownQueue(self._config.max_io_queue)
def download_file(
self, bucket, key, filename, object_size, extra_args, callback=None
):
with self._executor_cls(max_workers=2) as controller:
# 1 thread for the future that manages the uploading of files
# 1 thread for the future that manages IO writes.
download_parts_handler = functools.partial(
self._download_file_as_future,
bucket,
key,
filename,
object_size,
callback,
)
parts_future = controller.submit(download_parts_handler)
io_writes_handler = functools.partial(
self._perform_io_writes, filename
)
io_future = controller.submit(io_writes_handler)
results = concurrent.futures.wait(
[parts_future, io_future],
return_when=concurrent.futures.FIRST_EXCEPTION,
)
self._process_future_results(results)
def _process_future_results(self, futures):
finished, unfinished = futures
for future in finished:
future.result()
def _download_file_as_future(
self, bucket, key, filename, object_size, callback
):
part_size = self._config.multipart_chunksize
num_parts = int(math.ceil(object_size / float(part_size)))
max_workers = self._config.max_concurrency
download_partial = functools.partial(
self._download_range,
bucket,
key,
filename,
part_size,
num_parts,
callback,
)
try:
with self._executor_cls(max_workers=max_workers) as executor:
list(executor.map(download_partial, range(num_parts)))
finally:
self._ioqueue.put(SHUTDOWN_SENTINEL)
def _calculate_range_param(self, part_size, part_index, num_parts):
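        # Example with an 8 MB part size: part_index 0 -> 'bytes=0-8388607',
        # part_index 1 -> 'bytes=8388608-16777215', and the final part gets an
        # open-ended 'bytes=<start>-' so it reads through to the end of the object.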
start_range = part_index * part_size
if part_index == num_parts - 1:
end_range = ''
else:
end_range = start_range + part_size - 1
range_param = f'bytes={start_range}-{end_range}'
return range_param
def _download_range(
self, bucket, key, filename, part_size, num_parts, callback, part_index
):
try:
range_param = self._calculate_range_param(
part_size, part_index, num_parts
)
max_attempts = self._config.num_download_attempts
last_exception = None
for i in range(max_attempts):
try:
logger.debug("Making get_object call.")
response = self._client.get_object(
Bucket=bucket, Key=key, Range=range_param
)
streaming_body = StreamReaderProgress(
response['Body'], callback
)
buffer_size = 1024 * 16
current_index = part_size * part_index
for chunk in iter(
lambda: streaming_body.read(buffer_size), b''
):
self._ioqueue.put((current_index, chunk))
current_index += len(chunk)
return
except (
socket.timeout,
OSError,
ReadTimeoutError,
IncompleteReadError,
) as e:
logger.debug(
"Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)",
e,
i,
max_attempts,
exc_info=True,
)
last_exception = e
continue
raise RetriesExceededError(last_exception)
finally:
logger.debug("EXITING _download_range for part: %s", part_index)
def _perform_io_writes(self, filename):
with self._os.open(filename, 'wb') as f:
while True:
task = self._ioqueue.get()
if task is SHUTDOWN_SENTINEL:
logger.debug(
"Shutdown sentinel received in IO handler, "
"shutting down IO handler."
)
return
else:
try:
offset, data = task
f.seek(offset)
f.write(data)
except Exception as e:
logger.debug(
"Caught exception in IO thread: %s",
e,
exc_info=True,
)
self._ioqueue.trigger_shutdown()
raise
class TransferConfig:
def __init__(
self,
multipart_threshold=8 * MB,
max_concurrency=10,
multipart_chunksize=8 * MB,
num_download_attempts=5,
max_io_queue=100,
):
self.multipart_threshold = multipart_threshold
self.max_concurrency = max_concurrency
self.multipart_chunksize = multipart_chunksize
self.num_download_attempts = num_download_attempts
self.max_io_queue = max_io_queue
class S3Transfer:
ALLOWED_DOWNLOAD_ARGS = [
'VersionId',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'RequestPayer',
]
ALLOWED_UPLOAD_ARGS = [
'ACL',
'CacheControl',
'ContentDisposition',
'ContentEncoding',
'ContentLanguage',
'ContentType',
'Expires',
'GrantFullControl',
'GrantRead',
'GrantReadACP',
'GrantWriteACL',
'Metadata',
'RequestPayer',
'ServerSideEncryption',
'StorageClass',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'SSEKMSKeyId',
'SSEKMSEncryptionContext',
'Tagging',
]
def __init__(self, client, config=None, osutil=None):
self._client = client
if config is None:
config = TransferConfig()
self._config = config
if osutil is None:
osutil = OSUtils()
self._osutil = osutil
def upload_file(
self, filename, bucket, key, callback=None, extra_args=None
):
"""Upload a file to an S3 object.
Variants have also been injected into S3 client, Bucket and Object.
You don't have to use S3Transfer.upload_file() directly.
"""
if extra_args is None:
extra_args = {}
self._validate_all_known_args(extra_args, self.ALLOWED_UPLOAD_ARGS)
events = self._client.meta.events
events.register_first(
'request-created.s3',
disable_upload_callbacks,
unique_id='s3upload-callback-disable',
)
events.register_last(
'request-created.s3',
enable_upload_callbacks,
unique_id='s3upload-callback-enable',
)
if (
self._osutil.get_file_size(filename)
>= self._config.multipart_threshold
):
self._multipart_upload(filename, bucket, key, callback, extra_args)
else:
self._put_object(filename, bucket, key, callback, extra_args)
def _put_object(self, filename, bucket, key, callback, extra_args):
# We're using open_file_chunk_reader so we can take advantage of the
# progress callback functionality.
open_chunk_reader = self._osutil.open_file_chunk_reader
with open_chunk_reader(
filename,
0,
self._osutil.get_file_size(filename),
callback=callback,
) as body:
self._client.put_object(
Bucket=bucket, Key=key, Body=body, **extra_args
)
def download_file(
self, bucket, key, filename, extra_args=None, callback=None
):
"""Download an S3 object to a file.
Variants have also been injected into S3 client, Bucket and Object.
You don't have to use S3Transfer.download_file() directly.
"""
# This method will issue a ``head_object`` request to determine
# the size of the S3 object. This is used to determine if the
# object is downloaded in parallel.
if extra_args is None:
extra_args = {}
self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS)
object_size = self._object_size(bucket, key, extra_args)
temp_filename = filename + os.extsep + random_file_extension()
try:
self._download_file(
bucket, key, temp_filename, object_size, extra_args, callback
)
except Exception:
logger.debug(
"Exception caught in download_file, removing partial "
"file: %s",
temp_filename,
exc_info=True,
)
self._osutil.remove_file(temp_filename)
raise
else:
self._osutil.rename_file(temp_filename, filename)
def _download_file(
self, bucket, key, filename, object_size, extra_args, callback
):
if object_size >= self._config.multipart_threshold:
self._ranged_download(
bucket, key, filename, object_size, extra_args, callback
)
else:
self._get_object(bucket, key, filename, extra_args, callback)
def _validate_all_known_args(self, actual, allowed):
for kwarg in actual:
if kwarg not in allowed:
raise ValueError(
"Invalid extra_args key '%s', "
"must be one of: %s" % (kwarg, ', '.join(allowed))
)
def _ranged_download(
self, bucket, key, filename, object_size, extra_args, callback
):
downloader = MultipartDownloader(
self._client, self._config, self._osutil
)
downloader.download_file(
bucket, key, filename, object_size, extra_args, callback
)
def _get_object(self, bucket, key, filename, extra_args, callback):
# precondition: num_download_attempts > 0
max_attempts = self._config.num_download_attempts
last_exception = None
for i in range(max_attempts):
try:
return self._do_get_object(
bucket, key, filename, extra_args, callback
)
except (
socket.timeout,
OSError,
ReadTimeoutError,
IncompleteReadError,
) as e:
# TODO: we need a way to reset the callback if the
# download failed.
logger.debug(
"Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)",
e,
i,
max_attempts,
exc_info=True,
)
last_exception = e
continue
raise RetriesExceededError(last_exception)
def _do_get_object(self, bucket, key, filename, extra_args, callback):
response = self._client.get_object(
Bucket=bucket, Key=key, **extra_args
)
streaming_body = StreamReaderProgress(response['Body'], callback)
with self._osutil.open(filename, 'wb') as f:
for chunk in iter(lambda: streaming_body.read(8192), b''):
f.write(chunk)
def _object_size(self, bucket, key, extra_args):
return self._client.head_object(Bucket=bucket, Key=key, **extra_args)[
'ContentLength'
]
def _multipart_upload(self, filename, bucket, key, callback, extra_args):
uploader = MultipartUploader(self._client, self._config, self._osutil)
uploader.upload_file(filename, bucket, key, callback, extra_args)
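# Hedged usage sketch, not part of the original module: downloading an object with
# a progress callback. The bucket, key and local path below are hypothetical.
def _download_with_progress_example():
    import boto3  # assumed to be available alongside this module

    client = boto3.client('s3', 'us-west-2')
    transfer = S3Transfer(client, TransferConfig(num_download_attempts=3))
    transfer.download_file('bucket', 'key', '/tmp/myfile',
                           callback=lambda n: print('read %d more bytes' % n))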
|
the-stack_0_25828
|
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Storage Query and Access API module
"""
from __future__ import absolute_import, division, print_function
import logging
from collections import defaultdict
from itertools import chain, groupby
import numpy
from ..model import GeoBox
from .core import Datacube, Group, get_bounds, datatset_type_to_row
from .query import Query
_LOG = logging.getLogger(__name__)
class API(object):
"""
Interface for use by the ``AnalyticsEngine`` and ``ExecutionEngine`` modules.
"""
def __init__(self, index=None, app=None, datacube=None):
"""
Creates the interface for query and storage access.
If no datacube or index is given, the default configuration is used for database connection, etc.
:param index: The database index to use.
:type index: from :py:class:`datacube.index.index_connect` or None
:param app: A short, alphanumeric name to identify this application.
The application name is used to track down problems with database queries, so it is strongly
            advised that it be used. If an index is supplied, the application name is ignored.
:type app: string, required if no index is given
:param datacube:
:type datacube: :class:`datacube.Datacube`
"""
if datacube is not None:
self.datacube = datacube
elif index is not None:
self.datacube = Datacube(index)
else:
app = app or 'Datacube-API'
self.datacube = Datacube(app=app)
def _get_descriptor_for_dataset(self, dataset_type, datasets, group_by, geopolygon=None,
include_storage_units=True):
dataset_descriptor = {}
if not (dataset_type.grid_spec and dataset_type.grid_spec.dimensions):
return None
if not geopolygon:
geopolygon = get_bounds(datasets, dataset_type.grid_spec.crs)
datasets.sort(key=group_by.group_by_func)
groups = [Group(key, list(group)) for key, group in groupby(datasets, group_by.group_by_func)]
dataset_descriptor['result_min'] = tuple()
dataset_descriptor['result_max'] = tuple()
dataset_descriptor['result_shape'] = tuple()
dataset_descriptor['irregular_indices'] = {}
geobox = GeoBox.from_geopolygon(geopolygon.to_crs(dataset_type.grid_spec.crs),
dataset_type.grid_spec.resolution)
dims = dataset_type.dimensions
spatial_dims = dataset_type.grid_spec.dimensions
dataset_descriptor['dimensions'] = list(dims)
for dim in dims:
if dim in spatial_dims:
coords = geobox.coordinates[dim].labels
elif dim == group_by.dimension:
coords = [group.key for group in groups]
dataset_descriptor['irregular_indices'][dim] = coords
else:
# not supported yet...
continue
dataset_descriptor['result_min'] += (min(coords),)
dataset_descriptor['result_max'] += (max(coords),)
dataset_descriptor['result_shape'] += (len(coords),)
if dataset_type.measurements:
dataset_descriptor['variables'] = self._get_descriptor_for_measurements(dataset_type)
dataset_descriptor['groups'] = (dataset_type, groups)
if include_storage_units:
dataset_descriptor['storage_units'] = self._compute_storage_units(dataset_type, datasets)
return dataset_descriptor
@staticmethod
def _compute_storage_units(dataset_type, datasets):
storage_units = {}
def dataset_path(ds):
return str(ds.local_path)
datasets.sort(key=dataset_path)
for path, datasets in groupby(datasets, key=dataset_path):
datasets = list(datasets)
su = {}
times = [dataset.center_time for dataset in datasets]
xs = [x for dataset in datasets for x in (dataset.bounds.left, dataset.bounds.right)]
ys = [y for dataset in datasets for y in (dataset.bounds.top, dataset.bounds.bottom)]
su['storage_shape'] = tuple([len(times)] + dataset_type.grid_spec.tile_resolution)
su['storage_min'] = min(times), min(ys), min(xs)
su['storage_max'] = max(times), max(ys), max(xs)
su['storage_path'] = path
su['irregular_indices'] = {'time': times}
storage_units[(min(times), max(ys), min(xs))] = su
return storage_units
@staticmethod
def _get_descriptor_for_measurements(dataset_type):
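        # Returns a mapping like {'B30': {'datatype_name': 'int16', 'nodata_value': -999}},
        # one entry per measurement defined on the product.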
data_vars = {}
for k, v in dataset_type.measurements.items():
var_desc = {}
if 'dtype' in v:
var_desc['datatype_name'] = v['dtype']
if 'nodata' in v:
var_desc['nodata_value'] = v['nodata']
data_vars[k] = var_desc
return data_vars
def get_descriptor(self, descriptor_request=None, include_storage_units=True):
"""
        Gets the metadata for an ``AnalyticsEngine`` query.
All fields are optional.
**Dimensions**
Dimensions can specify a range by label, and optionally a CRS to interpret the label.
The default CRS interpretation for geospatial dimensions (longitude/latitude or x/y) is WGS84/EPSG:4326,
even if the resulting dimension is in another projection.
:param descriptor_request: The request query, formatted as:
::
descriptor_request = {
'platform': 'LANDSAT_8',
'product_type': 'nbar',
'dimensions': {
'x': {
'range': (140, 142),
'crs': 'EPSG:4326'
},
'y': {
'range': (-36, -35),
'crs': 'EPSG:4326'
},
'time': {
'range': ((1990, 6, 1), (1992, 7 ,1)),
'group_by': 'solar_day'
}
},
}
:type descriptor_request: dict or None
:param include_storage_units: Include the list of storage units
:type include_storage_units: bool, optional
:return: A descriptor dict of the query, containing the metadata of the request
::
descriptor = {
'ls5_nbar_albers': { # product identifier
'dimensions': ['x', 'y', 'time'],
'variables': { # Variables which can be accessed as arrays
                        'B10': {
                            'datatype_name': 'int16',
                            'nodata_value': -999
                        },
                        'B20': {
                            'datatype_name': 'int16',
                            'nodata_value': -999
                        },
                        'B30': {
                            'datatype_name': 'int16',
                            'nodata_value': -999
                        }
},
'result_min': (140, -36, 1293840000),
'result_max': (141, -35, 1325376000),
'result_shape': (8000, 8000, 40), # Overall size of result set
'irregular_indices': {
# Regularly indexed dimensions (e.g. x & y) won't be specified
'time': date_array # Array of days since 1/1/1970
},
'storage_units': {
(140, -36, 1990): { # Storage unit indices
'storage_min': (140, -36, 1293840000),
'storage_max': (141, -35, 1293800400),
'storage_shape': (4000, 4000, 24),
'storage_path': '/path/to/storage/units/nbar_140_-36_1990.nc',
},
(140, -36, 1991): { # Storage unit indices
'storage_min': (140, -36, 1293800400),
'storage_max': (141, -35, 1325376000),
'storage_shape': (4000, 4000, 23),
'storage_path': '/path/to/storage/units/nbar_140_-36_1991.nc',
},
# ...
# <more storage_unit sub-descriptors>
# ...
},
# ...
# <more storage unit type sub-descriptors>
# ...
}
}
        .. seealso:: :meth:`get_data`
"""
query = Query.from_descriptor_request(descriptor_request)
descriptor = {}
datasets_by_type = self._search_datasets_by_type(**query.search_terms)
for dataset_type, datasets in datasets_by_type.items():
dataset_descriptor = self._get_descriptor_for_dataset(dataset_type, datasets,
query.group_by,
query.geopolygon,
include_storage_units)
if dataset_descriptor:
descriptor[dataset_type.name] = dataset_descriptor
return descriptor
def _search_datasets_by_type(self, **query):
datasets = self.datacube.index.datasets.search(**query)
datasets_by_type = defaultdict(list)
for dataset in datasets:
datasets_by_type[dataset.type].append(dataset)
return datasets_by_type
def _get_dataset_groups(self, query):
dataset_groups = {}
group_by = query.group_by
datasets_by_type = self._search_datasets_by_type(**query.search_terms)
for dataset_type, datasets in datasets_by_type.items():
if dataset_type.grid_spec:
dataset_groups[dataset_type] = self.datacube.product_sources(datasets,
group_by.group_by_func,
group_by.dimension,
group_by.units)
return dataset_groups
def get_data(self, data_request, dataset_groups=None, return_all=False):
"""
        Gets the data for an ``ExecutionEngine`` query.
Function to return composite in-memory arrays.
:param data_request: A dictionary containing the query parameters. All fields are optional.
**Search fields**
Search for any of the fields returned by :meth:`list_fields()`, using a value from
:meth:`list_field_values()`.
**Storage type field**
The ``storage_type`` can be any of the keys returned by :meth:`get_descriptor()` or
:meth:`list_storage_type_names()`.
**Variables field**
The ``variables`` field is a list of variable names matching those listed by :meth:`get_descriptor()` or
:meth:`list_variables()`.
If not specified, all variables are returned.
**Dimensions field**
The ``dimensions`` field can specify a range by label and/or index, and optionally a CRS to interpret
the label range request.
Times can be specified as :class:`datetime` objects, tuples of (year, month, day) or
(year, month, day, hour, minute, second), or by seconds since the Unix epoch.
Strings may also be used, with ISO format preferred.
The default CRS interpretation for geospatial dimensions (longitude/latitude or x/y) is WGS84/EPSG:4326,
even if the resulting dimension is in another projection.
The ``array_range`` field can be used to subset the request.
::
descriptor = {
'platform': 'LANDSAT_8',
'product': 'NBAR',
# <search_field>: <search value>,
'storage_type': 'ls8_nbar',
'variables': ('B30', 'B40'),
'dimensions': {
'x': {
'range': (140, 142),
'array_range': (0, 127),
'crs': 'EPSG:4326'
},
'y': {
'range': (-36, -35),
'array_range': (0, 127),
'crs': 'EPSG:4326'
},
'time': {
'range': (1293840000, 1325376000),
'array_range': (0, 127)
}
},
}
:type data_request: dict or None
:param dataset_groups: dict mapping dataset_type to sequence of Group pairs.
If not provided, the index is queried.
:param return_all: If True, data from all requested products is returned,
otherwise only the first result is returned.
:type dataset_groups: dict{dataset_type: list(Group(key, list(datasets)))}
        :return: A dict mapping each matching product name to its data dict; if ``return_all``
            is False, only the first product's data dict is returned.
.. seealso:: :meth:`get_descriptor`
"""
query = Query.from_descriptor_request(data_request)
# If the user has not provided `groups` from get_descriptor call, retrieve them from the index
if dataset_groups is None:
dataset_groups = self._get_dataset_groups(query)
all_datasets = {dt.name: self._get_data_for_type(dt, sources, query.measurements,
query.geopolygon, query.slices)
for dt, sources in dataset_groups.items()}
if all_datasets and not return_all:
type_name, data_descriptor = all_datasets.popitem()
return data_descriptor
return all_datasets
def _get_data_for_type(self, dataset_type, sources, measurements, geopolygon, slices=None):
dt_data = {}
datasets = list(chain.from_iterable(g for _, g in numpy.ndenumerate(sources)))
if not geopolygon:
geopolygon = get_bounds(datasets, dataset_type.grid_spec.crs)
geobox = GeoBox.from_geopolygon(geopolygon.to_crs(dataset_type.grid_spec.crs),
dataset_type.grid_spec.resolution)
if slices:
_rename_spatial_keys(slices, geobox.dimensions)
geo_slices = [slices.get(dim, slice(None)) for dim in geobox.dimensions]
geobox = geobox[geo_slices]
for dim, dim_slice in slices.items():
if dim in sources.dims:
                    sources = sources.isel(**{dim: dim_slice})
dt_data.update(self._get_data_for_dims(dataset_type, sources, geobox))
dt_data.update(self._get_data_for_measurement(dataset_type, sources, measurements, geobox))
return dt_data
@staticmethod
def _get_data_for_dims(dataset_type, sources, geobox):
dims = dataset_type.dimensions
dt_data = {
'dimensions': list(dims),
'indices': {},
'element_sizes': [],
'coordinate_reference_systems': [],
'size': tuple()
}
for dim in dims:
if dim in dataset_type.grid_spec.dimensions:
dt_data['indices'][dim] = geobox.coordinates[dim].labels
dim_i = dataset_type.grid_spec.dimensions.index(dim)
dt_data['element_sizes'].append(abs(dataset_type.grid_spec.resolution[dim_i]))
dt_data['coordinate_reference_systems'].append({
'reference_system_definition': str(geobox.crs),
'reference_system_unit': geobox.coordinates[dim].units
})
dt_data['size'] += (geobox.coordinates[dim].labels.size, )
elif dim in sources.dims:
coords = sources.coords[dim].values
dt_data['indices'][dim] = coords
dt_data['size'] += (coords.size, )
dt_data['coordinate_reference_systems'].append({
'reference_system_definition': 'UTC',
'reference_system_unit': 'seconds since 1970-01-01 00:00:00'
})
if len(coords) < 2:
dt_data['element_sizes'].append(numpy.NaN)
else:
dt_data['element_sizes'].append(abs(coords[0] - coords[1]))
else:
raise NotImplementedError('Unsupported dimension type: ', dim)
return dt_data
def _get_data_for_measurement(self, dataset_type, sources, measurements, geobox):
dt_data = {
'arrays': {}
}
for measurement_name, measurement in dataset_type.measurements.items():
if measurements is None or measurement_name in measurements:
dt_data['arrays'][measurement_name] = self.datacube.measurement_data_lazy(sources, geobox, measurement)
return dt_data
def list_products(self):
"""
Lists the products in the datacube.
:return: list of dictionaries describing the products
"""
return [datatset_type_to_row(dataset_type) for dataset_type in self.datacube.index.datasets.types.get_all()]
def list_variables(self):
"""
Lists the variables of products in the datacube.
Variables are also referred to as measurements or bands.
:return: list of dictionaries describing the variables
"""
return self.datacube.list_measurements(with_pandas=False)
def __repr__(self):
return "API<datacube={!r}>".format(self.datacube.index)
SPATIAL_KEYS = [('latitude', 'lat', 'y'), ('longitude', 'lon', 'long', 'x')]
def _rename_spatial_keys(dictionary, dimensions):
for alt_keys in SPATIAL_KEYS:
match = [dim_key for dim_key in dimensions if dim_key in alt_keys]
for dim_key in match:
for old_key in alt_keys:
if old_key in dictionary:
dictionary[dim_key] = dictionary.pop(old_key)
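# Example (illustrative values): with dimensions ('y', 'x'),
# _rename_spatial_keys({'lat': slice(0, 100), 'lon': slice(0, 200)}, ('y', 'x'))
# rewrites the dict in place to {'y': slice(0, 100), 'x': slice(0, 200)}.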
def main():
agdc_api = API()
desc = agdc_api.get_descriptor()
#print(desc)
if __name__ == '__main__':
main()
|
the-stack_0_25829
|
from pydantic import Field, constr, validator
import importlib
import hashlib
import json
from typing import Any, List, Dict, Tuple, Optional, Union
import numpy
import qcelemental  # needed for the periodictable lookup in __init__
from pathlib import Path
from cmselemental.types import Array
from mmelemental.util.data import (
float_prep,
NUMPY_UNI,
NUMPY_INT,
NUMPY_FLOAT,
GEOMETRY_NOISE,
MASS_NOISE,
CHARGE_NOISE,
)
# MM models
from mmelemental.models.base import ProtoModel, Provenance, provenance_stamp
from mmelemental.models.util.output import FileOutput
from .nonbonded import NonBonded
from .bonded import Bonds, Angles, Dihedrals
_trans_nfound_msg = "MMElemental translation requires mmic_translator. \
Solve by: pip install mmic_translator"
mmschema_forcefield_default = "mmschema_forcefield"
__all__ = ["ForceField", "ForcesInput"]
class ImproperDihedrals(ProtoModel):
im_dihedrals: Optional[Array[Array[float]]] = Field(
None, description="Improper dihedral/torsion parameters."
)
im_dihedrals_type: Optional[List[str]] = Field(
None,
description="Improper dihedral potential form e.g. harmonic, fourier, etc.",
)
class ForcesInput(ProtoModel):
method: str = Field(..., description="")
cutoff: Optional[float] = Field(None, description="")
cutoff_units: Optional[str] = Field("angstrom", description="")
modifier: Optional[str] = Field(None, description="")
dielectric: Optional[float] = Field(numpy.inf, description="")
correct: Optional[str] = Field(None, description="")
class ForceField(ProtoModel):
schema_name: constr(
strip_whitespace=True, regex="^(mmschema_forcefield)$"
) = Field( # type: ignore
mmschema_forcefield_default,
description=(
f"The MMSchema specification to which this model conforms. Explicitly fixed as {mmschema_forcefield_default}."
),
)
schema_version: int = Field( # type: ignore
0,
description="The version number of ``schema_name`` to which this model conforms.",
)
name: Optional[str] = Field( # type: ignore
None,
description="Common or human-readable name to assign to this model. This field can be arbitrary.",
)
comment: Optional[str] = Field( # type: ignore
None,
description="Additional comments for this model. Intended for pure human/user consumption and clarity.",
)
symbols: Array[str] = Field( # type: ignore
...,
description="An ordered (natom,) list of particle (e.g. atomic elemental) symbols.",
)
nonbonded: Optional[Union[NonBonded, List[NonBonded]]] = Field( # type: ignore
None, description="Non-bonded parameters model."
)
bonds: Optional[Union[Bonds, List[Bonds]]] = Field( # type: ignore
None, description="2-body covalent bond model."
)
angles: Optional[Union[Angles, List[Angles]]] = Field( # type: ignore
None, description="3-body angular bond model."
)
dihedrals: Optional[Union[Dihedrals, List[Dihedrals]]] = Field( # type: ignore
None, description="4-body torsional bond model."
)
# im_dihedrals: Optional[Union[ImproperDihedrals, List[Dihedrals]]] = Field( # type: ignore
# None, description="Improper dihedral bond model."
# )
charges: Optional[Array[numpy.dtype(NUMPY_FLOAT)]] = Field(
None, description="Atomic charges. Default unit is in elementary charge units."
)
charges_units: Optional[str] = Field("e", description="Atomic charge unit.")
masses: Optional[Array[numpy.dtype(NUMPY_FLOAT)]] = Field( # type: ignore
None,
description="List of atomic masses. If not provided, the mass of each atom is inferred from its most common isotope. "
"If this is provided, it must be the same length as ``symbols``.",
)
masses_units: Optional[str] = Field( # type: ignore
"amu",
description="Units for atomic masses. Defaults to unified atomic mass unit.",
)
charge_groups: Optional[Array[numpy.dtype(NUMPY_INT)]] = Field(
None, description="Charge groups per atom. Length of the array must be natoms."
)
exclusions: Optional[str] = Field( # type: ignore
None,
description="Which pairs of bonded atoms to exclude from non-bonded calculations. \
The rules to apply in choosing bonded exclusions are specified in the configuration file using the exclude parameter. The \
choices for exclusions are None, 1-2, 1-3, 1-4, etc. With None, no atom pairs are excluded. With 1-2, only atoms that are connected \
via a linear bond are excluded. With 1-3, any pair of atoms connected via a bond or bonded to a common third atom are excluded. \
With 1-4, any atoms that are connected by a linear bond, or a sequence of two bonds, or a sequence of three bonds, are excluded. \
With scaled1-4, exclusions are applied in the same manner as the 1-3 setting, but those pairs that are connected by a sequence of \
3 bonds are calculated using the modified 1-4 methods described rather than the standard force calculations.",
)
inclusions: Optional[str] = Field( # type: ignore
None,
description="Which pairs of 1-4 excluded bonded atoms to include in non-bonded calculations.",
)
# switch_width="1.0 * angstrom",
# cutoff="9.0 * angstrom" ,
# method="cutoff",
identifier: Optional[str] = Field( # type: ignore
None, description="Forcefield unique identifier e.g. charmm27, amber99, etc."
)
defs: Optional[List[str]] = Field( # type: ignore
None,
description="Particle definition. For atomic forcefields, this could be the atom type (e.g. HH31) or SMIRKS (OFF) representation. "
"The type names are associated with the atomic elements defined in other objects e.g. see the :class:``Molecule`` model.",
)
substructs: Optional[List[Tuple[str, int]]] = Field(
None,
description="A list of substructure names the particles belong to. E.g. [('ALA', 4), ('ACE', 0)] means atom1 belong to residue ALA (alanine) "
"with residue number 4, while atom2 belongs to residue ACE (acetyl) with residue number 0.",
)
templates: Optional[Dict[str, List[str]]] = Field(
None,
description="A list of template definitions typically in terms of atom types. E.g. {'ACE': ['HH31', 'CH3', 'HH32', 'HH33', 'C', 'O']}.",
)
combination_rule: Optional[str] = Field(
"Lorentz-Berthelot", description="Combination rule for the force field."
)
atomic_numbers: Optional[Array[numpy.int16]] = Field( # type: ignore
None,
description="An optional ordered 1-D array-like object of atomic numbers of shape (nat,). Index "
"matches the 0-indexed indices of all other per-atom settings like ``symbols``. "
"Values are inferred from the ``symbols`` list if not explicitly set. ",
)
# Extras
provenance: Provenance = Field( # type: ignore
provenance_stamp(__name__),
description="The provenance information about how this object (and its attributes) were generated, "
"provided, and manipulated.",
)
extras: Dict[str, Any] = Field( # type: ignore
None,
description="Additional information to bundle with the object. Use for schema development and scratch space.",
)
class Config(ProtoModel.Config):
repr_style = lambda self: [("name", self.name), ("hash", self.get_hash()[:7])]
def __init__(self, **kwargs: Optional[Dict[str, Any]]) -> None:
"""
        Initializes the force field object from dictionary-like values.
Parameters
----------
**kwargs : Any
            The values of the ForceField object attributes.
"""
kwargs["schema_name"] = kwargs.pop("schema_name", "mmschema_forcefield")
kwargs["schema_version"] = kwargs.pop("schema_version", 0)
atomic_numbers = kwargs.get("atomic_numbers")
if atomic_numbers is not None:
if kwargs.get("symbols") is None:
kwargs["symbols"] = [
qcelemental.periodictable.to_E(x) for x in atomic_numbers
]
# We are pulling out the values *explicitly* so that the pydantic skip_defaults works as expected
# All attributes set below are equivalent to the default set.
super().__init__(**kwargs)
values = self.__dict__
if not values.get("name"):
values["name"] = "forcefield"
# Representation -> used by qcelemental's __repr__
def __repr_args__(self) -> "ReprArgs":
forms = [
form.__class__.__name__
for form in (self.nonbonded, self.bonds, self.angles, self.dihedrals)
if form
]
return [("name", self.name), ("form", forms), ("hash", self.get_hash()[:7])]
# Validators
@validator("charges")
def _charges_length(cls, v, values):
assert len(v.shape) == 1, "Atomic charges must be a 1D array!"
return v
# Constructors
@classmethod
def from_file(
cls,
filename: str,
dtype: Optional[str] = None,
translator: Optional[str] = None,
**kwargs,
) -> "ForceField":
"""
Constructs a ForceField object from a file.
Parameters
----------
filename: str
The topology or FF filename to build from.
dtype: Optional[str], optional
The type of file to interpret e.g. psf. If unset, mmelemental attempts to discover the file type.
translator: Optional[str], optional
Translator name e.g. mmic_parmed. Takes precedence over dtype. If unset, MMElemental attempts
to find an appropriate translator if it is registered in the :class:`TransComponent` class.
**kwargs: Optional[Dict[str, Any]], optional
Any additional keywords to pass to the constructor.
Returns
-------
ForceField
A constructed ForceField object.
"""
file_ext = Path(filename).suffix if filename else None
if file_ext in [".json"]:
dtype = file_ext.strip(".")
# Raw string type, read and pass through
if dtype == "json":
with open(filename, "r") as infile:
data = json.load(infile)
dtype = "dict"
else:
raise KeyError(f"Data type not supported: {dtype}.")
return cls.from_data(data, dtype=dtype, **kwargs)
fileobj = FileOutput(path=filename) if filename else None
dtype = dtype or fileobj.ext.strip(".")
ext = "." + dtype
# Generic translator component
try:
from mmic_translator.components import TransComponent
except Exception:
TransComponent = None
if not translator:
if not TransComponent:
raise ModuleNotFoundError(_trans_nfound_msg)
from mmic_translator.components.supported import reg_trans
reg_trans = list(reg_trans)
while not translator:
translator = TransComponent.find_ffread_tk(ext, trans=reg_trans)
if not translator:
raise ValueError(
f"Could not read top file with ext {ext}. Please install an appropriate translator."
)
# Make sure we can import the translator module
        if importlib.util.find_spec(translator):
            mod = importlib.import_module(translator)
        else:
            raise NotImplementedError(
                f"Translator {translator} not available. Make sure it is properly installed."
            )
tkff_class = mod._classes_map.get("ForceField")
if not tkff_class:
raise ValueError(
f"No ForceField model found while looking in translator: {translator}."
)
tkff = tkff_class.from_file(
filename=fileobj.abs_path if fileobj else None,
dtype=dtype,
)
return cls.from_data(tkff, dtype=tkff.dtype, **kwargs)
@classmethod
def from_data(cls, data: Any, **kwargs) -> "ForceField":
"""
Constructs a ForceField object from a data object.
Parameters
----------
data: Any
Data to construct ForceField from.
**kwargs: Optional[Dict[str, Any]], optional
Additional kwargs to pass to the constructors.
Returns
-------
ForceField
A constructed ForceField object.
"""
if isinstance(data, dict):
kwargs.pop("dtype", None) # remove dtype if supplied
kwargs.update(data)
return cls(**kwargs)
return data.to_schema(**kwargs)
def to_file(
self,
filename: str,
dtype: Optional[str] = None,
translator: Optional[str] = None,
**kwargs: Dict[str, Any],
) -> None:
"""Writes the ForceField to a file.
Parameters
----------
filename : str
The filename to write to
dtype : Optional[str], optional
The type of file to write (e.g. psf, top, etc.), attempts to infer dtype from
file extension if not provided.
translator: Optional[str], optional
Translator name e.g. mmic_parmed. Takes precedence over dtype. If unset, MMElemental attempts
to find an appropriate translator if it is registered in the :class:`TransComponent` class.
**kwargs: Optional[str, Dict], optional
Additional kwargs to pass to the constructor.
"""
if not dtype:
from pathlib import Path
ext = Path(filename).suffix
else:
ext = "." + dtype
mode = kwargs.pop("mode", "w")
if ext == ".json":
stringified = self.json(**kwargs)
with open(filename, mode) as fp:
fp.write(stringified)
return
# Generic translator component
try:
from mmic_translator.components import TransComponent
except Exception:
TransComponent = None
if not translator:
if not TransComponent:
raise ModuleNotFoundError(_trans_nfound_msg)
translator = TransComponent.find_ffwrite_tk(ext)
if not translator:
raise NotImplementedError(
f"File extension {ext} not supported with any installed translators."
)
tkff = self.to_data(translator=translator, **kwargs)
tkff.to_file(filename, dtype=dtype, **kwargs) # pass dtype?
def to_data(
self,
dtype: Optional[str] = None,
translator: Optional[str] = None,
**kwargs: Dict[str, Any],
) -> "ToolkitModel":
"""
Constructs a toolkit-specific forcefield from MMSchema ForceField.
Which toolkit-specific component is called depends on which package is installed on the system.
Parameters
----------
translator: Optional[str], optional
Translator name e.g. mmic_parmed. Takes precedence over dtype. If unset, MMElemental attempts
to find an appropriate translator if it is registered in the :class:`TransComponent` class.
dtype: str, optional
Data type e.g. mdanalysis, parmed, etc.
**kwargs: Optional[Dict[str, Any]]
Additional kwargs to pass to the constructors.
Results
-------
ToolkitModel
Toolkit-specific ForceField object
"""
try:
from mmic_translator.components import TransComponent
except Exception:
TransComponent = None
if not translator:
if not dtype:
raise ValueError(
f"Either translator or dtype must be supplied when calling {__name__}."
)
if not TransComponent:
raise ModuleNotFoundError(_trans_nfound_msg)
translator = TransComponent.find_trans(dtype)
if importlib.util.find_spec(translator):
mod = importlib.import_module(translator)
tkff = mod._classes_map.get("ForceField")
if not tkff:
raise ValueError(
f"No ForceField model found while looking in translator: {translator}."
)
return tkff.from_schema(self)
else:
raise NotImplementedError(
f"Translator {translator} not available. Make sure it is properly installed."
)
def __eq__(self, other):
"""
Checks if two models are identical. This is a forcefield identity defined
by scientific terms, and not programing terms, so it's less rigorous than
a programmatic equality or a memory equivalent `is`.
"""
if isinstance(other, dict):
other = self.__class__(**other)
elif isinstance(other, self.__class__):
pass
else:
raise TypeError(
f"Comparison between {self.__class__} and {type(other)} is not supported."
)
return self.get_hash() == other.get_hash()
@property
def hash_fields(self):
return [
"symbols",
"masses",
"masses_units",
"charges",
"charges_units",
"nonbonded",
"bonds",
"angles",
"dihedrals",
# "im_dihedrals",
"combination_rule",
"exclusions",
"inclusions",
]
def get_hash(self):
"""
Returns the hash of the force field object.
"""
m = hashlib.sha1()
concat = ""
# np.set_printoptions(precision=16)
for field in self.hash_fields:
data = getattr(self, field)
if data is not None:
if field == "symbols":
concat += json.dumps(data, default=lambda x: x.ravel().tolist())
if field == "charges":
data = float_prep(data, CHARGE_NOISE)
concat += json.dumps(data, default=lambda x: x.ravel().tolist())
if field == "masses":
data = float_prep(data, MASS_NOISE)
concat += json.dumps(data, default=lambda x: x.ravel().tolist())
if field in (
"nonbonded",
"bonds",
"angles",
"dihedrals",
"im_dihedrals",
):
if not isinstance(data, dict):
data = data.dict()
concat += json.dumps(data, default=lambda x: x.ravel().tolist())
m.update(concat.encode("utf-8"))
return m.hexdigest()
@property
def is_topology(self):
"""Returns True if model contains "topological" data rather than forcefield definition."""
        return self.defs is None
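# Hedged sketch, not part of the original module: building a minimal ForceField
# (no bonded or non-bonded terms) from in-memory data. The field values below are
# illustrative only, not a validated water model.
def _minimal_forcefield_example():
    ff = ForceField(
        symbols=["O", "H", "H"],
        charges=[-0.8, 0.4, 0.4],
        masses=[15.999, 1.008, 1.008],
        name="water-like-sketch",
    )
    # Two ForceField objects with the same scientific content compare equal via __eq__,
    # which is backed by this hash.
    return ff.get_hash()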
|
the-stack_0_25832
|
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_int,
c_char,
c_char_p,
c_void_p,
c_bool,
c_uint8,
Structure,
byref,
cast,
create_string_buffer,
)
from enum import IntEnum
from datetime import timedelta
from ..ocf import OcfLib
from .shared import (
Uuid,
OcfError,
CacheLineSize,
CacheLines,
OcfCompletion,
SeqCutOffPolicy,
)
from ..utils import Size, struct_to_dict
from .core import Core
from .queue import Queue
from .stats.cache import CacheInfo
from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
class Backfill(Structure):
_fields_ = [("_max_queue_size", c_uint32), ("_queue_unblock_size", c_uint32)]
class CacheConfig(Structure):
MAX_CACHE_NAME_SIZE = 32
_fields_ = [
("_name", c_char * MAX_CACHE_NAME_SIZE),
("_cache_mode", c_uint32),
("_eviction_policy", c_uint32),
("_promotion_policy", c_uint32),
("_cache_line_size", c_uint64),
("_metadata_layout", c_uint32),
("_metadata_volatile", c_bool),
("_backfill", Backfill),
("_locked", c_bool),
("_pt_unaligned_io", c_bool),
("_use_submit_io_fast", c_bool),
]
class CacheDeviceConfig(Structure):
_fields_ = [
("_uuid", Uuid),
("_volume_type", c_uint8),
("_cache_line_size", c_uint64),
("_force", c_bool),
("_min_free_ram", c_uint64),
("_perform_test", c_bool),
("_discard_on_start", c_bool),
]
class ConfValidValues:
promotion_nhit_insertion_threshold_range = range(2, 1000)
promotion_nhit_trigger_threshold_range = range(0, 100)
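    # Note: Python ranges exclude the stop value, so the insertion threshold
    # accepts 2..999 and the trigger threshold accepts 0..99.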
class CacheMode(IntEnum):
WT = 0
WB = 1
WA = 2
PT = 3
WI = 4
WO = 5
DEFAULT = WT
def lazy_write(self):
return self.value in [CacheMode.WB, CacheMode.WO]
def write_insert(self):
return self.value not in [CacheMode.PT, CacheMode.WA, CacheMode.WI]
def read_insert(self):
return self.value not in [CacheMode.PT, CacheMode.WO]
class EvictionPolicy(IntEnum):
LRU = 0
DEFAULT = LRU
class PromotionPolicy(IntEnum):
ALWAYS = 0
NHIT = 1
DEFAULT = ALWAYS
class NhitParams(IntEnum):
INSERTION_THRESHOLD = 0
TRIGGER_THRESHOLD = 1
class CleaningPolicy(IntEnum):
NOP = 0
ALRU = 1
ACP = 2
DEFAULT = ALRU
class AlruParams(IntEnum):
WAKE_UP_TIME = 0
STALE_BUFFER_TIME = 1
FLUSH_MAX_BUFFERS = 2
ACTIVITY_THRESHOLD = 3
class AcpParams(IntEnum):
WAKE_UP_TIME = 0
FLUSH_MAX_BUFFERS = 1
class MetadataLayout(IntEnum):
STRIPING = 0
SEQUENTIAL = 1
DEFAULT = STRIPING
class Cache:
DEFAULT_BACKFILL_QUEUE_SIZE = 65536
DEFAULT_BACKFILL_UNBLOCK = 60000
DEFAULT_PT_UNALIGNED_IO = False
DEFAULT_USE_SUBMIT_FAST = False
def __init__(
self,
owner,
name: str = "cache",
cache_mode: CacheMode = CacheMode.DEFAULT,
eviction_policy: EvictionPolicy = EvictionPolicy.DEFAULT,
promotion_policy: PromotionPolicy = PromotionPolicy.DEFAULT,
cache_line_size: CacheLineSize = CacheLineSize.DEFAULT,
metadata_layout: MetadataLayout = MetadataLayout.DEFAULT,
metadata_volatile: bool = False,
max_queue_size: int = DEFAULT_BACKFILL_QUEUE_SIZE,
queue_unblock_size: int = DEFAULT_BACKFILL_UNBLOCK,
locked: bool = False,
pt_unaligned_io: bool = DEFAULT_PT_UNALIGNED_IO,
use_submit_fast: bool = DEFAULT_USE_SUBMIT_FAST,
):
self.device = None
self.started = False
self.owner = owner
self.cache_line_size = cache_line_size
self.cfg = CacheConfig(
_name=name.encode("ascii"),
_cache_mode=cache_mode,
_eviction_policy=eviction_policy,
_promotion_policy=promotion_policy,
_cache_line_size=cache_line_size,
_metadata_layout=metadata_layout,
_metadata_volatile=metadata_volatile,
_backfill=Backfill(
_max_queue_size=max_queue_size, _queue_unblock_size=queue_unblock_size
),
_locked=locked,
_pt_unaligned_io=pt_unaligned_io,
_use_submit_fast=use_submit_fast,
)
self.cache_handle = c_void_p()
self._as_parameter_ = self.cache_handle
self.io_queues = []
self.cores = []
def start_cache(self, default_io_queue: Queue = None, mngt_queue: Queue = None):
status = self.owner.lib.ocf_mngt_cache_start(
self.owner.ctx_handle, byref(self.cache_handle), byref(self.cfg)
)
if status:
raise OcfError("Creating cache instance failed", status)
self.owner.caches.append(self)
self.mngt_queue = mngt_queue or Queue(self, "mgmt-{}".format(self.get_name()))
if default_io_queue:
self.io_queues += [default_io_queue]
else:
self.io_queues += [Queue(self, "default-io-{}".format(self.get_name()))]
status = self.owner.lib.ocf_mngt_cache_set_mngt_queue(self, self.mngt_queue)
if status:
raise OcfError("Error setting management queue", status)
self.started = True
def change_cache_mode(self, cache_mode: CacheMode):
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_set_mode(self.cache_handle, cache_mode)
self.write_unlock()
if status:
raise OcfError("Error changing cache mode", status)
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
self.cache_handle, cleaning_policy
)
self.write_unlock()
if status:
raise OcfError("Error changing cleaning policy", status)
def set_cleaning_policy_param(
self, cleaning_policy: CleaningPolicy, param_id, param_value
):
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_cleaning_set_param(
self.cache_handle, cleaning_policy, param_id, param_value
)
self.write_unlock()
if status:
raise OcfError("Error setting cleaning policy param", status)
def set_promotion_policy(self, promotion_policy: PromotionPolicy):
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_promotion_set_policy(
self.cache_handle, promotion_policy
)
self.write_unlock()
if status:
raise OcfError("Error setting promotion policy", status)
def get_promotion_policy_param(self, promotion_type, param_id):
self.read_lock()
param_value = c_uint64()
status = self.owner.lib.ocf_mngt_cache_promotion_get_param(
self.cache_handle, promotion_type, param_id, byref(param_value)
)
self.read_unlock()
if status:
raise OcfError("Error getting promotion policy parameter", status)
return param_value
def set_promotion_policy_param(self, promotion_type, param_id, param_value):
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_promotion_set_param(
self.cache_handle, promotion_type, param_id, param_value
)
self.write_unlock()
if status:
raise OcfError("Error setting promotion policy parameter", status)
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
self.write_lock()
status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
self.cache_handle, policy
)
self.write_unlock()
if status:
raise OcfError("Error setting cache seq cut off policy", status)
def configure_device(
self, device, force=False, perform_test=True, cache_line_size=None
):
self.device = device
self.device_name = device.uuid
self.dev_cfg = CacheDeviceConfig(
_uuid=Uuid(
_data=cast(
create_string_buffer(self.device_name.encode("ascii")), c_char_p
),
_size=len(self.device_name) + 1,
),
_volume_type=device.type_id,
_cache_line_size=cache_line_size
if cache_line_size
else self.cache_line_size,
_force=force,
_min_free_ram=0,
_perform_test=perform_test,
_discard_on_start=False,
)
def attach_device(
self, device, force=False, perform_test=False, cache_line_size=None
):
self.configure_device(device, force, perform_test, cache_line_size)
self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
device.owner.lib.ocf_mngt_cache_attach(
self.cache_handle, byref(self.dev_cfg), c, None
)
c.wait()
self.write_unlock()
if c.results["error"]:
raise OcfError("Attaching cache device failed", c.results["error"])
def load_cache(self, device):
self.configure_device(device)
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
device.owner.lib.ocf_mngt_cache_load(
self.cache_handle, byref(self.dev_cfg), c, None
)
c.wait()
if c.results["error"]:
raise OcfError("Loading cache device failed", c.results["error"])
@classmethod
def load_from_device(cls, device, name="cache"):
c = cls(name=name, owner=device.owner)
c.start_cache()
try:
c.load_cache(device)
except: # noqa E722
c.stop()
raise
return c
@classmethod
def start_on_device(cls, device, **kwargs):
c = cls(owner=device.owner, **kwargs)
c.start_cache()
try:
c.attach_device(device, force=True)
except: # noqa E722
c.stop()
raise
return c
def put(self):
self.owner.lib.ocf_mngt_cache_put(self.cache_handle)
def get(self):
status = self.owner.lib.ocf_mngt_cache_get(self.cache_handle)
if status:
raise OcfError("Couldn't get cache instance", status)
def read_lock(self):
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_read_lock(self.cache_handle, c, None)
c.wait()
if c.results["error"]:
raise OcfError("Couldn't lock cache instance", c.results["error"])
def write_lock(self):
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_lock(self.cache_handle, c, None)
c.wait()
if c.results["error"]:
raise OcfError("Couldn't lock cache instance", c.results["error"])
def read_unlock(self):
self.owner.lib.ocf_mngt_cache_read_unlock(self.cache_handle)
def write_unlock(self):
self.owner.lib.ocf_mngt_cache_unlock(self.cache_handle)
def add_core(self, core: Core):
self.write_lock()
c = OcfCompletion(
[
("cache", c_void_p),
("core", c_void_p),
("priv", c_void_p),
("error", c_int),
]
)
self.owner.lib.ocf_mngt_cache_add_core(
self.cache_handle, byref(core.get_cfg()), c, None
)
c.wait()
if c.results["error"]:
self.write_unlock()
raise OcfError("Failed adding core", c.results["error"])
core.cache = self
core.handle = c.results["core"]
self.cores.append(core)
self.write_unlock()
def remove_core(self, core: Core):
self.write_lock()
c = OcfCompletion([("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_remove_core(core.handle, c, None)
c.wait()
self.write_unlock()
if c.results["error"]:
raise OcfError("Failed removing core", c.results["error"])
self.cores.remove(core)
def get_stats(self):
cache_info = CacheInfo()
usage = UsageStats()
req = RequestsStats()
block = BlocksStats()
errors = ErrorsStats()
self.read_lock()
status = self.owner.lib.ocf_cache_get_info(self.cache_handle, byref(cache_info))
if status:
self.read_unlock()
raise OcfError("Failed getting cache info", status)
status = self.owner.lib.ocf_stats_collect_cache(
self.cache_handle, byref(usage), byref(req), byref(block), byref(errors)
)
if status:
self.read_unlock()
raise OcfError("Failed getting stats", status)
line_size = CacheLineSize(cache_info.cache_line_size)
cache_name = self.owner.lib.ocf_cache_get_name(self).decode("ascii")
self.read_unlock()
return {
"conf": {
"attached": cache_info.attached,
"volume_type": self.owner.volume_types[cache_info.volume_type],
"size": CacheLines(cache_info.size, line_size),
"inactive": {
"occupancy": CacheLines(
cache_info.inactive.occupancy.value, line_size
),
"dirty": CacheLines(cache_info.inactive.dirty.value, line_size),
"clean": CacheLines(cache_info.inactive.clean.value, line_size),
},
"occupancy": CacheLines(cache_info.occupancy, line_size),
"dirty": CacheLines(cache_info.dirty, line_size),
"dirty_initial": CacheLines(cache_info.dirty_initial, line_size),
"dirty_for": timedelta(seconds=cache_info.dirty_for),
"cache_mode": CacheMode(cache_info.cache_mode),
"fallback_pt": {
"error_counter": cache_info.fallback_pt.error_counter,
"status": cache_info.fallback_pt.status,
},
"state": cache_info.state,
"eviction_policy": EvictionPolicy(cache_info.eviction_policy),
"cleaning_policy": CleaningPolicy(cache_info.cleaning_policy),
"promotion_policy": PromotionPolicy(cache_info.promotion_policy),
"cache_line_size": line_size,
"flushed": CacheLines(cache_info.flushed, line_size),
"core_count": cache_info.core_count,
"metadata_footprint": Size(cache_info.metadata_footprint),
"metadata_end_offset": Size(cache_info.metadata_end_offset),
"cache_name": cache_name,
},
"block": struct_to_dict(block),
"req": struct_to_dict(req),
"usage": struct_to_dict(usage),
"errors": struct_to_dict(errors),
}
def reset_stats(self):
self.owner.lib.ocf_core_stats_initialize_all(self.cache_handle)
def get_default_queue(self):
if not self.io_queues:
raise Exception("No queues added for cache")
return self.io_queues[0]
def save(self):
if not self.started:
raise Exception("Not started!")
        self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_save(self.cache_handle, c, None)
c.wait()
        self.write_unlock()
if c.results["error"]:
raise OcfError("Failed saving cache", c.results["error"])
def stop(self):
if not self.started:
raise Exception("Already stopped!")
self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_stop(self.cache_handle, c, None)
c.wait()
if c.results["error"]:
self.write_unlock()
raise OcfError("Failed stopping cache", c.results["error"])
self.mngt_queue.put()
del self.io_queues[:]
self.started = False
self.write_unlock()
self.owner.caches.remove(self)
def flush(self):
self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_flush(self.cache_handle, c, None)
c.wait()
self.write_unlock()
if c.results["error"]:
raise OcfError("Couldn't flush cache", c.results["error"])
def get_name(self):
self.read_lock()
try:
return str(self.owner.lib.ocf_cache_get_name(self), encoding="ascii")
except: # noqa E722
raise OcfError("Couldn't get cache name")
finally:
self.read_unlock()
lib = OcfLib.getInstance()
lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
lib.ocf_mngt_cache_add_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p]
lib.ocf_cache_get_name.argtypes = [c_void_p]
lib.ocf_cache_get_name.restype = c_char_p
lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [c_void_p, c_uint32]
lib.ocf_mngt_cache_cleaning_set_policy.restype = c_int
lib.ocf_mngt_core_set_seq_cutoff_policy_all.argtypes = [c_void_p, c_uint32]
lib.ocf_mngt_core_set_seq_cutoff_policy_all.restype = c_int
lib.ocf_stats_collect_cache.argtypes = [
c_void_p,
c_void_p,
c_void_p,
c_void_p,
c_void_p,
]
lib.ocf_stats_collect_cache.restype = c_int
lib.ocf_cache_get_info.argtypes = [c_void_p, c_void_p]
lib.ocf_cache_get_info.restype = c_int
lib.ocf_mngt_cache_cleaning_set_param.argtypes = [
c_void_p,
c_uint32,
c_uint32,
c_uint32,
]
lib.ocf_mngt_cache_cleaning_set_param.restype = c_int
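# Editor's note: a hedged usage sketch of the Cache API defined above. Only the Cache/Core
# methods shown in this file are real; the volume and context fixtures (RamVolume,
# Core.using_device, Size.from_MiB arguments) are assumed to come from the surrounding
# pyocf test framework and are illustrative stand-ins here.
#
#   cache_device = RamVolume(Size.from_MiB(50))          # assumed test fixture
#   core_device = RamVolume(Size.from_MiB(50))           # assumed test fixture
#   cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
#   core = Core.using_device(core_device)                # assumed constructor
#   cache.add_core(core)
#   stats = cache.get_stats()                            # dict built in get_stats() above
#   assert stats["conf"]["core_count"] == 1
#   cache.stop()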
|
the-stack_0_25833
|
import logging
import os
import re
import utils.env_utils as env_utils
from config.constants import FILE_TYPE_DIR, FILE_TYPE_FILE
ENV_VAR_PREFIX = '$$'
SECURE_MASK = '*' * 6
LOGGER = logging.getLogger('script_server.model_helper')
def resolve_env_vars(value, *, full_match=False):
if not isinstance(value, str) or is_empty(value):
return value
if full_match:
if value.startswith(ENV_VAR_PREFIX):
return env_utils.read_variable(value[2:])
return value
def resolve_var(match):
var_match = match.group()
var_name = var_match[2:]
resolved = env_utils.read_variable(var_name, fail_on_missing=False)
if resolved is not None:
return resolved
return var_match
    pattern = re.escape(ENV_VAR_PREFIX) + r'\w+'
return re.sub(pattern, resolve_var, value)
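# Editor's note (illustrative examples; the resolved HOME value shown is an assumption):
#   resolve_env_vars('$$HOME', full_match=True)   -> value of the HOME environment variable
#   resolve_env_vars('path: $$HOME/scripts')      -> 'path: /home/user/scripts'
#   resolve_env_vars('$$MISSING and text')        -> '$$MISSING and text'  (unresolved vars kept as-is)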
def read_obligatory(values_dict, key, error_suffix=''):
value = values_dict.get(key)
if is_empty(value):
raise Exception('"' + key + '" is required attribute' + error_suffix)
return value
def read_list(values_dict, key, default=None):
"""
    Reads the value from values_dict as a list.
    If the value is a list, it is returned as-is.
    If the value is missing, the default is returned (or an empty list if no default is given).
    If the value is a dictionary, an error is raised.
    Otherwise, a single-element list containing the value is returned.
"""
value = values_dict.get(key)
if value is None:
if default is not None:
return default
return []
if isinstance(value, list):
return value
if isinstance(value, dict):
raise Exception('"' + key + '" has invalid type. List expected, got dictionary')
return [value]
def read_dict(values_dict, key, default=None):
"""
    Reads the value from values_dict as a dictionary.
    If the value is a dict, it is returned as-is.
    If the value is missing, the default is returned (or an empty dict if no default is given).
    Otherwise, an error is raised.
"""
value = values_dict.get(key)
if value is None:
if default is not None:
return default
return {}
if isinstance(value, dict):
return value
raise Exception('"' + key + '" has invalid type. Dict expected')
def read_bool_from_config(key, config_obj, *, default=None):
value = config_obj.get(key)
if value is None:
return default
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.lower() == 'true'
raise Exception('"' + key + '" field should be true or false')
def read_bool(value):
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise Exception('Invalid value, should be bool or string. value=' + repr(value))
return value.lower() == 'true'
def read_int_from_config(key, config_obj, *, default=None):
value = config_obj.get(key)
if value is None:
return default
if isinstance(value, int) and not isinstance(value, bool):
return value
if isinstance(value, str):
if value.strip() == '':
return default
try:
return int(value)
except ValueError as e:
raise InvalidValueException(key, 'Invalid %s value: integer expected, but was: %s' % (key, value)) from e
raise InvalidValueTypeException('Invalid %s value: integer expected, but was: %s' % (key, repr(value)))
def is_empty(value):
return (not value) and (value != 0) and (value is not False)
def fill_parameter_values(parameter_configs, template, values):
result = template
for parameter_config in parameter_configs:
if parameter_config.secure or parameter_config.no_value:
continue
parameter_name = parameter_config.name
value = values.get(parameter_name)
if value is None:
value = ''
if not isinstance(value, str):
value = str(value)
result = result.replace('${' + parameter_name + '}', str(value))
return result
def replace_auth_vars(text, username, audit_name):
result = text
if not username:
username = ''
if not audit_name:
audit_name = ''
result = result.replace('${auth.username}', str(username))
result = result.replace('${auth.audit_name}', str(audit_name))
return result
def normalize_extension(extension):
    return re.sub(r'^\.', '', extension).lower()
def list_files(dir, file_type=None, file_extensions=None):
if not os.path.exists(dir) or not os.path.isdir(dir):
raise InvalidFileException(dir, 'Directory not found')
result = []
if not is_empty(file_extensions):
file_type = FILE_TYPE_FILE
sorted_files = sorted(os.listdir(dir), key=lambda s: s.casefold())
for file in sorted_files:
file_path = os.path.join(dir, file)
if file_type:
if file_type == FILE_TYPE_DIR and not os.path.isdir(file_path):
continue
elif file_type == FILE_TYPE_FILE and not os.path.isfile(file_path):
continue
if file_extensions and not os.path.isdir(file_path):
_, extension = os.path.splitext(file_path)
if normalize_extension(extension) not in file_extensions:
continue
result.append(file)
return result
class InvalidFileException(Exception):
def __init__(self, path, message) -> None:
super().__init__(message)
self.path = path
class InvalidValueException(Exception):
def __init__(self, param_name, validation_error) -> None:
super().__init__(validation_error)
self.param_name = param_name
class InvalidValueTypeException(Exception):
def __init__(self, message) -> None:
super().__init__(message)
|
the-stack_0_25835
|
#
from unittest import TestCase
from chaosaws.utils import breakup_iterable
class TestUtilities(TestCase):
def test_breakup_iterable(self):
iterable = []
for i in range(0, 100):
iterable.append("Object%s" % i)
iteration = []
for group in breakup_iterable(iterable, 25):
iteration.append(group)
self.assertEqual(len(group), 25)
self.assertEqual(len(iteration), 4)
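
# Editor's note: a hedged sketch of the chunking behaviour the test above exercises. This is an
# illustration of the expected contract, not chaosaws' actual breakup_iterable implementation.
def _breakup_iterable_demo(items, chunk_size):
    # Yield consecutive fixed-size slices of a sequence.
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]


if __name__ == "__main__":
    assert [len(g) for g in _breakup_iterable_demo(list(range(100)), 25)] == [25, 25, 25, 25]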
|
the-stack_0_25836
|
from collections import defaultdict
from django.db import models, transaction
from django.db.models import Count, F
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
try:
from django.contrib.contenttypes.fields import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
from .models import Vote
from .utils import instance_required
class VotedQuerySet(QuerySet):
"""
    If votes.annotate is called with a `user` argument, an `is_voted` flag is added to each instance.
"""
def __init__(self, model=None, query=None, using=None, user=None):
self.user = user
super(VotedQuerySet, self).__init__(model=model, query=query, using=using)
def __iter__(self):
super(VotedQuerySet, self).__iter__()
if self.user is None:
return iter(self._result_cache)
objects = self._result_cache
user_id = self.user.id
contenttype = ContentType.objects.get_for_model(self.model)
object_ids = [r.id for r in objects]
voted_users = defaultdict(list)
votes = Vote.objects.filter(content_type=contenttype, object_id__in=object_ids)
for v in votes:
voted_users[v.object_id].append(v.user_id)
for r in objects:
r.is_voted = user_id in voted_users.get(r.id, [])
self._result_cache = objects
return iter(objects)
def _clone(self):
c = super(VotedQuerySet, self)._clone()
c.user = self.user
return c
class _VotableManager(models.Manager):
def __init__(self, through, model, instance, field_name='votes', extra_field=None):
self.through = through
self.model = model
self.instance = instance
self.field_name = field_name
self.extra_field = extra_field
@instance_required
def up(self, user, vote):
with transaction.atomic():
if self.through.objects.filter(user=user, content_object=self.instance).exists():
c_type = ContentType.objects.get_for_model(self.instance)
vote_obj = self.through.objects.get(user=user, object_id=self.instance.id, content_type=c_type)
vote_obj.vote = vote
vote_obj.save()
self.instance.save()
else:
self.through(user=user, content_object=self.instance, vote=vote).save()
if self.extra_field:
setattr(self.instance, self.extra_field, F(self.extra_field)+1)
self.instance.save()
@instance_required
def down(self, user):
with transaction.atomic():
self.through.objects.filter(user=user, content_object=self.instance).delete()
if self.extra_field:
setattr(self.instance, self.extra_field, F(self.extra_field)-1)
self.instance.save()
@instance_required
def exists(self, user):
return self.through.objects.filter(user=user, content_object=self.instance).exists()
def all(self, user):
content_type = ContentType.objects.get_for_model(self.model)
object_ids = self.through.objects.filter(user=user, content_type=content_type).values_list('object_id', flat=True)
return self.model.objects.filter(pk__in=list(object_ids))
def count(self, vote = None):
if vote is None:
return self.through.votes_for(self.model, self.instance).count()
else:
return self.through.votes_for(self.model, self.instance).filter(vote = vote).count()
def likes(self):
return self.through.votes_for(self.model, self.instance).values_list("vote", flat=True)
def users(self):
return self.through.votes_for(self.model, self.instance).order_by('-create_at').values_list('user_id', 'create_at')
def annotate(self, queryset=None, user=None, annotation='num_votes', reverse=True):
order = reverse and '-%s' % annotation or annotation
kwargs = {annotation:Count('%s__user' % self.field_name)}
queryset = queryset if queryset is not None else self.model.objects.all()
queryset = queryset.annotate(**kwargs).order_by(order, '-id')
return VotedQuerySet(model=queryset.model, query=queryset.query, user=user)
class VotableManager(GenericRelation):
def __init__(self, through=Vote, manager=_VotableManager, **kwargs):
self.through = through
self.manager = manager
kwargs['verbose_name'] = kwargs.get('verbose_name', _('Votes'))
self.extra_field = kwargs.pop('extra_field', None)
super(VotableManager, self).__init__(self.through, **kwargs)
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their votes." % model.__name__)
manager = self.manager(
through=self.through,
model=model,
instance=instance,
field_name=self.name,
extra_field=self.extra_field,
)
return manager
def contribute_to_class(self, cls, name):
super(VotableManager, self).contribute_to_class(cls, name)
setattr(cls, name, self)
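# Editor's note: a hedged usage sketch of VotableManager. The Comment model is an illustrative
# stand-in; only the manager methods defined above are real.
#
#   class Comment(models.Model):
#       votes = VotableManager()
#
#   comment.votes.up(user, vote=1)        # create or update this user's vote
#   comment.votes.exists(user)            # True if the user has voted on this instance
#   comment.votes.count()                 # number of votes for this instance
#   Comment.votes.annotate(user=user)     # VotedQuerySet with `is_voted` set per instance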
|
the-stack_0_25838
|
"""
Bbox Target Operator
Select foreground and background proposals and encode them as training targets.
"""
import mxnet as mx
import numpy as np
import numpy.random as npr
from ast import literal_eval
from operator_py.detectron_bbox_utils import bbox_overlaps, bbox_transform_inv
def _sample_proposal(proposals, gt_bboxes, image_rois, fg_fraction, fg_thresh, bg_thresh_hi,
bg_thresh_lo, inv_stds, num_reg_class, xywh):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
num_gt = gt_bboxes.shape[0]
num_proposal = proposals.shape[0]
ignore_label = -2
valid_gt_index = np.where(gt_bboxes[:, 4] != ignore_label)[0]
gt_bboxes = gt_bboxes[valid_gt_index]
if gt_bboxes.shape[0] != 0:
proposal_to_gt_overlaps = bbox_overlaps(
proposals.astype(np.float32, copy=False),
gt_bboxes.astype(np.float32, copy=False)
)
else:
proposal_to_gt_overlaps = np.zeros((num_proposal, 1))
proposal_assigned_gt_index = proposal_to_gt_overlaps.argmax(axis=1)
proposal_assigned_class = gt_bboxes[:, 4][proposal_assigned_gt_index]
proposal_max_overlap_w_gt = proposal_to_gt_overlaps.max(axis=1)
rois_per_image = image_rois
fg_rois_per_image = int(np.round(fg_fraction * rois_per_image))
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(proposal_max_overlap_w_gt >= fg_thresh)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False
)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where(
(proposal_max_overlap_w_gt < bg_thresh_hi) &
(proposal_max_overlap_w_gt >= bg_thresh_lo)
)[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False
)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Label is the class each RoI has max overlap with
sampled_labels = proposal_assigned_class[keep_inds]
sampled_labels[fg_rois_per_this_image:] = 0 # Label bg RoIs with class 0
sampled_proposals = proposals[keep_inds]
sampled_gt_bboxes = gt_bboxes[proposal_assigned_gt_index[keep_inds]]
bbox_targets = bbox_transform_inv(sampled_proposals, sampled_gt_bboxes, inv_stds)
bbox_class = sampled_labels[:, None]
if num_reg_class == 2:
bbox_class = np.array(bbox_class > 0, dtype=bbox_targets.dtype)
bbox_targets_with_class = np.concatenate([bbox_class, bbox_targets], axis=1)
bbox_targets, bbox_weights = _expand_bbox_targets(bbox_targets_with_class, num_reg_class)
return sampled_proposals, sampled_labels, bbox_targets, bbox_weights
def _expand_bbox_targets(bbox_target_data, num_bbox_reg_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_bbox_reg_classes))
bbox_weights = np.zeros(bbox_targets.shape)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_weights[ind, start:end] = (1.0, 1.0, 1.0, 1.0)
return bbox_targets, bbox_weights
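# Editor's note (worked example of the expansion above): with num_bbox_reg_classes = 3 and a
# single row of class 2 with targets (tx, ty, tw, th), the expanded row becomes
#   bbox_targets = [0, 0, 0, 0,   0, 0, 0, 0,   tx,  ty,  tw,  th ]
#   bbox_weights = [0, 0, 0, 0,   0, 0, 0, 0,   1.0, 1.0, 1.0, 1.0]
# i.e. only the four slots owned by class 2 (columns 4*cls .. 4*cls+3 = 8..11) are non-zero.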
class BboxTargetOperator(mx.operator.CustomOp):
def __init__(self, num_classes, add_gt_to_proposal, image_rois, fg_fraction,
fg_thresh, bg_thresh_hi, bg_thresh_lo, bbox_target_std, xywh):
super().__init__()
self._num_classes = num_classes
self._add_gt_to_proposal = add_gt_to_proposal
self._image_rois = image_rois
self._fg_fraction = fg_fraction
self._fg_thresh = fg_thresh
self._bg_thresh_hi = bg_thresh_hi
self._bg_thresh_lo = bg_thresh_lo
self._bbox_target_std = bbox_target_std
self._xywh = xywh
def forward(self, is_train, req, in_data, out_data, aux):
proposals = in_data[0].asnumpy() # N x K x 4
gt_bboxes = in_data[1].asnumpy() # N x M x 5
batch_image = proposals.shape[0]
image_rois = self._image_rois
fg_fraction = self._fg_fraction
fg_thresh = self._fg_thresh
bg_thresh_hi = self._bg_thresh_hi
bg_thresh_lo = self._bg_thresh_lo
inv_stds = list(1.0 / std for std in self._bbox_target_std)
num_reg_class = self._num_classes
xywh = self._xywh
keep_proposals = []
keep_gt_bboxes = []
# clean up gt_bbox
for im_gt_bbox in gt_bboxes:
valid = np.where(im_gt_bbox[:, 4] != -1)[0] # class == -1 indicates padding
keep_gt_bboxes.append(im_gt_bbox[valid])
# clean up proposal
for im_proposal in proposals:
valid = np.where(im_proposal[:, -1] != 0)[0] # y2 == 0 indicates padding
keep_proposals.append(im_proposal[valid])
if self._add_gt_to_proposal:
for i in range(batch_image):
im_proposal, im_gt_bbox = keep_proposals[i], keep_gt_bboxes[i]
keep_proposals[i] = np.append(im_proposal, im_gt_bbox[:, :4], axis=0)
sampled_proposal, bbox_class, bbox_target, bbox_target_weight = [], [], [], []
for i in range(batch_image):
output = _sample_proposal(
keep_proposals[i],
keep_gt_bboxes[i],
image_rois,
fg_fraction,
fg_thresh,
bg_thresh_hi,
bg_thresh_lo,
inv_stds,
num_reg_class,
xywh
)
sampled_proposal_i, bbox_class_i, bbox_target_i, bbox_target_weight_i = output
sampled_proposal.append(sampled_proposal_i)
bbox_class.append(bbox_class_i)
bbox_target.append(bbox_target_i)
bbox_target_weight.append(bbox_target_weight_i)
sampled_proposal = np.array(sampled_proposal, dtype=np.float32)
bbox_class = np.array(bbox_class, dtype=np.float32)
bbox_target = np.array(bbox_target, dtype=np.float32)
bbox_target_weight = np.array(bbox_target_weight, dtype=np.float32)
for i, val in enumerate([sampled_proposal, bbox_class, bbox_target, bbox_target_weight]):
self.assign(out_data[i], req[i], val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
@mx.operator.register('bbox_target')
class BboxTargetProp(mx.operator.CustomOpProp):
def __init__(self, num_class, add_gt_to_proposal, image_rois, fg_fraction, fg_thresh,
bg_thresh_hi, bg_thresh_lo, bbox_target_std, xywh='True'):
super().__init__(need_top_grad=False)
self._num_class = int(num_class)
self._add_gt_to_proposal = literal_eval(add_gt_to_proposal)
self._image_rois = int(image_rois)
self._fg_fraction = float(fg_fraction)
self._fg_thresh = float(fg_thresh)
self._bg_thresh_hi = float(bg_thresh_hi)
self._bg_thresh_lo = float(bg_thresh_lo)
self._bbox_target_std = literal_eval(bbox_target_std)
self._xywh = literal_eval(xywh)
if self._xywh:
print('bbox_target encode type: xywh')
else:
print('bbox_target encode type: xyxy')
def list_arguments(self):
return ['proposal', 'gt_bbox']
def list_outputs(self):
return ['sampled_proposal', 'bbox_cls', 'bbox_target', 'bbox_target_weight']
def infer_shape(self, in_shape):
rpn_rois_shape = in_shape[0]
gt_boxes_shape = in_shape[1]
batch_image = rpn_rois_shape[0]
sampled_proposal_shape = (batch_image, self._image_rois, 4)
bbox_cls_shape = (batch_image, self._image_rois, )
bbox_target_shape = (batch_image, self._image_rois, self._num_class * 4)
bbox_weight_shape = (batch_image, self._image_rois, self._num_class * 4)
return [rpn_rois_shape, gt_boxes_shape], \
[sampled_proposal_shape, bbox_cls_shape, bbox_target_shape, bbox_weight_shape]
def create_operator(self, ctx, shapes, dtypes):
return BboxTargetOperator(
self._num_class,
self._add_gt_to_proposal,
self._image_rois,
self._fg_fraction,
self._fg_thresh,
self._bg_thresh_hi,
self._bg_thresh_lo,
self._bbox_target_std,
self._xywh
)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
|
the-stack_0_25841
|
import smithwilson as sw
import pandas as pd
# Input - Switzerland EIOPA spot rates with LLP 25 years and extrapolation period of 150 years
# Source: https://wayback.archive-it.org/org-1495/20191229100044/https:/eiopa.europa.eu/Publications/Standards/EIOPA_RFR_20190531.zip
# EIOPA_RFR_20190531_Term_Structures.xlsx; Tab: RFR_spot_no_VA
rates = [-0.00803, -0.00814, -0.00778, -0.00725, -0.00652,
-0.00565, -0.0048, -0.00391, -0.00313, -0.00214,
-0.0014, -0.00067, -0.00008, 0.00051, 0.00108,
0.00157, 0.00197, 0.00228, 0.0025, 0.00264,
0.00271, 0.00274, 0.0028, 0.00291, 0.00309]
terms = [float(y + 1) for y in range(len(rates))] # 1.0, 2.0, ..., 25.0
ufr = 0.029
alpha = 0.128562
# Target - Extrapolate to 150 years
terms_ext = [float(y + 1) for y in range(150)] # 1.0, 2.0, ..., 150.0
# Calculate fitted rates based on actual observations and the two parameters alpha & UFR
rates_ext = sw.fit_smithwilson_rates(rates_obs=rates, t_obs=terms,
t_target=terms_ext, alpha=alpha, ufr=ufr)
# Display Outputs
# Create dictionary with maturity as key and rate as value
observed = dict(zip(terms, rates))
extrapolated = dict(zip(terms_ext, rates_ext.flatten()))
# Create and print dataframe
print(pd.DataFrame({"Observed": observed, "Extrapolated": extrapolated}))
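# Editor's note (hedged follow-up): EIOPA spot rates are annually compounded, so discount factors
# for the extrapolated curve can be derived as P(t) = (1 + r_t) ** -t. This is an illustrative
# post-processing step, not part of the smithwilson package.
discount_factors = [(1.0 + r) ** -t for r, t in zip(rates_ext.flatten(), terms_ext)]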
|
the-stack_0_25844
|
# -*- coding: utf-8 -*-
import random
import time
from bson import ObjectId
# Random trips generation
NUM_TRIPS_MIN = 0
NUM_TRIPS_MAX = 3
NUM_STEPS_MIN = 1
NUM_STEPS_MAX = 5
STEP_DURATION_MIN = 5 * 60 # 5 minutes
STEP_DURATION_MAX = 20 * 60
STEP_WAIT_TIME_MIN = 5 * 60
STEP_WAIT_TIME_MAX = 20 * 60
LAT_MIN = 40
LAT_MAX = 42
LON_MIN = 11
LON_MAX = 13
LAT_LON_DECIMAL_POINTS = 6
ROADS = [ 'Boswell Rd', 'Pall Mall', 'Upsall Grove', 'Beechfield Rd', 'Glan Dafarn' ]
ADDRESS_NUM_MAX = 200
STOPS = [ 'FLAMINIA NUOVA', 'CASAL SELCE', 'OCEANO INDIANO' ]
TRANSPORT_NUM_MAX = 999
PRICE_MAX = 10
PRICE_DECIMAL_POINTS = 2
# Random positions generation
NUM_USERS_POSITION_MIN = 0.8 # random positions for at least 80% of users
NUM_POSITIONS_PER_USER_MIN = 0
NUM_POSITIONS_PER_USER_MAX = 10
POSITIONS_MIN_TIMESTAMP = 1451606400 # from 1/1/2016
POSITIONS_MAX_TIMESTAMP = int(time.time()) # to now
#===============================================================================
# objectids_to_str ()
#===============================================================================
def objectids_to_str(obj):
assert isinstance(obj, list) or isinstance(obj, dict)
obj_list = obj if isinstance(obj, list) else [ obj ]
for i in range(len(obj_list)):
if isinstance(obj_list[i], ObjectId):
obj_list[i] = str(obj_list[i])
elif isinstance(obj_list[i], list):
objectids_to_str(obj_list[i])
elif isinstance(obj_list[i], dict):
d = obj_list[i]
for key in d:
if isinstance(d[key], ObjectId):
d[key] = str(d[key])
elif isinstance(d[key], list) or isinstance(d[key], dict):
objectids_to_str(d[key])
# Remove eve extra fields
for key in list(d): # list(): get copy of keys (we modify dict)
if key.startswith('_') and key != '_id':
d.pop(key)
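# Editor's note (illustrative example): objectids_to_str() mutates its argument in place, e.g.
#   doc = {'_id': ObjectId('0123456789ab0123456789ab'), '_etag': 'x', 'name': 'n'}
#   objectids_to_str(doc)
#   # doc == {'_id': '0123456789ab0123456789ab', 'name': 'n'}   (eve '_etag' field removed)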
#===============================================================================
# get_real_objectids ()
#===============================================================================
def get_real_objectids(db):
resources = [ 'users', 'cars', 'rides' ]
d = {}
for resource in resources:
d[resource] = [ v for v in db[resource].find({'_deleted': False}) ]
assert d[resource]
objectids_to_str(d[resource])
return d
#===============================================================================
# random_trips ()
#===============================================================================
def random_trips(request, db):
state = {}
state['ids'] = get_real_objectids(db)
num_trips = random.randint(NUM_TRIPS_MIN, NUM_TRIPS_MAX)
return [ random_trip(request, state) for _ in range(num_trips) ]
#===============================================================================
# random_trip ()
#===============================================================================
def random_trip(request, state):
state['used_car_pooling'] = False
state['last_step_time'] = time.time() + 120 * 60 # 2 hours from now
num_steps = random.randint(NUM_STEPS_MIN, NUM_STEPS_MAX)
return {
'steps': [ random_step(request, state) for _ in range(num_steps) ]
}
#===============================================================================
# random_step ()
#===============================================================================
def random_step(request, state):
return {
'route': random_route(request, state),
'transport': random_transport(request, state),
'price': random_price(request, state),
'distance': random_distance(request, state),
}
#===============================================================================
# random_route ()
#===============================================================================
def random_route(request, state):
start_timestamp = state['last_step_time'] + random.randint(STEP_WAIT_TIME_MIN, STEP_WAIT_TIME_MAX)
end_timestamp = start_timestamp + random.randint(STEP_DURATION_MIN, STEP_DURATION_MAX)
state['last_step_time'] = end_timestamp
return {
'start_point': {
'point': {
'lat': round(random.uniform(LAT_MIN, LAT_MAX), LAT_LON_DECIMAL_POINTS),
'lon': round(random.uniform(LON_MIN, LON_MAX), LAT_LON_DECIMAL_POINTS),
},
'date': int(start_timestamp),
'address': '%d %s' % (random.randint(1, ADDRESS_NUM_MAX), random.choice(ROADS))
},
'end_point': {
'point': {
'lat': round(random.uniform(LAT_MIN, LAT_MAX), LAT_LON_DECIMAL_POINTS),
'lon': round(random.uniform(LON_MIN, LON_MAX), LAT_LON_DECIMAL_POINTS),
},
'date': int(end_timestamp),
'address': '%d %s' % (random.randint(1, ADDRESS_NUM_MAX), random.choice(ROADS))
}
}
#===============================================================================
# random_transport ()
#===============================================================================
def random_transport(request, state):
travel_modes = [ 'CAR_POOLING', 'METRO', 'BUS', 'RAIL', 'FEET', 'TRAM' ]
d = state['ids']
if request:
if request.args['use_bus'] == 'false':
travel_modes.remove('BUS')
if request.args['use_metro'] == 'false':
travel_modes.remove('METRO')
if request.args['use_train'] == 'false':
travel_modes.remove('RAIL')
# For now, allow only one car pooling step per trip
if state['used_car_pooling']:
travel_modes.remove('CAR_POOLING')
travel_mode = random.choice(travel_modes)
if travel_mode in [ 'METRO', 'BUS', 'RAIL', 'FEET', 'TRAM' ]:
num = random.randint(1, TRANSPORT_NUM_MAX)
stop = random.choice(STOPS)
return {
'travel_mode': travel_mode,
'short_name': '%s' % num,
'long_name': '%s towards %s' % (num, stop)
}
elif travel_mode == 'CAR_POOLING':
state['used_car_pooling'] = True
ride = random.choice(d['rides'])
return {
'travel_mode': 'CAR_POOLING',
'ride_id': ride['_id'],
'car_id': ride['car_id'],
'driver_id': ride['driver_id'],
}
#===============================================================================
# random_price ()
#===============================================================================
def random_price(request, state):
return {
'amount': round(random.uniform(1, PRICE_MAX), PRICE_DECIMAL_POINTS),
'currency': 'EUR'
}
#===============================================================================
# random_distance ()
#===============================================================================
def random_distance(request, state):
return random.randint(1, 10)
#===============================================================================
# random_positions ()
#===============================================================================
def random_positions(request, db):
state = {}
state['ids'] = get_real_objectids(db)
state['user_created'] = 0
num_users = int(len(state['ids']['users']) * random.uniform(NUM_USERS_POSITION_MIN, 1))
return [ random_user_positions(request, state) for _ in range(num_users) ]
#===============================================================================
# random_user_positions ()
#===============================================================================
def random_user_positions(request, state):
num_positions = random.randint(NUM_POSITIONS_PER_USER_MIN, NUM_POSITIONS_PER_USER_MAX)
user_positions = {
'user_id': state['ids']['users'][state['user_created']]['_id'],
'positions': [ random_position(request, state) for _ in range(num_positions) ]
}
state['user_created'] += 1
return user_positions
#===============================================================================
# random_position ()
#===============================================================================
def random_position(request, state):
return {
'point': {
'lat': round(random.uniform(LAT_MIN, LAT_MAX), LAT_LON_DECIMAL_POINTS),
'lon': round(random.uniform(LON_MIN, LON_MAX), LAT_LON_DECIMAL_POINTS),
},
'timestamp': random.randint(POSITIONS_MIN_TIMESTAMP, POSITIONS_MAX_TIMESTAMP)
}
|
the-stack_0_25846
|
# Owner(s): ["module: primTorch"]
from collections import defaultdict
from torch import Tensor
import torch.autograd
from torch.utils._python_dispatch import enable_torch_dispatch_mode
from torch._decomp import decomposition_table
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.testing._internal.logging_tensor import no_dispatch
from torch.testing._internal.common_utils import (
is_iterable_of_tensors,
TestCase,
skipIfCrossRef,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
)
from torch.testing._internal.common_device_type import (
onlyNativeDeviceTypes,
ops,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import itertools
import functools
from functools import partial
import unittest
aten = torch.ops.aten
# TODO: this isn't going to work with non-aten namespaces
def overload_to_aten_name(overload):
return overload._schema.name.split("::")[1]
# All operators that can have decomp tests
decomposition_names = {overload_to_aten_name(k) for k in decomposition_table}
_decomp_test_ops = [
op
for op in op_db
if op.aten_name in decomposition_names
or op.aten_backward_name in decomposition_names
]
def diff_arg(arg, requires_grad=True):
def is_differentiable_arg(arg):
if requires_grad:
return arg.requires_grad
else:
return arg.is_floating_point() or arg.is_complex()
if is_iterable_of_tensors(arg):
if all([is_differentiable_arg(a) for a in arg]):
return True
if all([not is_differentiable_arg(a) for a in arg]):
return False
raise RuntimeError("NYI: The test runner can't handle this")
return isinstance(arg, Tensor) and is_differentiable_arg(arg)
# Version of autograd.grad with some differences:
# - pytree inputs is allowed (but leaves of the pytree have to all
# be tensors)
# - if an input is not used as part of derivatives, we will return a
# zero-filled tensor for the result
def _autograd_grad(
outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True
):
inputs, inputs_spec = tree_flatten(inputs)
diff_inputs = tuple(inp for inp in inputs if inp.requires_grad)
if grad_outputs is None:
diff_outputs = tuple(out for out in outputs if out.requires_grad)
else:
diff_grad_outputs = [
(out, go) for out, go in zip(outputs, grad_outputs) if out.requires_grad
]
if len(diff_grad_outputs) == 0:
diff_outputs, grad_outputs = (), ()
else:
diff_outputs, grad_outputs = zip(*diff_grad_outputs)
grad_inputs = torch.autograd.grad(
diff_outputs,
diff_inputs,
grad_outputs,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=True,
)
result = []
grad_inputs_iter = iter(grad_inputs)
for inp in inputs:
if inp.requires_grad:
grad_input = next(grad_inputs_iter)
if grad_input is None:
result.append(torch.zeros_like(inp))
else:
result.append(grad_input)
else:
result.append(torch.zeros_like(inp))
return tree_unflatten(result, inputs_spec)
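# Editor's note (illustrative example of the contract above): for primals (a, b) where only `a`
# contributes to the outputs, _autograd_grad returns a real gradient for `a` and a
# torch.zeros_like(b) placeholder for `b`, unflattened back into the original input pytree.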
def _as_tuple(val):
if isinstance(val, tuple):
return val
return (val,)
def ref_vjp_no_create(f, *primals):
result = f(*primals)
def wrapped(cotangents):
return _autograd_grad(
_as_tuple(result), primals, _as_tuple(cotangents), create_graph=False
)
return result, wrapped
dtype_precisions = {
torch.float16: (0.001, 1e-5),
torch.bfloat16: (0.016, 1e-4),
torch.float32: (1.3e-6, 1e-5),
torch.float64: (1e-7, 1e-7),
torch.complex32: (0.001, 1e-5),
torch.complex64: (1.3e-6, 1e-5),
torch.complex128: (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(dtype0, dtype1):
rtol = max(
dtype_precisions.get(dtype0, (0, 0))[0], dtype_precisions.get(dtype1, (0, 0))[0]
)
atol = max(
dtype_precisions.get(dtype0, (0, 0))[1], dtype_precisions.get(dtype1, (0, 0))[1]
)
return rtol, atol
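# Editor's note (worked example from dtype_precisions above): comparing a float16 result against a
# float32 reference picks the looser of the two entries, i.e. rtol = max(1e-3, 1.3e-6) = 1e-3 and
# atol = max(1e-5, 1e-5) = 1e-5.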
def op_assert_ref(test_case, op, orig, decomp, ref, args, kwargs):
assert orig.dtype == decomp.dtype, f"Operation: {op}"
if orig.numel() == 0 or decomp.numel() == 0:
assert orig.numel() == decomp.numel()
return
if ref.is_floating_point():
orig_diff = (orig - ref).abs().max()
decomp_diff = (decomp - ref).abs().max()
atol = 1e-10
if decomp_diff > orig_diff + atol:
raise RuntimeError(
f"Difference from float64 is larger with decomposition {op.__name__}"
f" than original. Original max diff: {orig_diff}, Decomp max diff: {decomp_diff}\n"
f"args = {args}\n"
f"kwargs = {kwargs}"
)
else:
test_case.assertEqual(
orig, decomp, msg=f"{op.__name__}\nargs = {args}\nkwargs = {kwargs}"
)
def op_assert_equal(test_case, op, orig, decomp, args, kwargs):
test_case.assertEqual(
orig.dtype, decomp.dtype, f"Operation: {op}, orig.dtype: {orig.dtype}, decomp.dtype: {decomp.dtype}, {args}, {kwargs}")
# Before adding an entry to this table, make sure your decomposition is right :)
tol_table = {
# Due to strange epsilon behaviors, see https://github.com/pytorch/pytorch/issues/73161
(torch.float32, torch.ops.aten.native_layer_norm.default): (1e-3, 1e-3),
(torch.float32, torch.ops.aten.native_layer_norm_backward.default): (
1e-3,
1e-3,
),
}
if (decomp.dtype, op) in tol_table:
rtol, atol = tol_table[(decomp.dtype, op)]
else:
rtol, atol = _getDefaultRtolAndAtol(orig.dtype, decomp.dtype)
test_case.assertEqual(orig, decomp, rtol=rtol, atol=atol, msg=f"{op.__name__}\nargs = {args}\nkwargs = {kwargs}")
# Given f, returns an f' such that:
# - f' takes only positional arguments
# - All arguments to f' are floating-point Tensors
# - All outputs of f' are floating-point Tensors
def normalize_op_input_output2(
f, args, kwargs, output_process_fn_grad=None, requires_grad=True
):
flat_args, args_spec = tree_flatten(args)
diff_argnums = tuple(
i
for i, arg in enumerate(flat_args)
if diff_arg(arg, requires_grad=requires_grad)
)
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@functools.wraps(f)
def wrapped(*primals):
_args = list(flat_args)
for num, arg in zip(diff_argnums, primals):
_args[num] = arg
_args = tree_unflatten(_args, args_spec)
result = f(*_args, **kwargs)
if output_process_fn_grad is not None:
result = output_process_fn_grad(result)
if isinstance(result, tuple):
# TODO: Remove the following hack for namedtuples
result = tuple(result)
result = tuple(
r
for r in result
if isinstance(r, Tensor) and (r.is_floating_point() or r.is_complex())
)
assert len(result) > 0
return result
return wrapped, primals
# NB: This also upcasts dtype arguments
def upcast_tensor(func, x, dtype=torch.float32):
# TODO: stop hardcoding integer values to pass in
# dtype in torch.ops
FLOAT16_DTYPE = 5
BFLOAT16_DTYPE = 15
FLOAT64_DTYPE = 7
# Some functions take a dtype as argument, so we need to
# manually change that dtype in order to run it with a
# higher precision
dtype_arg_table = {
torch.ops.aten._softmax_backward_data.default,
torch.ops.aten._log_softmax_backward_data.default,
}
if isinstance(x, Tensor) and x.dtype.is_floating_point:
return x.to(dtype=dtype)
elif (
isinstance(x, int)
and func in dtype_arg_table
and x in [FLOAT16_DTYPE, BFLOAT16_DTYPE]
):
return FLOAT64_DTYPE
else:
return x
def normalize_op_input_output(f, sample, requires_grad=True):
args = tuple([sample.input] + list(sample.args))
return normalize_op_input_output2(
f,
args,
sample.kwargs,
sample.output_process_fn_grad,
requires_grad=requires_grad,
)
CROSS_REF_EXCLUDE_SET = {
# CUBLAS_STATUS_NOT_SUPPORTED when calling
# `cublasGemmStridedBatchedExFix(handle, opa, opb, (int)m, (int)n, (int)k,
# (void*)&falpha, a, CUDA_R_16BF, (int)lda, stridea, b, CUDA_R_16BF,
# (int)ldb, strideb, (void*)&fbeta, c, CUDA_R_16BF, (int)ldc, stridec,
# (int)num_batches, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)`
("cuda", torch.bfloat16, "nn.functional.bilinear"),
# randomness
("cuda", torch.float16, "nn.functional.dropout"),
("cuda", torch.bfloat16, "nn.functional.dropout"),
("cuda", torch.float64, "nn.functional.dropout"),
("cuda", torch.float32, "nn.functional.dropout"),
# decomp has problem even with opmath
("cuda", torch.bfloat16, "nn.functional.layer_norm"),
("cuda", torch.float16, "nn.functional.layer_norm"),
("cuda", torch.bfloat16, "nn.functional.batch_norm"),
("cuda", torch.float16, "nn.functional.batch_norm"),
("cuda", torch.bfloat16, "nn.functional.instance_norm"),
("cuda", torch.float16, "nn.functional.instance_norm"),
# doesn't work
("cuda", torch.bfloat16, "nn.functional.embedding"),
}
all_decomposed = set()
all_called = defaultdict(int)
# Helpful snippet for testing coverage
"""
import atexit
def check_coverage():
print("missing coverage:")
print("\n".join(map(str, decomposition_table.keys() - all_decomposed)))
atexit.register(check_coverage)
"""
# Helpful snippet for Horace to create his google sheet :)
"""
import atexit
def dump_ops():
with open('run_ops.txt', 'w') as f, open('count_ops.txt', 'w') as g:
for op, count in sorted(all_called.items(), key=lambda x: x[0].__name__):
f.write(f'{op.__name__}\n')
g.write(f'{count}\n')
with open('run_decompositions.txt', 'w') as f:
for op in sorted([i.__name__ for i in all_decomposed]):
f.write(f'{op}\n')
atexit.register(dump_ops)
"""
def any_unsupported(args, kwargs):
def test_unsupported(t):
if type(t) is torch.Tensor or type(t) is torch.nn.Parameter:
# These are all things that we haven't coded decompositions
# to handle correctly. Maybe they should.
return any([
t.is_sparse_csr, t.is_sparse, t.is_mkldnn, t.is_quantized,
t.is_nested, torch._is_functional_tensor(t),
])
elif torch.overrides.is_tensor_like(t):
# Decompositions will generally change the behavior of Tensor-like
# subclasses, so bypass tests in this case too
return True
else:
return False
flat_args, _ = tree_flatten(args)
flat_kwargs, _ = tree_flatten(kwargs)
return any(test_unsupported(x) for x in itertools.chain(flat_args, flat_kwargs))
class TestDecomp(TestCase):
longMessage = True
# NB: This actually overlaps with test_comprehensive, but it only
# runs on things that are definitely decomposed so it's a lot faster
# to run
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@skipIfCrossRef
@suppress_warnings
@ops(_decomp_test_ops)
def test_quick(self, device, dtype, op):
self.do_cross_ref(device, dtype, op, run_all=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyNativeDeviceTypes
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_comprehensive(self, device, dtype, op):
self.do_cross_ref(device, dtype, op, run_all=True)
def do_cross_ref(self, device, dtype, op, *, run_all):
if (torch.device(device).type, dtype, op.name) in CROSS_REF_EXCLUDE_SET or (
None,
dtype,
op.name,
) in CROSS_REF_EXCLUDE_SET:
self.skipTest(f"{op.name} in {dtype} not supported")
test_dtype = dtype
# We check the correctness of each decomposition right after running it.
# So, when we encounter a decomposition, we run the function normally, and
# then run the decomposition, and ensure they're identical.
called = set()
decomposed = set()
saved_precision = self.precision
saved_rel_tol = self.rel_tol
class DecompCrossRefMode(torch.Tensor):
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
with no_dispatch():
return cls._torch_dispatch(func, types, args, kwargs)
@classmethod
def _torch_dispatch(cls, func, types, args=(), kwargs=None):
self.precision = saved_precision
self.rel_tol = saved_rel_tol
called.add(func)
all_called[func] += 1
# Stuff we shouldn't bother testing
# (TODO: remove detach from the decomp table?)
if func not in decomposition_table or func in [
torch.ops.aten.detach.default
] or any_unsupported(args, kwargs):
return func(*args, **kwargs)
decomposed.add(func)
all_decomposed.add(func)
# We take 2 main strategies for verifying correctness/numerical stability of decompositions
# The first one is simply tolerance checking between decomp_out and pytorch_out
# However, for fp16/bf16 and reductions, this becomes very
# finicky, as there are not many guarantees we can make.
# So, for fp16/bf16, we instead compare the difference of
# {decomp_out, pytorch_out_64} and {pytorch_out,
# pytorch_out_64}. In other words, we compare how far the
# decomposition and pytorch are from the "ground truth" (i.e.
# fp64). If the decomposition results in more error, we error
decomposition = decomposition_table[func]
do_relative_check = test_dtype in [torch.float16, torch.bfloat16]
real_out_unflat = func(*args, **kwargs)
real_out, _ = tree_flatten(real_out_unflat)
decomp_out, _ = tree_flatten(decomposition(*args, **kwargs))
assert len(real_out) == len(decomp_out)
if do_relative_check:
upcast = partial(upcast_tensor, func, dtype=torch.float64)
real_out_double, _ = tree_flatten(
func(*tree_map(upcast, args), **tree_map(upcast, kwargs))
)
for orig, decomp, ref in zip(real_out, decomp_out, real_out_double):
if orig is None:
assert decomp is None
continue
op_assert_ref(self, func, orig, decomp, ref, args, kwargs)
else:
for orig, decomp in zip(real_out, decomp_out):
if orig is None:
assert decomp is None
continue
op_assert_equal(self, func, orig, decomp, args, kwargs)
return real_out_unflat
requires_grad = (
op.supports_autograd
and dtype in op.supported_backward_dtypes(torch.device(device).type)
# TODO: OpInfo really ought to error out for this case, but it's
# not exercised in test_ops_gradients atm. The problem is not
# complex32 per-se (which is supported by data movement only ops)
# but that when we do backwards we expect other ops like add to work
and not dtype == torch.complex32
)
samples = op.sample_inputs(device, test_dtype, requires_grad=requires_grad)
def check_decomposed(aten_name):
self.assertTrue(
any(overload_to_aten_name(c) == aten_name for c in decomposed),
msg=f"aten.{aten_name} was not decomposed, saw calls for: "
+ ", ".join(map(str, list(called))),
)
aten_name = op.decomp_aten_name or op.aten_name
func = op.get_op()
for sample_input in samples:
if requires_grad:
fn, primals = normalize_op_input_output(func, sample_input)
primals = tree_map(
lambda x: x if isinstance(x, torch.Tensor) else x, primals
)
# Once https://github.com/pytorch/pytorch/pull/75965/ I can
# store the called list on the mode object instance and no
# explicit clearing is necessary as I will create a fresh mode
# for each region
decomposed.clear()
with enable_torch_dispatch_mode(DecompCrossRefMode):
decomp_out, decomp_vjp_fn = ref_vjp_no_create(fn, *primals)
if aten_name in decomposition_names:
check_decomposed(aten_name)
if op.aten_backward_name in decomposition_names or run_all:
cotangents = tree_map(lambda x: torch.randn_like(x), decomp_out)
decomposed.clear()
with enable_torch_dispatch_mode(DecompCrossRefMode):
decomp_vjp_fn(cotangents)
if not run_all:
check_decomposed(op.aten_backward_name)
elif aten_name in decomposition_names or run_all:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
decomposed.clear()
with enable_torch_dispatch_mode(DecompCrossRefMode):
func(*args, **kwargs)
if not run_all:
check_decomposed(aten_name)
else:
assert op.supports_autograd
self.skipTest(
"only backwards is decomposed, but dtype doesn't support AD"
)
instantiate_device_type_tests(TestDecomp, globals())
if __name__ == "__main__":
run_tests()
|
the-stack_0_25848
|
import numpy as np
import matplotlib.pyplot as plt
plt.title('AMI')
plt.xlabel('Tempo')
plt.ylabel('Amplitude')
def ami(vetor):
pos = 0
for i in range(0,len(vetor)):
if vetor[i] == 0:
plt.plot([i,i+1], [0,0], 'yo-', color='k')
else:
if pos == 0:
plt.plot([i,i], [0,1], 'yo-', color='k')
plt.plot([i,i+1], [1,1], 'yo-', color='k')
plt.plot([i+1,i+1], [0,1], 'yo-', color='k')
else:
plt.plot([i,i], [0,-1], 'yo-', color='k')
plt.plot([i,i+1], [-1,-1], 'yo-', color='k')
plt.plot([i+1,i+1], [0,-1], 'yo-', color='k')
pos = 0 if pos == 1 else 1
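# Editor's note (illustrative example): in AMI (Alternate Mark Inversion) encoding, zeros map to
# the 0 level and successive ones alternate polarity, so the bit stream [0, 1, 0, 0, 1, 0] below
# is drawn as the levels [0, +1, 0, 0, -1, 0].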
#AMI
binario = [0,1,0,0,1,0]
#plt.plot2[0,1,0,0,1,0]
ami(binario)
plt.show()
#plt.show()
|
the-stack_0_25850
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""CUSUMDetectorModel is a wraper of CUSUMDetector to detect multiple change points
Typical usage example:
>>> # Define CUSUMDetectorModel
>>> model = CUSUMDetectorModel(
scan_window=43200,
historical_window=604800,
threshold=0.01,
delta_std_ratio=1.0,
serialized_model=None,
change_directions=["increase"],
score_func=CusumScoreFunction.percentage_change,
remove_seasonality=True,
)
>>> # Run detector
>>> respond = model.fit_predict(tsd)
>>> # Plot anomaly score
>>> respond.scores.plot(cols=['value'])
>>> # Get change points in unixtime
>>> change_points = model.cps
"""
import json
import logging
from datetime import datetime
from enum import Enum
from typing import Any, List, Optional, Union
import numpy as np
import pandas as pd
from kats.consts import (
DEFAULT_VALUE_NAME,
TimeSeriesData,
)
from kats.detectors.cusum_detection import CUSUMDetector, CUSUM_DEFAULT_ARGS
from kats.detectors.detector import DetectorModel
from kats.detectors.detector_consts import AnomalyResponse
from kats.utils.decomposition import TimeSeriesDecomposition
NORMAL_TOLERENCE = 1 # number of window
CHANGEPOINT_RETENTION = 7 * 24 * 60 * 60 # in seconds
MAX_CHANGEPOINT = 10
def percentage_change(
data: TimeSeriesData, pre_mean: float, **kwargs: Any
) -> TimeSeriesData:
"""
    Calculate percentage change: absolute change / baseline mean.
    Args:
        data: The data used to calculate the score
pre_mean: Baseline mean
"""
return (data - pre_mean) / (pre_mean)
def change(data: TimeSeriesData, pre_mean: float, **kwargs: Any) -> TimeSeriesData:
"""
Calculate absolute change
Args:
        data: The data used to calculate the score
pre_mean: Baseline mean
"""
return data - pre_mean
def z_score(data: TimeSeriesData, pre_mean: float, pre_std: float) -> TimeSeriesData:
"""
Calculate z score: absolute change / std
Args:
        data: The data used to calculate the score
pre_mean: Baseline mean
pre_std: Baseline std
"""
return (data - pre_mean) / (pre_std)
class CusumScoreFunction(Enum):
change = "change"
percentage_change = "percentage_change"
z_score = "z_score"
# Score Function Constants
SCORE_FUNC_DICT = {
CusumScoreFunction.change.value: change,
CusumScoreFunction.percentage_change.value: percentage_change,
CusumScoreFunction.z_score.value: z_score,
}
DEFAULT_SCORE_FUNCTION = CusumScoreFunction.change
STR_TO_SCORE_FUNC = { # Used for param tuning
"change": CusumScoreFunction.change,
"percentage_change": CusumScoreFunction.percentage_change,
"z_score": CusumScoreFunction.z_score,
}
class CUSUMDetectorModel(DetectorModel):
"""CUSUMDetectorModel for detecting multiple level shift change points
CUSUMDetectorModel runs CUSUMDetector multiple times to detect multiple change
points. In each run, CUSUMDetector will use historical_window + scan_window as
input time series, and find change point in scan_window. The DetectorModel stores
change points and returns anomaly score.
Attributes:
cps: Change points detected in unixtime.
alert_fired: Whether a change point has been detected and the anomaly is still present.
pre_mean: Previous baseline mean.
pre_std: Previous baseline std.
number_of_normal_scan: Number of scans with mean returned back to baseline.
alert_change_direction: Increase or decrease.
scan_window: Length in seconds of scan window.
historical_window: Length in seconds of historical window.
step_window: The time difference between CUSUM runs.
threshold: CUSUMDetector threshold.
delta_std_ratio: The mean delta has to be larger than this parameter times the
std of the data to be considered a change.
magnitude_quantile: See in CUSUMDetector.
magnitude_ratio: See in CUSUMDetector.
score_func: The score function to calculate the anomaly score.
remove_seasonality: Whether to apply STL decomposition to remove seasonality.
"""
def __init__(
self,
serialized_model: Optional[bytes] = None,
scan_window: Optional[int] = None,
historical_window: Optional[int] = None,
step_window: Optional[int] = None,
threshold: float = CUSUM_DEFAULT_ARGS["threshold"],
delta_std_ratio: float = CUSUM_DEFAULT_ARGS["delta_std_ratio"],
magnitude_quantile: float = CUSUM_DEFAULT_ARGS["magnitude_quantile"],
magnitude_ratio: float = CUSUM_DEFAULT_ARGS["magnitude_ratio"],
change_directions: List[str] = CUSUM_DEFAULT_ARGS["change_directions"],
score_func: Union[str, CusumScoreFunction] = DEFAULT_SCORE_FUNCTION,
remove_seasonality: bool = CUSUM_DEFAULT_ARGS["remove_seasonality"],
):
if serialized_model:
previous_model = json.loads(serialized_model)
self.cps = previous_model["cps"]
self.alert_fired = previous_model["alert_fired"]
self.pre_mean = previous_model["pre_mean"]
self.pre_std = previous_model["pre_std"]
self.number_of_normal_scan = previous_model["number_of_normal_scan"]
self.alert_change_direction = previous_model["alert_change_direction"]
self.scan_window = previous_model["scan_window"]
self.historical_window = previous_model["historical_window"]
self.step_window = previous_model["step_window"]
self.threshold = previous_model["threshold"]
self.delta_std_ratio = previous_model["delta_std_ratio"]
self.magnitude_quantile = previous_model["magnitude_quantile"]
self.magnitude_ratio = previous_model["magnitude_ratio"]
self.change_directions = previous_model["change_directions"]
self.score_func = previous_model["score_func"]
if "remove_seasonality" in previous_model:
self.remove_seasonality = previous_model["remove_seasonality"]
else:
self.remove_seasonality = remove_seasonality
elif scan_window is not None and historical_window is not None:
self.cps = []
self.alert_fired = False
self.pre_mean = 0
self.pre_std = 1
self.number_of_normal_scan = 0
self.alert_change_direction = None
self.scan_window = scan_window
self.historical_window = historical_window
self.step_window = step_window
self.threshold = threshold
self.delta_std_ratio = delta_std_ratio
self.magnitude_quantile = magnitude_quantile
self.magnitude_ratio = magnitude_ratio
self.change_directions = change_directions
self.remove_seasonality = remove_seasonality
# We allow score_function to be a str for compatibility with param tuning
if isinstance(score_func, str):
if score_func in STR_TO_SCORE_FUNC:
score_func = STR_TO_SCORE_FUNC[score_func]
else:
score_func = DEFAULT_SCORE_FUNCTION
self.score_func = score_func.value
else:
raise ValueError(
"""
You must either provide serialized model or values for scan_window and historical_window.
"""
)
# pyre-fixme[58]: `>=` is not supported for operand types `int` and
# `Optional[int]`.
if step_window is not None and step_window >= scan_window:
raise ValueError(
"Step window should smaller than scan window to ensure we have overlap for scan windows."
)
def __eq__(self, other):
if isinstance(other, CUSUMDetectorModel):
return (
self.cps == other.cps
and self.alert_fired == other.alert_fired
and self.pre_mean == other.pre_mean
and self.pre_std == other.pre_std
and self.number_of_normal_scan == other.number_of_normal_scan
and self.alert_change_direction == other.alert_change_direction
and self.scan_window == other.scan_window
and self.historical_window == other.historical_window
and self.step_window == other.step_window
and self.threshold == other.threshold
and self.delta_std_ratio == other.delta_std_ratio
and self.magnitude_quantile == other.magnitude_quantile
and self.magnitude_ratio == other.magnitude_ratio
and self.change_directions == other.change_directions
and self.score_func == other.score_func
)
return False
def serialize(self) -> bytes:
"""
Return the serialized model.
"""
return str.encode(json.dumps(self.__dict__))
def _set_alert_off(self) -> None:
self.alert_fired = False
self.number_of_normal_scan = 0
def _set_alert_on(self, baseline_mean: float, baseline_std: float, alert_change_direction: str) -> None:
self.alert_fired = True
self.alert_change_direction = alert_change_direction
self.pre_mean = baseline_mean
self.pre_std = baseline_std
def _if_normal(self, cur_mean: float, change_directions: Optional[List[str]]) -> bool:
if change_directions is not None:
increase, decrease = (
"increase" in change_directions,
"decrease" in change_directions,
)
else:
increase, decrease = True, True
if self.alert_change_direction == "increase":
check_increase = 0 if increase else np.inf
check_decrease = 1.0 if decrease else np.inf
elif self.alert_change_direction == "decrease":
check_increase = 1.0 if increase else np.inf
check_decrease = 0 if decrease else np.inf
return (
self.pre_mean - check_decrease * self.pre_std
<= cur_mean
<= self.pre_mean + check_increase * self.pre_std
)
def _fit(
self,
data: TimeSeriesData,
historical_data: TimeSeriesData,
scan_window: int,
threshold: float = CUSUM_DEFAULT_ARGS["threshold"],
delta_std_ratio: float = CUSUM_DEFAULT_ARGS["delta_std_ratio"],
magnitude_quantile: float = CUSUM_DEFAULT_ARGS["magnitude_quantile"],
magnitude_ratio: float = CUSUM_DEFAULT_ARGS["magnitude_ratio"],
change_directions: List[str] = CUSUM_DEFAULT_ARGS["change_directions"],
) -> None:
"""Fit CUSUM model.
Args:
data: the new data the model has not seen
historical_data: the historical data; `historical_data` has to end with the
datapoint right before the first data point in `data`
scan_window: scan window length in seconds; the scan window is the window where
CUSUM searches for changepoint(s)
threshold: changepoint significance level; the higher the value, the more
changepoints are detected
delta_std_ratio: the mean change has to be larger than `delta_std_ratio` *
`std(data[:changepoint])` to be considered a change; the higher the value,
the fewer changepoints are detected
magnitude_quantile: float, the quantile for magnitude comparison; if
None, the magnitude comparison is skipped
magnitude_ratio: float, comparable ratio
change_directions: a list containing either or both 'increase' and 'decrease'
to specify the type of change to detect
"""
historical_data.extend(data, validate=False)
n = len(historical_data)
scan_start_time = historical_data.time.iloc[-1] - pd.Timedelta(
scan_window, unit="s"
)
scan_start_index = max(
0, np.argwhere((historical_data.time >= scan_start_time).values).min()
)
if not self.alert_fired:
# if the scan window contains fewer than 2 data points and no alert is fired,
# skip this scan
if n - scan_start_index <= 1:
return
detector = CUSUMDetector(historical_data)
changepoints = detector.detector(
interest_window=[scan_start_index, n],
threshold=threshold,
delta_std_ratio=delta_std_ratio,
magnitude_quantile=magnitude_quantile,
magnitude_ratio=magnitude_ratio,
change_directions=change_directions,
)
if len(changepoints) > 0:
cp, meta = sorted(changepoints, key=lambda x: x[0].start_time)[0]
self.cps.append(int(cp.start_time.value / 1e9))
if len(self.cps) > MAX_CHANGEPOINT:
self.cps.pop(0)
self._set_alert_on(
historical_data.value[: meta.cp_index + 1].mean(),
historical_data.value[: meta.cp_index + 1].std(),
meta.direction,
)
else:
cur_mean = historical_data[scan_start_index:].value.mean()
if self._if_normal(cur_mean, change_directions):
self.number_of_normal_scan += 1
if self.number_of_normal_scan >= NORMAL_TOLERENCE:
self._set_alert_off()
else:
self.number_of_normal_scan = 0
current_time = int(data.time.max().value / 1e9)
if current_time - self.cps[-1] > CHANGEPOINT_RETENTION:
self._set_alert_off()
def _predict(
self,
data: TimeSeriesData,
score_func: CusumScoreFunction = CusumScoreFunction.change,
) -> TimeSeriesData:
"""
data: the new data for the anomaly score calculation.
"""
if self.alert_fired:
cp = self.cps[-1]
tz = data.tz()
if tz is None:
change_time = pd.to_datetime(cp, unit="s")
else:
change_time = pd.to_datetime(cp, unit="s", utc=True).tz_convert(tz)
if change_time >= data.time.iloc[0]:
cp_index = data.time[data.time == change_time].index[0]
data_pre = data[: cp_index + 1]
score_pre = self._zeros_ts(data_pre)
score_post = SCORE_FUNC_DICT[score_func](
data=data[cp_index + 1 :],
pre_mean=self.pre_mean,
pre_std=self.pre_std,
)
score_pre.extend(score_post, validate=False)
return score_pre
return SCORE_FUNC_DICT[score_func](
data=data, pre_mean=self.pre_mean, pre_std=self.pre_std
)
else:
return self._zeros_ts(data)
def _zeros_ts(self, data: TimeSeriesData) -> TimeSeriesData:
return TimeSeriesData(
time=data.time,
value=pd.Series(
np.zeros(len(data)),
name=data.value.name if data.value.name else DEFAULT_VALUE_NAME,
),
)
# pyre-fixme[14]: `fit_predict` overrides method defined in `DetectorModel`
# inconsistently.
def fit_predict(
self,
data: TimeSeriesData,
historical_data: Optional[TimeSeriesData] = None,
) -> AnomalyResponse:
"""
This function combines fit and predict and returns anomaly scores for data. It
requires scan_window > step_window.
The relationship between two consecutive CUSUM runs in the loop is shown below:
>>> |---historical_window---|---scan_window---|
>>> |-step_window-|
>>> |---historical_window---|---scan_window---|
* scan_window: the window size in seconds to detect change point
* historical_window: the window size in seconds to provide historical data
* step_window: the window size in seconds to specify the step size between two scans
Args:
data: :class:`kats.consts.TimeSeriesData` object representing the data
historical_data: :class:`kats.consts.TimeSeriesData` object representing the history.
Returns:
The anomaly response contains the anomaly scores.
"""
# get parameters
scan_window = self.scan_window
historical_window = self.historical_window
step_window = self.step_window
threshold = self.threshold
delta_std_ratio = self.delta_std_ratio
magnitude_quantile = self.magnitude_quantile
magnitude_ratio = self.magnitude_ratio
change_directions = self.change_directions
score_func = self.score_func
remove_seasonality = self.remove_seasonality
scan_window = pd.Timedelta(scan_window, unit="s")
historical_window = pd.Timedelta(historical_window, unit="s")
# pull all the data in historical data
if historical_data is not None:
# make a copy of historical data
historical_data = historical_data[:]
historical_data.extend(data, validate=False)
else:
# When historical_data is not provided, part of data is used as
# historical_data, and the corresponding scores are filled with zeros.
historical_data = data[:]
frequency = historical_data.freq_to_timedelta()
if frequency is None or frequency is pd.NaT:
# Use the top frequency if any, when not able to infer from data.
freq_counts = (
historical_data.time.diff().value_counts().sort_values(ascending=False)
)
if freq_counts.iloc[0] >= int(len(historical_data)) * 0.8 - 1:
frequency = freq_counts.index[0]
else:
logging.debug(f"freq_counts: {freq_counts}")
raise ValueError("Not able to infer freqency of the time series")
if remove_seasonality:
decomposer_input = historical_data.interpolate(frequency)
# fixing the period to 24 hours as indicated in T81530775
period = int(24 * 60 * 60 / frequency.total_seconds())
decomposer = TimeSeriesDecomposition(
decomposer_input,
period=period,
robust=True,
seasonal_deg=0,
trend_deg=1,
low_pass_deg=1,
low_pass_jump=int((period + 1) * 0.15), # save run time
seasonal_jump=1,
trend_jump=int((period + 1) * 0.15), # save run time
)
decomp = decomposer.decomposer()
historical_data_time_idx = decomp["rem"].time.isin(historical_data.time)
historical_data.value = pd.Series(
decomp["rem"][historical_data_time_idx].value
+ decomp["trend"][historical_data_time_idx].value,
name=historical_data.value.name,
)
smooth_window = int(scan_window.total_seconds() / frequency.total_seconds())
if smooth_window > 1:
smooth_historical_value = pd.Series(
np.convolve(
historical_data.value.values, np.ones(smooth_window), mode="full"
)[: 1 - smooth_window]
/ smooth_window,
name=historical_data.value.name,
)
smooth_historical_data = TimeSeriesData(
time=historical_data.time, value=smooth_historical_value
)
else:
smooth_historical_data = historical_data
anomaly_start_time = max(
historical_data.time.iloc[0] + historical_window, data.time.iloc[0]
)
if anomaly_start_time > historical_data.time.iloc[-1]:
# if len(all data) is smaller than historical window return zero score
return AnomalyResponse(
scores=self._predict(smooth_historical_data[-len(data) :], score_func),
# pyre-fixme[6]: Expected `ConfidenceBand` for 2nd param but got `None`.
confidence_band=None,
predicted_ts=None,
anomaly_magnitude_ts=self._zeros_ts(data),
stat_sig_ts=None,
)
anomaly_start_idx = self._time2idx(data, anomaly_start_time, "right")
anomaly_start_time = data.time.iloc[anomaly_start_idx]
score_tsd = self._zeros_ts(data[:anomaly_start_idx])
if (
historical_data.time.iloc[-1] - historical_data.time.iloc[0] + frequency
<= scan_window
):
# if len(all data) is smaller than scan data return zero score
return AnomalyResponse(
scores=self._predict(smooth_historical_data[-len(data) :], score_func),
# pyre-fixme[6]: Expected `ConfidenceBand` for 2nd param but got `None`.
confidence_band=None,
predicted_ts=None,
anomaly_magnitude_ts=self._zeros_ts(data),
stat_sig_ts=None,
)
if step_window is None:
# if step window is not provided, use the time range of data or
# half of the scan_window.
step_window = min(
scan_window / 2,
(data.time.iloc[-1] - data.time.iloc[0])
+ frequency, # to include the last data point
)
else:
step_window = pd.Timedelta(step_window, unit="s")
for start_time in pd.date_range(
anomaly_start_time,
min(
data.time.iloc[-1]
+ frequency
- step_window, # to include last data point
data.time.iloc[-1],  # make sure start_time won't go beyond the last data time
),
freq=step_window,
):
logging.debug(f"start_time {start_time}")
historical_start = self._time2idx(
historical_data, start_time - historical_window, "right"
)
logging.debug(f"historical_start {historical_start}")
historical_end = self._time2idx(historical_data, start_time, "right")
logging.debug(f"historical_end {historical_end}")
scan_end = self._time2idx(historical_data, start_time + step_window, "left")
logging.debug(f"scan_end {scan_end}")
in_data = historical_data[historical_end : scan_end + 1]
if len(in_data) == 0:
# skip if there is no data in the step_window
continue
in_hist = historical_data[historical_start:historical_end]
self._fit(
in_data,
in_hist,
# pyre-fixme[6]: Expected `int` for 3rd param but got `Timedelta`.
scan_window=scan_window,
threshold=threshold,
delta_std_ratio=delta_std_ratio,
magnitude_quantile=magnitude_quantile,
magnitude_ratio=magnitude_ratio,
change_directions=change_directions,
)
score_tsd.extend(
self._predict(
smooth_historical_data[historical_end : scan_end + 1],
score_func=score_func,
),
validate=False,
)
# Handle the remaining data
remain_data_len = len(data) - len(score_tsd)
if remain_data_len > 0:
scan_end = len(historical_data)
historical_end = len(historical_data) - remain_data_len
historical_start = self._time2idx(
historical_data,
historical_data.time.iloc[historical_end] - historical_window,
"left",
)
in_data = historical_data[historical_end:scan_end]
in_hist = historical_data[historical_start:historical_end]
self._fit(
in_data,
in_hist,
# pyre-fixme[6]: Expected `int` for 3rd param but got `Timedelta`.
scan_window=scan_window,
threshold=threshold,
delta_std_ratio=delta_std_ratio,
magnitude_quantile=magnitude_quantile,
magnitude_ratio=magnitude_ratio,
change_directions=change_directions,
)
score_tsd.extend(
self._predict(
smooth_historical_data[historical_end:scan_end],
score_func=score_func,
),
validate=False,
)
return AnomalyResponse(
scores=score_tsd,
# pyre-fixme[6]: Expected `ConfidenceBand` for 2nd param but got `None`.
confidence_band=None,
predicted_ts=None,
anomaly_magnitude_ts=self._zeros_ts(data),
stat_sig_ts=None,
)
def _time2idx(self, tsd: TimeSeriesData, time: datetime, direction: str) -> int:
"""
This function gets the index of the TimeSeriesData given a datetime.
"right" takes the first index at or after the time stamp (inclusive).
"left" takes the last index strictly before the time stamp (exclusive).
"""
if direction == "right":
return np.argwhere((tsd.time >= time).values).min()
elif direction == "left":
return np.argwhere((tsd.time < time).values).max()
else:
raise ValueError("direction can only be right or left")
# pyre-fixme[14]: `fit` overrides method defined in `DetectorModel` inconsistently.
def fit(
self,
data: TimeSeriesData,
historical_data: Optional[TimeSeriesData] = None,
) -> None:
self.fit_predict(data, historical_data)
# pyre-fixme[14]: `predict` overrides method defined in `DetectorModel`
# inconsistently.
def predict(
self, data: TimeSeriesData, historical_data: Optional[TimeSeriesData] = None
) -> AnomalyResponse:
"""
predict is not implemented
"""
raise ValueError("predict is not implemented, call fit_predict() instead")
|
the-stack_0_25852
|
# vim: ts=4:sw=4:et:cc=120
import datetime
import hashlib
import io
import logging
import os.path
import pickle
import shutil
import tempfile
import uuid
import ace_api
import saq
from saq.analysis import RootAnalysis
from saq.constants import *
from saq.database import acquire_lock, use_db
from saq.test import *
from api.cloudphish.test import CloudphishTestCase, TEST_URL
from saq.util import *
import pytz
import tzlocal
class TestCase(ACEEngineTestCase):
def setUp(self, *args, **kwargs):
super().setUp(*args, **kwargs)
self.start_api_server()
ace_api.set_default_remote_host(saq.API_PREFIX)
ace_api.set_default_ssl_ca_path(saq.CONFIG['SSL']['ca_chain_path'])
def test_ping(self):
result = ace_api.ping()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
self.assertEquals(result['result'], 'pong')
def test_get_supported_api_version(self):
result = ace_api.get_supported_api_version()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
self.assertEquals(result['result'], 1)
@use_db
def test_get_valid_companies(self, db, c):
result = ace_api.get_valid_companies()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
self.assertTrue(isinstance(result['result'], list))
lookup = {}
c.execute("SELECT id, name FROM company")
for _id, _name in c:
lookup[_id] = _name
self.assertEquals(len(lookup), len(result['result']))
for r in result['result']:
self.assertTrue(r['id'] in lookup and lookup[r['id']] == r['name'])
result = ace_api.get_valid_companies()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
self.assertTrue(isinstance(result['result'], list))
def test_get_valid_observables(self):
from saq.constants import VALID_OBSERVABLE_TYPES, OBSERVABLE_DESCRIPTIONS, DEPRECATED_OBSERVABLES
result = ace_api.get_valid_observables()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
self.assertTrue(isinstance(result['result'], list))
for r in result['result']:
self.assertTrue(r['name'] in VALID_OBSERVABLE_TYPES)
self.assertEquals(OBSERVABLE_DESCRIPTIONS[r['name']], r['description'])
active_observables = set(VALID_OBSERVABLE_TYPES) - set(DEPRECATED_OBSERVABLES)
self.assertEquals(len(active_observables), len(result['result']))
def test_get_valid_directives(self):
from saq.constants import VALID_DIRECTIVES, DIRECTIVE_DESCRIPTIONS
result = ace_api.get_valid_directives()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
self.assertTrue(isinstance(result['result'], list))
for r in result['result']:
self.assertTrue(r['name'] in VALID_DIRECTIVES)
self.assertEquals(DIRECTIVE_DESCRIPTIONS[r['name']], r['description'])
def _get_submit_time(self):
return datetime.datetime(2017, 11, 11, hour=7, minute=36, second=1, microsecond=1)
def _get_localized_submit_time(self):
return ace_api.LOCAL_TIMEZONE.localize(self._get_submit_time()).astimezone(pytz.UTC)
def _submit(self, analysis_mode=None,
tool=None,
tool_instance=None,
type=None,
description=None,
details=None,
event_time=None,
observables=None,
tags=None):
temp_path = os.path.join(saq.SAQ_HOME, saq.TEMP_DIR, 'submit_test.dat')
temp_data = os.urandom(1024)
with open(temp_path, 'wb') as fp:
fp.write(temp_data)
try:
with open(temp_path, 'rb') as fp:
return ace_api.submit(
analysis_mode='test_empty' if analysis_mode is None else analysis_mode,
tool='unittest_tool' if tool is None else tool,
tool_instance='unittest_tool_instance' if tool_instance is None else tool_instance,
type='unittest_type' if type is None else type,
description='testing' if description is None else description,
details={'hello': 'world'} if details is None else details,
event_time=self._get_submit_time() if event_time is None else event_time,
observables=[
{ 'type': 'ipv4', 'value': '1.2.3.4', 'time': self._get_submit_time(), 'tags': [ 'tag_1', 'tag_2' ], 'directives': [ 'no_scan' ], 'limited_analysis': ['basic_test'] },
{ 'type': 'user', 'value': 'test_user', 'time': self._get_submit_time() },
] if observables is None else observables,
tags=[ 'alert_tag_1', 'alert_tag_2' ] if tags is None else tags,
files=[('sample.dat', io.BytesIO(b'Hello, world!')),
('submit_test.dat', fp)])
finally:
os.remove(temp_path)
@use_db
def test_submit(self, db, c):
result = self._submit()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
# make sure this actually uploaded
root = RootAnalysis(storage_dir=workload_storage_dir(uuid))
root.load()
self.assertEquals(root.analysis_mode, 'test_empty')
self.assertEquals(root.tool, 'unittest_tool')
self.assertEquals(root.tool_instance, 'unittest_tool_instance')
self.assertEquals(root.alert_type, 'unittest_type')
self.assertEquals(root.description, 'testing')
self.assertEquals(root.details, {'hello': 'world'})
self.assertEquals(root.event_time, self._get_localized_submit_time())
self.assertEquals(root.tags[0].name, 'alert_tag_1')
self.assertEquals(root.tags[1].name, 'alert_tag_2')
# NOTE that this is 4 instead of 2 since adding a file adds a F_FILE observable type
self.assertEquals(len(root.all_observables), 4)
o = root.find_observable(lambda o: o.type == 'ipv4')
self.assertIsNotNone(o)
self.assertEquals(o.value, '1.2.3.4')
self.assertEquals(len(o.tags), 2)
self.assertTrue(o.has_directive('no_scan'))
self.assertTrue('basic_test' in o.limited_analysis)
o = root.find_observable(lambda o: o.type == 'file' and o.value == 'sample.dat')
self.assertIsNotNone(o)
with open(os.path.join(root.storage_dir, o.value), 'rb') as fp:
self.assertEquals(fp.read(), b'Hello, world!')
o = root.find_observable(lambda o: o.type == 'file' and o.value == 'submit_test.dat')
self.assertIsNotNone(o)
self.assertEquals(os.path.getsize(os.path.join(root.storage_dir, o.value)), 1024)
# we should see a single workload entry
c.execute("SELECT id, uuid, node_id, analysis_mode FROM workload WHERE uuid = %s", (uuid,))
row = c.fetchone()
self.assertIsNotNone(row)
self.assertIsNotNone(row[0])
self.assertEquals(row[1], uuid)
self.assertIsNotNone(row[2])
self.assertEquals(row[3], 'test_empty')
@use_db
def test_resubmit(self, db, c):
# submit something so we have something to resubmit
result = self._submit(analysis_mode=ANALYSIS_MODE_CORRELATION)
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
# make sure this actually uploaded
root = RootAnalysis(storage_dir=storage_dir_from_uuid(uuid))
root.load()
self.assertEquals(root.analysis_mode, ANALYSIS_MODE_CORRELATION)
self.assertEquals(root.tool, 'unittest_tool')
self.assertEquals(root.tool_instance, 'unittest_tool_instance')
self.assertEquals(root.alert_type, 'unittest_type')
self.assertEquals(root.description, 'testing')
self.assertEquals(root.details, {'hello': 'world'})
self.assertEquals(root.event_time, self._get_localized_submit_time())
self.assertEquals(root.tags[0].name, 'alert_tag_1')
self.assertEquals(root.tags[1].name, 'alert_tag_2')
# NOTE that this is 4 instead of 2 since adding a file adds a F_FILE observable type
self.assertEquals(len(root.all_observables), 4)
o = root.find_observable(lambda o: o.type == 'ipv4')
self.assertIsNotNone(o)
self.assertEquals(o.value, '1.2.3.4')
self.assertEquals(len(o.tags), 2)
self.assertTrue(o.has_directive('no_scan'))
self.assertTrue('basic_test' in o.limited_analysis)
o = root.find_observable(lambda o: o.type == 'file' and o.value == 'sample.dat')
self.assertIsNotNone(o)
with open(os.path.join(root.storage_dir, o.value), 'rb') as fp:
self.assertEquals(fp.read(), b'Hello, world!')
o = root.find_observable(lambda o: o.type == 'file' and o.value == 'submit_test.dat')
self.assertIsNotNone(o)
self.assertEquals(os.path.getsize(os.path.join(root.storage_dir, o.value)), 1024)
# we should see a single workload entry
c.execute("SELECT id, uuid, node_id, analysis_mode FROM workload WHERE uuid = %s", (uuid,))
row = c.fetchone()
self.assertIsNotNone(row)
self.assertIsNotNone(row[0])
self.assertEquals(row[1], uuid)
self.assertIsNotNone(row[2])
self.assertEquals(row[3], ANALYSIS_MODE_CORRELATION)
# now resubmit the alert
result = ace_api.resubmit_alert(uuid)
self.assertFalse('error' in result)
def test_submit_with_utc_timezone(self):
# make sure we can submit with a UTC timezone already set
result = self._submit(event_time=self._get_localized_submit_time())
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
root = RootAnalysis(storage_dir=workload_storage_dir(uuid))
root.load()
self.assertEquals(root.event_time, self._get_localized_submit_time())
def test_submit_with_other_timezone(self):
# make sure we can submit with another timezone already set
result = self._submit(event_time=self._get_localized_submit_time().astimezone(pytz.timezone('US/Eastern')))
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
root = RootAnalysis(storage_dir=workload_storage_dir(uuid))
root.load()
self.assertEquals(root.event_time, self._get_localized_submit_time())
def test_get_analysis(self):
result = self._submit()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
result = ace_api.get_analysis(uuid)
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertEquals(result['analysis_mode'], 'test_empty')
self.assertEquals(result['tool'], 'unittest_tool')
self.assertEquals(result['tool_instance'], 'unittest_tool_instance')
self.assertEquals(result['type'], 'unittest_type')
self.assertEquals(result['description'], 'testing')
self.assertEquals(result['event_time'], '2017-11-11T07:36:01.000001+0000')
self.assertEquals(result['tags'][0], 'alert_tag_1')
self.assertEquals(result['tags'][1], 'alert_tag_2')
self.assertEquals(len(result['observable_store']), 4)
# the details should be a file_path reference
self.assertTrue(isinstance(result['details'], dict))
self.assertTrue('file_path' in result['details'])
self.assertTrue(result['details']['file_path'].startswith('RootAnalysis_'))
def test_get_analysis_details(self):
result = self._submit()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
result = ace_api.get_analysis(uuid)
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
details_result = ace_api.get_analysis_details(uuid, result['details']['file_path'])
self.assertIsNotNone(details_result)
details_result = details_result['result']
self.assertTrue('hello' in details_result)
self.assertEquals(details_result['hello'], 'world')
def test_get_analysis_file(self):
result = self._submit()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
result = ace_api.get_analysis(uuid)
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
# first test getting a file by uuid
file_uuid = None
for o_uuid in result['observables']:
o = result['observable_store'][o_uuid]
if o['type'] == 'file' and o['value'] == 'sample.dat':
file_uuid = o_uuid
break
self.assertIsNotNone(file_uuid)
output_path = os.path.join(saq.SAQ_HOME, saq.TEMP_DIR, 'get_file_test.dat')
self.assertTrue(ace_api.get_analysis_file(uuid, file_uuid, output_file=output_path))
with open(output_path, 'rb') as fp:
self.assertEquals(fp.read(), b'Hello, world!')
# same thing but with passing a file pointer
with open(output_path, 'wb') as fp:
self.assertTrue(ace_api.get_analysis_file(uuid, file_uuid, output_fp=fp))
# now test by using the file name
self.assertTrue(ace_api.get_analysis_file(uuid, 'sample.dat', output_file=output_path))
with open(output_path, 'rb') as fp:
self.assertEquals(fp.read(), b'Hello, world!')
def test_get_analysis_status(self):
result = self._submit()
self.assertIsNotNone(result)
self.assertTrue('result' in result)
result = result['result']
self.assertIsNotNone(result['uuid'])
uuid = result['uuid']
result = ace_api.get_analysis_status(uuid)
self.assertIsNotNone(result)
result = result['result']
self.assertTrue('workload' in result)
self.assertTrue('delayed_analysis' in result)
self.assertTrue('locks' in result)
self.assertTrue('alert' in result)
self.assertEquals(result['alert'], None)
self.assertEquals(result['delayed_analysis'], [])
self.assertIsNone(result['locks'])
self.assertTrue(isinstance(result['workload']['id'], int))
self.assertEquals(result['workload']['uuid'], uuid)
self.assertIsNotNone(result['workload']['node_id'])
self.assertEquals(result['workload']['analysis_mode'], 'test_empty')
self.assertTrue(isinstance(parse_event_time(result['workload']['insert_date']), datetime.datetime))
def test_download(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.initialize_storage()
root.details = { 'hello': 'world' }
root.save()
temp_dir = tempfile.mkdtemp(dir=saq.TEMP_DIR)
try:
result = ace_api.download(root.uuid, temp_dir)
self.assertTrue(os.path.exists(os.path.join(temp_dir, 'data.json')))
root = RootAnalysis(storage_dir=temp_dir)
root.load()
self.assertEquals(root.details, { 'hello': 'world' })
finally:
shutil.rmtree(temp_dir)
def test_upload(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), storage_dir=os.path.join(saq.TEMP_DIR, 'unittest'))
root.initialize_storage()
root.details = { 'hello': 'world' }
root.save()
result = ace_api.upload(root.uuid, root.storage_dir)
self.assertTrue(result['result'])
# uploads go straight into saq.DATA_DIR
# XXX I don't think we need uploads at all
root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
self.assertEquals(root.details, { 'hello': 'world' })
def test_clear(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.initialize_storage()
root.details = { 'hello': 'world' }
root.save()
self.assertTrue(os.path.exists(root.storage_dir))
lock_uuid = str(uuid.uuid4())
self.assertTrue(acquire_lock(root.uuid, lock_uuid))
result = ace_api.clear(root.uuid, lock_uuid)
self.assertFalse(os.path.exists(root.storage_dir))
def test_clear_invalid_lock_uuid(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.initialize_storage()
root.details = { 'hello': 'world' }
root.save()
self.assertTrue(os.path.exists(root.storage_dir))
lock_uuid = str(uuid.uuid4())
self.assertTrue(acquire_lock(root.uuid, lock_uuid))
lock_uuid = str(uuid.uuid4())
with self.assertRaises(Exception):
self.assertFalse(ace_api.clear(root.uuid, lock_uuid))
self.assertTrue(os.path.exists(root.storage_dir))
def test_legacy_submit(self):
alert = ace_api.Alert(description='Test Alert')
alert.add_observable(F_IPV4, '1.2.3.4', local_time(), directives=[DIRECTIVE_NO_SCAN])
alert.add_tag('test')
temp_path = os.path.join(saq.TEMP_DIR, 'test.txt')
with open(temp_path, 'w') as fp:
fp.write('test')
alert.add_attachment_link(temp_path, 'dest/test.txt')
alert.submit(f'https://{saq.API_PREFIX}', ssl_verification=saq.CONFIG['SSL']['ca_chain_path'])
self.assertTrue(validate_uuid(alert.uuid))
root = RootAnalysis(storage_dir=storage_dir_from_uuid(alert.uuid))
root.load()
self.assertEquals(root.description, 'Test Alert')
ipv4_observable = root.find_observable(lambda o: o.type == F_IPV4)
self.assertIsNotNone(ipv4_observable)
self.assertEquals(ipv4_observable.value, '1.2.3.4')
self.assertTrue(ipv4_observable.has_directive(DIRECTIVE_NO_SCAN))
file_observable = root.find_observable(lambda o: o.type == F_FILE)
self.assertIsNotNone(file_observable)
self.assertEquals(file_observable.value, 'dest/test.txt')
with open(os.path.join(root.storage_dir, file_observable.value), 'r') as fp:
self.assertEquals(fp.read(), 'test')
def test_legacy_import(self):
from ace_client_lib.client import Alert
from ace_api import Alert as Alert_2
# these two should be the same thing
self.assertTrue(Alert is Alert_2)
def test_legacy_failed_submit(self):
self.stop_api_server()
alert = ace_api.Alert(description='Test Alert')
alert.add_observable(F_IPV4, '1.2.3.4', local_time(), directives=[DIRECTIVE_NO_SCAN])
alert.add_tag('test')
temp_path = os.path.join(saq.TEMP_DIR, 'test.txt')
with open(temp_path, 'w') as fp:
fp.write('test')
alert.add_attachment_link(temp_path, 'dest/test.txt')
with self.assertRaises(Exception):
alert.submit(f'https://{saq.API_PREFIX}', ssl_verification=saq.CONFIG['SSL']['ca_chain_path'])
self.assertEquals(log_count('unable to submit '), 1)
# the .saq_alerts directory should have a single subdirectory
dir_list = os.listdir('.saq_alerts')
self.assertEquals(len(dir_list), 1)
# load the alert
target_path = os.path.join('.saq_alerts', dir_list[0], 'alert')
with open(target_path, 'rb') as fp:
new_alert = pickle.load(fp)
self.assertEquals(new_alert.submit_kwargs, alert.submit_kwargs)
def test_failed_submit(self):
self.stop_api_server()
analysis = ace_api.Analysis(description='Test Analysis submit')
analysis.add_observable(F_IPV4, '1.2.3.4', local_time(), directives=[DIRECTIVE_NO_SCAN])
analysis.add_tag('test')
analysis.add_user('test_user')
temp_path = os.path.join(saq.TEMP_DIR, 'test.txt')
with open(temp_path, 'w') as fp:
fp.write('test')
analysis.add_file(temp_path, relative_storage_path='dest/test.txt')
with self.assertRaises(Exception):
analysis.submit(f'https://{saq.API_PREFIX}', ssl_verification=saq.CONFIG['SSL']['ca_chain_path'])
self.assertEquals(log_count('unable to submit '), 1)
# the .saq_alerts directory should have a single subdirectory
dir_list = os.listdir('.saq_alerts')
self.assertEquals(len(dir_list), 1)
# load the analysis object
target_path = os.path.join('.saq_alerts', dir_list[0], 'alert')
with open(target_path, 'rb') as fp:
new_analysis = pickle.load(fp)
self.assertEquals(new_analysis.submit_kwargs, analysis.submit_kwargs)
def test_submit_failed_alerts(self):
self.stop_api_server()
alert = ace_api.Alert(description='Test Alert')
alert.add_observable(F_IPV4, '1.2.3.4', local_time(), directives=[DIRECTIVE_NO_SCAN])
alert.add_tag('test')
temp_path = os.path.join(saq.TEMP_DIR, 'test.txt')
with open(temp_path, 'w') as fp:
fp.write('test')
alert.add_attachment_link(temp_path, 'dest/test.txt')
with self.assertRaises(Exception):
uuid = alert.submit(f'https://{saq.API_PREFIX}', ssl_verification=saq.CONFIG['SSL']['ca_chain_path'])
self.assertEquals(log_count('unable to submit '), 1)
# the .saq_alerts directory should have a single subdirectory
dir_list = os.listdir('.saq_alerts')
self.assertEquals(len(dir_list), 1)
# load the alert
target_path = os.path.join('.saq_alerts', dir_list[0], 'alert')
with open(target_path, 'rb') as fp:
new_alert = pickle.load(fp)
self.assertEquals(new_alert.submit_kwargs, alert.submit_kwargs)
# try to submit it using submit_failed_alerts
self.start_api_server()
ace_api.submit_failed_alerts(delete_on_success=True)
# this directory should be cleared out
dir_list = os.listdir('.saq_alerts')
self.assertEquals(len(dir_list), 0)
def test_submit_failed_analysis(self):
self.stop_api_server()
analysis = ace_api.Analysis(description='Test Analysis')
analysis.add_observable(F_IPV4, '1.2.3.4', local_time(), directives=[DIRECTIVE_NO_SCAN])
analysis.add_tag('test')
temp_path = os.path.join(saq.TEMP_DIR, 'test.txt')
with open(temp_path, 'w') as fp:
fp.write('test')
analysis.add_file(temp_path, relative_storage_path='dest/test.txt')
with self.assertRaises(Exception):
uuid = analysis.submit(f'https://{saq.API_PREFIX}', ssl_verification=saq.CONFIG['SSL']['ca_chain_path'])
self.assertEquals(log_count('unable to submit '), 1)
# the .saq_alerts directory should have a single subdirectory
dir_list = os.listdir('.saq_alerts')
self.assertEquals(len(dir_list), 1)
# load the alert
target_path = os.path.join('.saq_alerts', dir_list[0], 'alert')
with open(target_path, 'rb') as fp:
new_analysis = pickle.load(fp)
self.assertEquals(new_analysis.submit_kwargs, analysis.submit_kwargs)
# did we actually write data to the file?
data_test = None
with open(os.path.join('.saq_alerts', new_analysis.uuid, 'dest/test.txt'), 'r') as fp:
data_test = fp.read()
self.assertEquals(data_test, 'test')
# try to submit it using submit_failed_alerts
self.start_api_server()
ace_api.submit_failed_alerts(delete_on_success=True)
# this directory should be cleared out
dir_list = os.listdir('.saq_alerts')
self.assertEquals(len(dir_list), 0)
def test_analysis_file_handling(self):
analysis = ace_api.Analysis(description='Test Analysis')
# add a normal file
normal_file_path = self.create_test_file('normal.txt')
analysis.add_file(normal_file_path)
# add a normal file, passing in the file pointer
fp_file_path = self.create_test_file('fp.txt')
fp = open(fp_file_path, 'rb')
analysis.add_file(fp_file_path, fp)
# add a normal file but tell it to go into a subdirectory
subdir_file_path = self.create_test_file('subdir.txt')
analysis.add_file(subdir_file_path, relative_storage_path='subdir/subdir.txt')
# add a file passing the contents as a string
analysis.add_file('str.txt', 'This is a string.')
# add a file passing the contents as a bytes
analysis.add_file('bytes.txt', b'This is a bytes.')
result = analysis.submit()
# make sure it got our files
io_buffer = io.BytesIO()
ace_api.get_analysis_file(result.uuid, 'normal.txt', output_fp=io_buffer)
with open(normal_file_path, 'rb') as fp:
self.assertEquals(fp.read(), io_buffer.getvalue())
io_buffer = io.BytesIO()
ace_api.get_analysis_file(result.uuid, 'fp.txt', output_fp=io_buffer)
with open(fp_file_path, 'rb') as fp:
self.assertEquals(fp.read(), io_buffer.getvalue())
#io_buffer = io.BytesIO()
#ace_api.get_analysis_file(result.uuid, 'subdir/subdir.txt', output_fp=io_buffer)
#with open(subdir_file_path, 'rb') as fp:
#self.assertEquals(fp.read(), io_buffer.getvalue())
io_buffer = io.BytesIO()
ace_api.get_analysis_file(result.uuid, 'str.txt', output_fp=io_buffer)
self.assertEquals(b'This is a string.', io_buffer.getvalue())
io_buffer = io.BytesIO()
ace_api.get_analysis_file(result.uuid, 'bytes.txt', output_fp=io_buffer)
self.assertEquals(b'This is a bytes.', io_buffer.getvalue())
class CloudphishAPITestCase(CloudphishTestCase, ACEEngineTestCase):
def setUp(self, *args, **kwargs):
super().setUp(*args, **kwargs)
self.start_api_server()
ace_api.set_default_remote_host(saq.API_PREFIX)
ace_api.set_default_ssl_ca_path(saq.CONFIG['SSL']['ca_chain_path'])
def test_cloudphish_api(self):
import saq.cloudphish
submission_result = ace_api.cloudphish_submit(TEST_URL)
for key in [ saq.cloudphish.KEY_RESULT,
saq.cloudphish.KEY_DETAILS,
saq.cloudphish.KEY_STATUS,
saq.cloudphish.KEY_ANALYSIS_RESULT,
saq.cloudphish.KEY_HTTP_RESULT,
saq.cloudphish.KEY_HTTP_MESSAGE,
saq.cloudphish.KEY_SHA256_CONTENT,
saq.cloudphish.KEY_SHA256_URL,
saq.cloudphish.KEY_LOCATION,
saq.cloudphish.KEY_FILE_NAME,
saq.cloudphish.KEY_UUID, ]:
self.assertTrue(key in submission_result)
self.assertEquals(submission_result[saq.cloudphish.KEY_RESULT], saq.cloudphish.RESULT_OK)
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_DETAILS])
self.assertEquals(submission_result[saq.cloudphish.KEY_STATUS], saq.cloudphish.STATUS_NEW)
self.assertEquals(submission_result[saq.cloudphish.KEY_ANALYSIS_RESULT], saq.cloudphish.SCAN_RESULT_UNKNOWN)
self.assertIsNone(submission_result[saq.cloudphish.KEY_HTTP_RESULT])
self.assertIsNone(submission_result[saq.cloudphish.KEY_HTTP_MESSAGE])
self.assertIsNone(submission_result[saq.cloudphish.KEY_SHA256_CONTENT])
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_SHA256_URL])
self.assertIsNone(submission_result[saq.cloudphish.KEY_LOCATION])
self.assertIsNone(submission_result[saq.cloudphish.KEY_FILE_NAME])
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_UUID])
# now we start an engine to work on cloudphish analysis
engine = TestEngine(analysis_pools={ANALYSIS_MODE_CLOUDPHISH: 1},
local_analysis_modes=[ANALYSIS_MODE_CLOUDPHISH])
engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
# force this analysis to become an alert
engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)
engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH)
engine.controlled_stop()
engine.start()
engine.wait()
submission_result = ace_api.cloudphish_submit(TEST_URL)
self.assertEquals(submission_result[saq.cloudphish.KEY_RESULT], saq.cloudphish.RESULT_OK)
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_DETAILS])
self.assertEquals(submission_result[saq.cloudphish.KEY_STATUS], saq.cloudphish.STATUS_ANALYZED)
self.assertEquals(submission_result[saq.cloudphish.KEY_ANALYSIS_RESULT], saq.cloudphish.SCAN_RESULT_ALERT)
self.assertEquals(submission_result[saq.cloudphish.KEY_HTTP_RESULT], 200)
self.assertEquals(submission_result[saq.cloudphish.KEY_HTTP_MESSAGE], 'OK')
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_SHA256_CONTENT])
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_SHA256_URL])
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_LOCATION])
self.assertEquals(submission_result[saq.cloudphish.KEY_FILE_NAME], 'Payment_Advice.pdf')
self.assertIsNotNone(submission_result[saq.cloudphish.KEY_UUID])
# attempt to download the contents of the url
output_path = os.path.join(saq.TEMP_DIR, 'cloudphish.download')
download_result = ace_api.cloudphish_download(sha256=submission_result[saq.cloudphish.KEY_SHA256_URL],
output_path=output_path)
# make sure we can download the file
with open(output_path, 'rb') as fp:
data = fp.read()
hasher = hashlib.sha256()
hasher.update(data)
self.assertEquals(hasher.hexdigest().lower(), submission_result[saq.cloudphish.KEY_SHA256_CONTENT].lower())
# make sure we can clear the alert for this url
self.assertTrue(ace_api.cloudphish_clear_alert(sha256=submission_result[saq.cloudphish.KEY_SHA256_URL]))
# and verify that
submission_result = ace_api.cloudphish_submit(TEST_URL)
self.assertEquals(submission_result[saq.cloudphish.KEY_ANALYSIS_RESULT], saq.cloudphish.SCAN_RESULT_CLEAR)
|