filename | text
---|---
the-stack_0_8828 | from multiprocessing.connection import Listener
from threading import Thread
import pickle
class RPCHandler:
def __init__(self):
self._functions = {}
def register_function(self, func):
self._functions[func.__name__] = func
def handle_connection(self, connection):
try:
while True:
# Receive a message
func_name, args, kwargs = pickle.loads(connection.recv())
# Run the RPC and send a response
try:
r = self._functions[func_name](*args, **kwargs)
connection.send(pickle.dumps(r))
except Exception as e:
connection.send(pickle.dumps(e))
except EOFError:
pass
def rpc_server(handler, address, authkey):
sock = Listener(address, authkey=authkey)
while True:
client = sock.accept()
t = Thread(target=handler.handle_connection, args=(client,))
t.daemon = True
t.start()
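# --- illustrative client-side counterpart (not part of the original snippet) ---
# A minimal proxy sketch that speaks the same pickle-based protocol as
# handle_connection() above: it sends (func_name, args, kwargs) and expects a
# pickled result or exception back.
from multiprocessing.connection import Client

class RPCProxy:
    def __init__(self, address, authkey):
        self._connection = Client(address, authkey=authkey)

    def __getattr__(self, name):
        def do_rpc(*args, **kwargs):
            # Serialize the call exactly as handle_connection() expects it
            self._connection.send(pickle.dumps((name, args, kwargs)))
            result = pickle.loads(self._connection.recv())
            if isinstance(result, Exception):
                raise result
            return result
        return do_rpc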
|
the-stack_0_8829 | from eth_account.account import Account
from web3.types import TxParams
from typing import TypedDict, List
from hexbytes import HexBytes
FlashbotsBundleTx = TypedDict(
"FlashbotsBundleTx",
{
"transaction": TxParams,
"signer": Account,
},
)
FlashbotsBundleRawTx = TypedDict(
"FlashbotsBundleRawTx",
{
"signed_transaction": str,
},
)
FlashbotsBundleDictTx = TypedDict(
"FlashbotsBundleDictTx",
{
"blockHash": HexBytes,
"blockNumber": int,
"from": str,
"gas": int,
"gasPrice": int,
"hash": HexBytes,
"input": str,
"nonce": int,
"r": HexBytes,
"s": HexBytes,
"to": str,
"transactionIndex": int,
"type": str,
"v": int,
"value": int,
},
)
FlashbotsOpts = TypedDict(
"FlashbotsOpts",
{"minTimestamp": int, "maxTimestamp": int, "revertingTxHashes": List[str]},
)
# Type missing from eth_account, not really a part of flashbots web3 per se
SignTx = TypedDict(
"SignTx",
{
"nonce": int,
"chainId": int,
"to": str,
"data": str,
"value": int,
"gas": int,
"gasPrice": int,
},
total=False,
)
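# --- illustrative example (not part of the original module) ---
# Sketch of how the TypedDicts above are typically populated; the key and the
# transaction fields below are throwaway placeholders, not usable values.
if __name__ == "__main__":
    signer = Account.create()  # throwaway local account, purely for illustration
    tx: TxParams = {
        "to": "0x0000000000000000000000000000000000000000",
        "value": 0,
        "gas": 21000,
        "gasPrice": 0,
        "nonce": 0,
        "chainId": 1,
    }
    bundle: List[FlashbotsBundleTx] = [{"transaction": tx, "signer": signer}]
    opts: FlashbotsOpts = {"minTimestamp": 0, "maxTimestamp": 0, "revertingTxHashes": []}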
|
the-stack_0_8831 | # import codes.basic_functions.ourpretrainedmodels as pretrainedmodels
import pretrainedmodels
import torch
import torch.nn as nn
class ImagenetEnsemble(nn.Module):
def __init__(self, ):
super(ImagenetEnsemble, self).__init__()
self.archs = ['resnet34', 'resnet152', 'densenet121']
for model_type in self.archs:
model = pretrainedmodels.__dict__[model_type](
num_classes=1000, pretrained='imagenet').eval()
for param in model.parameters():
param.requires_grad = False
setattr(self, model_type, model)
self.input_size = model.input_size
self.mean = model.mean
self.std = model.std
def forward(self, x):
logits = 0
for arch in self.archs:
model = getattr(self, arch)
logits += model(x)
return logits / len(self.archs)
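# --- illustrative usage sketch (not part of the original module) ---
# Forward a dummy batch through the ensemble; a real pipeline would resize to
# model.input_size and normalise with model.mean / model.std first.
if __name__ == "__main__":
    model = ImagenetEnsemble()          # downloads pretrained weights on first use
    x = torch.randn(1, 3, 224, 224)     # placeholder input batch
    with torch.no_grad():
        logits = model(x)
    print(logits.shape)                 # expected: torch.Size([1, 1000])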
|
the-stack_0_8833 | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import join
from SCons.Script import ARGUMENTS, AlwaysBuild, Default, DefaultEnvironment
def __getSize(size_type, env):
# FIXME: i don't really know how to do this right. see:
# https://community.platformio.org/t/missing-integers-in-board-extra-flags-in-board-json/821
return str(
env.BoardConfig().get(
"build",
{
# defaults
"size_heap": 1024,
"size_iram": 256,
"size_xram": 65536,
"size_code": 65536,
},
)[size_type]
)
def _parseSdccFlags(flags):
assert flags
if isinstance(flags, list):
flags = " ".join(flags)
flags = str(flags)
parsed_flags = []
unparsed_flags = []
prev_token = ""
for token in flags.split(" "):
if prev_token.startswith("--") and not token.startswith("-"):
parsed_flags.extend([prev_token, token])
prev_token = ""
continue
if prev_token:
unparsed_flags.append(prev_token)
prev_token = token
unparsed_flags.append(prev_token)
return (parsed_flags, unparsed_flags)
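# Example (illustrative): the helper above separates SDCC-style "--flag value"
# pairs from everything else, e.g.
#   _parseSdccFlags("--xram-loc 0x0001 -DBUILD_FLAG")
#   -> (["--xram-loc", "0x0001"], ["-DBUILD_FLAG"])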
env = DefaultEnvironment()
platform = env.PioPlatform()
board_config = env.BoardConfig()
env.Replace(
AR="sdar",
AS="sdas8051",
CC="sdcc",
LD="sdld",
RANLIB="sdranlib",
OBJCOPY="sdobjcopy",
OBJSUFFIX=".rel",
LIBSUFFIX=".lib",
SIZETOOL=join(platform.get_dir(), "builder", "size.py"),
SIZECHECKCMD="$PYTHONEXE $SIZETOOL $SOURCES",
SIZEPRINTCMD='"$PYTHONEXE" $SIZETOOL $SOURCES',
SIZEPROGREGEXP=r"^ROM/EPROM/FLASH\s+[a-fx\d]+\s+[a-fx\d]+\s+(\d+).*",
PROGNAME="firmware",
PROGSUFFIX=".hex",
)
env.Append(
ASFLAGS=["-l", "-s"],
CFLAGS=["--std-sdcc11"],
CCFLAGS=[
"--opt-code-size", # optimize for size
"--peep-return", # peephole optimization for return instructions
"-m%s" % board_config.get("build.cpu"),
],
CPPDEFINES=["F_CPU=$BOARD_F_CPU", "HEAP_SIZE=" + __getSize("size_heap", env)],
LINKFLAGS=[
"-m%s" % board_config.get("build.cpu"),
"--iram-size",
__getSize("size_iram", env),
"--xram-size",
__getSize("size_xram", env),
"--code-size",
__getSize("size_code", env),
"--out-fmt-ihx",
],
)
if int(ARGUMENTS.get("PIOVERBOSE", 0)):
env.Prepend(UPLOADERFLAGS=["-v"])
# parse manually SDCC flags
if env.get("BUILD_FLAGS"):
_parsed, _unparsed = _parseSdccFlags(env.get("BUILD_FLAGS"))
env.Append(CCFLAGS=_parsed)
env["BUILD_FLAGS"] = _unparsed
project_sdcc_flags = None
if env.get("SRC_BUILD_FLAGS"):
project_sdcc_flags, _unparsed = _parseSdccFlags(env.get("SRC_BUILD_FLAGS"))
env["SRC_BUILD_FLAGS"] = _unparsed
#
# Target: Build executable and linkable firmware
#
target_firm = env.BuildProgram()
if project_sdcc_flags:
env.Import("projenv")
projenv.Append(CCFLAGS=project_sdcc_flags)
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
#
# Target: Print binary size
#
target_size = env.Alias(
"size", target_firm, env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE")
)
AlwaysBuild(target_size)
#
# Target: Upload firmware
#
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
upload_actions = []
if upload_protocol == "stcgal":
f_cpu_khz = int(board_config.get("build.f_cpu").strip("L")) / 1000
stcgal_protocol = board_config.get("upload.stcgal_protocol")
env.Replace(
UPLOADER=join(platform.get_package_dir("tool-stcgal") or "", "stcgal.py"),
UPLOADERFLAGS=[
"-P",
stcgal_protocol,
"-p",
"$UPLOAD_PORT",
"-t",
int(f_cpu_khz),
"-a",
],
UPLOADCMD='"$PYTHONEXE" "$UPLOADER" $UPLOADERFLAGS $SOURCE',
)
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE"),
]
# CH55x upload tool
elif upload_protocol == "ch55x":
env.Replace(
UPLOADER="vnproch55x",
UPLOADERFLAGS=["-f"],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.bin",
)
upload_actions = [
env.VerboseAction(
" ".join(
[
"$OBJCOPY",
"-I",
"ihex",
"-O",
"binary",
"$SOURCE",
"$BUILD_DIR/${PROGNAME}.bin",
]
),
"Creating binary",
),
env.VerboseAction("$UPLOADCMD", "Uploading ${PROGNAME}.bin"),
]
# custom upload tool
elif upload_protocol == "custom":
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", target_firm, upload_actions))
#
# Setup default targets
#
Default([target_buildprog, target_size])
|
the-stack_0_8836 | import argparse
from collections import deque
import os
import torch
from torch import optim
from tqdm import tqdm
from environments import CartPoleEnv
from evaluation import evaluate_agent
from models import ActorCritic, AIRLDiscriminator, GAILDiscriminator, GMMILDiscriminator, REDDiscriminator
from training import TransitionDataset, adversarial_imitation_update, behavioural_cloning_update, compute_advantages, ppo_update, target_estimation_update
from utils import flatten_list_dicts, lineplot
# Setup
parser = argparse.ArgumentParser(description='IL')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='Random seed')
parser.add_argument('--steps', type=int, default=100000, metavar='T', help='Number of environment steps')
parser.add_argument('--hidden-size', type=int, default=32, metavar='H', help='Hidden size')
parser.add_argument('--discount', type=float, default=0.99, metavar='γ', help='Discount')
parser.add_argument('--trace-decay', type=float, default=0.95, metavar='λ', help='GAE trace decay')
parser.add_argument('--ppo-clip', type=float, default=0.2, metavar='ε', help='PPO clip ratio')
parser.add_argument('--ppo-epochs', type=int, default=4, metavar='K', help='PPO epochs')
parser.add_argument('--value-loss-coeff', type=float, default=0.5, metavar='c1', help='Value loss coefficient')
parser.add_argument('--entropy-loss-coeff', type=float, default=0, metavar='c2', help='Entropy regularisation coefficient')
parser.add_argument('--learning-rate', type=float, default=0.001, metavar='η', help='Learning rate')
parser.add_argument('--batch-size', type=int, default=2048, metavar='B', help='Minibatch size')
parser.add_argument('--max-grad-norm', type=float, default=1, metavar='N', help='Maximum gradient L2 norm')
parser.add_argument('--evaluation-interval', type=int, default=10000, metavar='EI', help='Evaluation interval')
parser.add_argument('--evaluation-episodes', type=int, default=50, metavar='EE', help='Evaluation episodes')
parser.add_argument('--save-trajectories', action='store_true', default=False, help='Store trajectories from agent after training')
parser.add_argument('--imitation', type=str, default='', choices=['AIRL', 'BC', 'GAIL', 'GMMIL', 'RED'], metavar='I', help='Imitation learning algorithm')
parser.add_argument('--state-only', action='store_true', default=False, help='State-only imitation learning')
parser.add_argument('--imitation-epochs', type=int, default=5, metavar='IE', help='Imitation learning epochs')
parser.add_argument('--imitation-batch-size', type=int, default=128, metavar='IB', help='Imitation learning minibatch size')
parser.add_argument('--imitation-replay-size', type=int, default=4, metavar='IRS', help='Imitation learning trajectory replay size')
parser.add_argument('--r1-reg-coeff', type=float, default=1, metavar='γ', help='R1 gradient regularisation coefficient')
args = parser.parse_args()
torch.manual_seed(args.seed)
os.makedirs('results', exist_ok=True)
# Set up environment and models
env = CartPoleEnv()
env.seed(args.seed)
agent = ActorCritic(env.observation_space.shape[0], env.action_space.n, args.hidden_size)
agent_optimiser = optim.RMSprop(agent.parameters(), lr=args.learning_rate)
if args.imitation:
# Set up expert trajectories dataset
expert_trajectories = TransitionDataset(flatten_list_dicts(torch.load('expert_trajectories.pth')))
# Set up discriminator
if args.imitation in ['AIRL', 'GAIL', 'GMMIL', 'RED']:
if args.imitation == 'AIRL':
discriminator = AIRLDiscriminator(env.observation_space.shape[0], env.action_space.n, args.hidden_size, args.discount, state_only=args.state_only)
elif args.imitation == 'GAIL':
discriminator = GAILDiscriminator(env.observation_space.shape[0], env.action_space.n, args.hidden_size, state_only=args.state_only)
elif args.imitation == 'GMMIL':
discriminator = GMMILDiscriminator(env.observation_space.shape[0], env.action_space.n, state_only=args.state_only)
elif args.imitation == 'RED':
discriminator = REDDiscriminator(env.observation_space.shape[0], env.action_space.n, args.hidden_size, state_only=args.state_only)
if args.imitation in ['AIRL', 'GAIL', 'RED']:
discriminator_optimiser = optim.RMSprop(discriminator.parameters(), lr=args.learning_rate)
# Metrics
metrics = dict(train_steps=[], train_returns=[], test_steps=[], test_returns=[])
# Main training loop
state, terminal, episode_return, trajectories, policy_trajectory_replay_buffer = env.reset(), False, 0, [], deque(maxlen=args.imitation_replay_size)
pbar = tqdm(range(1, args.steps + 1), unit_scale=1, smoothing=0)
for step in pbar:
if args.imitation in ['BC', 'RED']:
if step == 1:
for _ in tqdm(range(args.imitation_epochs), leave=False):
if args.imitation == 'BC':
# Perform behavioural cloning updates offline
behavioural_cloning_update(agent, expert_trajectories, agent_optimiser, args.imitation_batch_size)
elif args.imitation == 'RED':
# Train predictor network to match random target network
target_estimation_update(discriminator, expert_trajectories, discriminator_optimiser, args.imitation_batch_size)
if args.imitation != 'BC':
# Collect set of trajectories by running policy π in the environment
policy, value = agent(state)
action = policy.sample()
log_prob_action, entropy = policy.log_prob(action), policy.entropy()
next_state, reward, terminal = env.step(action)
episode_return += reward
trajectories.append(dict(states=state, actions=action, rewards=torch.tensor([reward], dtype=torch.float32), terminals=torch.tensor([terminal], dtype=torch.float32), log_prob_actions=log_prob_action, old_log_prob_actions=log_prob_action.detach(), values=value, entropies=entropy))
state = next_state
if terminal:
# Store metrics and reset environment
metrics['train_steps'].append(step)
metrics['train_returns'].append([episode_return])
pbar.set_description('Step: %i | Return: %f' % (step, episode_return))
state, episode_return = env.reset(), 0
if len(trajectories) >= args.batch_size:
policy_trajectories = flatten_list_dicts(trajectories) # Flatten policy trajectories (into a single batch for efficiency; valid for feedforward networks)
trajectories = [] # Clear the set of trajectories
if args.imitation in ['AIRL', 'GAIL', 'GMMIL', 'RED']:
# Train discriminator and predict rewards
if args.imitation in ['AIRL', 'GAIL']:
# Use a replay buffer of previous trajectories to prevent overfitting to current policy
policy_trajectory_replay_buffer.append(policy_trajectories)
policy_trajectory_replays = flatten_list_dicts(policy_trajectory_replay_buffer)
for _ in tqdm(range(args.imitation_epochs), leave=False):
adversarial_imitation_update(args.imitation, agent, discriminator, expert_trajectories, TransitionDataset(policy_trajectory_replays), discriminator_optimiser, args.imitation_batch_size, args.r1_reg_coeff)
# Predict rewards
with torch.no_grad():
if args.imitation == 'AIRL':
policy_trajectories['rewards'] = discriminator.predict_reward(policy_trajectories['states'], policy_trajectories['actions'], torch.cat([policy_trajectories['states'][1:], next_state]), policy_trajectories['log_prob_actions'].exp(), policy_trajectories['terminals'])
elif args.imitation == 'GAIL':
policy_trajectories['rewards'] = discriminator.predict_reward(policy_trajectories['states'], policy_trajectories['actions'])
elif args.imitation == 'GMMIL':
policy_trajectories['rewards'] = discriminator.predict_reward(policy_trajectories['states'], policy_trajectories['actions'], expert_trajectories['states'], expert_trajectories['actions'])
elif args.imitation == 'RED':
policy_trajectories['rewards'] = discriminator.predict_reward(policy_trajectories['states'], policy_trajectories['actions'])
# Compute rewards-to-go R and generalised advantage estimates ψ based on the current value function V
compute_advantages(policy_trajectories, agent(state)[1], args.discount, args.trace_decay)
# Normalise advantages
policy_trajectories['advantages'] = (policy_trajectories['advantages'] - policy_trajectories['advantages'].mean()) / (policy_trajectories['advantages'].std() + 1e-8)
# Perform PPO updates
for epoch in tqdm(range(args.ppo_epochs), leave=False):
ppo_update(agent, policy_trajectories, agent_optimiser, args.ppo_clip, epoch, args.value_loss_coeff, args.entropy_loss_coeff)
# Evaluate agent and plot metrics
if step % args.evaluation_interval == 0:
metrics['test_steps'].append(step)
metrics['test_returns'].append(evaluate_agent(agent, args.evaluation_episodes, seed=args.seed))
lineplot(metrics['test_steps'], metrics['test_returns'], 'test_returns')
if args.imitation != 'BC': lineplot(metrics['train_steps'], metrics['train_returns'], 'train_returns')
if args.save_trajectories:
# Store trajectories from agent after training
_, trajectories = evaluate_agent(agent, args.evaluation_episodes, return_trajectories=True, seed=args.seed)
torch.save(trajectories, os.path.join('results', 'trajectories.pth'))
# Save agent and metrics
torch.save(agent.state_dict(), os.path.join('results', 'agent.pth'))
if args.imitation in ['AIRL', 'GAIL']: torch.save(discriminator.state_dict(), os.path.join('results', 'discriminator.pth'))
torch.save(metrics, os.path.join('results', 'metrics.pth'))
env.close()
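# Example invocation (illustrative; the script/module name is an assumption):
#   python main.py --imitation GAIL --state-only --steps 100000
# The script expects expert demonstrations in ./expert_trajectories.pth and
# writes agent.pth and metrics.pth (plus trajectories.pth if requested) into ./results/.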
|
the-stack_0_8837 | import tkinter as tk
from tkinter import ttk
from tkinter.font import Font
import re
import math
MAX_LINES = 16
def traverse_up(widget, function, initializer = None):
return function(widget, traverse_up(widget.master, function, initializer) if hasattr(widget, 'master') else initializer)
class AutocompleteEntry(ttk.Frame):
def __init__(self, container, suggestions, textvariable=None, bounding_container=None, window=None, font=None, *args, **kwargs):
super().__init__(container, *args, **kwargs)
self.lb_up = False
self.bounding_container = bounding_container if bounding_container else self
self.container = container
self.suggestions = suggestions
self.window = window
self.var = textvariable if textvariable else tk.StringVar()
self.var.trace('w', self.changed)
self.entry = ttk.Entry(self, textvariable=self.var)
self.entry.pack(fill=tk.X, expand=True)
self.bind("<Configure>", self.configure)
self.bind('<FocusOut>', self.focus_out )
def focus_out(self, event):
if not self.lb_up:
return
print(str(self.focus_get()), repr(str(self.lb)))
if repr(str(self.focus_get())) == repr(str(self.lb)):
return
self.destroy_list_box()
def create_list_box(self):
self.lb = tk.Listbox(self.bounding_container, relief=tk.RAISED, highlightthickness=1, activestyle='none')
self.lb.bind("<Double-Button-1>", self.selection)
self.lb.bind("<Return>", self.selection)
self.lb.bind('<FocusOut>', self.focus_out )
self.lb_up = True
def destroy_list_box(self):
if not self.lb_up:
return
self.lb.destroy()
self.lb_up = False
def position_list_box(self):
if not self.lb_up:
return
x, y, width, height = self.compute_list_box_config()
self.lb.configure(height=math.floor(height / self.min_height))
self.lb.place(in_=self.bounding_container, x=x - self.container.winfo_x(), y=y, width=width) # NOTE: somehow take paddings into consideration
def compute_list_box_config(self):
# place below if distance between lowest points of container and bounding container is more than minimal listbox height
bounding_y = traverse_up(self.bounding_container, lambda widget, acc: widget.winfo_y() + acc if widget and widget.master else acc, 0)
container_y = traverse_up(self.container, lambda widget, acc: widget.winfo_y() + acc if widget and widget.master else acc, 0)
distance = bounding_y + self.bounding_container.winfo_height() - container_y - self.container.winfo_height()
if distance > self.min_height:
overflow = distance - self.max_height
height = math.floor((self.max_height + overflow if overflow < 0 else self.max_height) / self.min_height) * self.min_height
return (
traverse_up(self, lambda widget, acc: widget.winfo_x() + acc if widget and widget.master else acc, 0),
traverse_up(self, lambda widget, acc: widget.winfo_y() + acc if widget and widget.master else acc, self.winfo_height()),
self.winfo_width(),
height
)
# place above
distance = self.container.winfo_y() - self.bounding_container.winfo_y()
if distance > self.min_height:
overflow = distance - self.max_height
height = math.floor((self.max_height + overflow if overflow < 0 else self.max_height) / self.min_height) * self.min_height
return (
traverse_up(self, lambda widget, acc: widget.winfo_x() + acc if widget and widget.master else acc, 0),
traverse_up(self, lambda widget, acc: widget.winfo_y() + acc if widget and widget.master else acc, -height),
self.winfo_width(),
height
)
def changed(self, *args):
if self.var.get() == '' and self.lb_up:
self.destroy_list_box()
return
words = self.comparison()
if words:
if not self.lb_up:
self.create_list_box()
self.lb.delete(0, tk.END)
for w in words:
self.lb.insert(tk.END,w)
self.max_height = min(self.min_height * MAX_LINES, self.min_height * len(words))
self.position_list_box()
else:
self.destroy_list_box()
def selection(self, event):
if not self.lb_up:
return
self.lb.get(tk.ACTIVE)
self.var.set(self.lb.get(tk.ACTIVE))
self.destroy_list_box()
self.entry.icursor(tk.END)
def up(self, event):
if not self.lb_up:
return
index = '0' if self.lb.curselection() == () else self.lb.curselection()[0]
self.lb.selection_clear(first=index)
index = str(max(int(index)-1, 0))
self.lb.selection_set(first=index)
self.lb.event_generate("<<ListboxSelect>>")
def down(self, event):
if not self.lb_up:
return
index = '0' if self.lb.curselection() == () else self.lb.curselection()[0]
self.lb.selection_clear(first=index)
index = str(min(int(index)+1, self.lb.size() - 1)) if index != '0' else '0'
self.lb.selection_set(first=index)
self.lb.event_generate("<<ListboxSelect>>")
def comparison(self):
pattern = re.compile('.*' + self.var.get() + '.*')
return [w for w in self.suggestions if re.match(pattern, w)]
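    # Illustrative usage (not part of the original widget; kept as a comment so the
    # class body stays intact):
    #   root = tk.Tk()
    #   entry = AutocompleteEntry(root, ['alpha', 'beta', 'gamma'],
    #                             bounding_container=root, window=root)
    #   entry.pack(fill=tk.X, padx=8, pady=8)
    #   entry.entry.bind('<Up>', entry.up)
    #   entry.entry.bind('<Down>', entry.down)
    #   root.mainloop()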
def configure(self, event):
self.min_height = self.winfo_height()
        self.position_list_box()
|
the-stack_0_8838 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading
import time
from collections import defaultdict
from elasticapm.conf import constants
from elasticapm.utils import compat, is_master_process
from elasticapm.utils.logging import get_logger
from elasticapm.utils.module_import import import_string
from elasticapm.utils.threading import IntervalTimer
logger = get_logger("elasticapm.metrics")
DISTINCT_LABEL_LIMIT = 1000
class MetricsRegistry(object):
def __init__(self, collect_interval, queue_func, tags=None, ignore_patterns=None):
"""
Creates a new metric registry
:param collect_interval: the interval to collect metrics from registered metric sets
:param queue_func: the function to call with the collected metrics
:param tags:
"""
self._collect_interval = collect_interval
self._queue_func = queue_func
self._metricsets = {}
self._tags = tags or {}
self._collect_timer = None
self._ignore_patterns = ignore_patterns or ()
if self._collect_interval:
# we only start the thread if we are not in a uwsgi master process
if not is_master_process():
self._start_collect_timer()
else:
# If we _are_ in a uwsgi master process, we use the postfork hook to start the thread after the fork
compat.postfork(lambda: self._start_collect_timer())
def register(self, class_path):
"""
Register a new metric set
:param class_path: a string with the import path of the metricset class
"""
if class_path in self._metricsets:
return
else:
try:
class_obj = import_string(class_path)
self._metricsets[class_path] = class_obj(self)
except ImportError as e:
logger.warning("Could not register %s metricset: %s", class_path, compat.text_type(e))
def get_metricset(self, class_path):
try:
return self._metricsets[class_path]
except KeyError:
raise MetricSetNotFound(class_path)
def collect(self):
"""
Collect metrics from all registered metric sets and queues them for sending
:return:
"""
logger.debug("Collecting metrics")
for name, metricset in compat.iteritems(self._metricsets):
for data in metricset.collect():
self._queue_func(constants.METRICSET, data)
def _start_collect_timer(self, timeout=None):
timeout = timeout or self._collect_interval
self._collect_timer = IntervalTimer(self.collect, timeout, name="eapm metrics collect timer", daemon=True)
logger.debug("Starting metrics collect timer")
self._collect_timer.start()
def _stop_collect_timer(self):
if self._collect_timer:
logger.debug("Cancelling collect timer")
self._collect_timer.cancel()
class MetricsSet(object):
def __init__(self, registry):
self._lock = threading.Lock()
self._counters = {}
self._gauges = {}
self._timers = {}
self._registry = registry
self._label_limit_logged = False
def counter(self, name, reset_on_collect=False, **labels):
"""
Returns an existing or creates and returns a new counter
:param name: name of the counter
:param reset_on_collect: indicate if the counter should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the counter object
"""
return self._metric(self._counters, Counter, name, reset_on_collect, labels)
def gauge(self, name, reset_on_collect=False, **labels):
"""
Returns an existing or creates and returns a new gauge
:param name: name of the gauge
        :param reset_on_collect: indicate if the gauge should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the gauge object
"""
return self._metric(self._gauges, Gauge, name, reset_on_collect, labels)
def timer(self, name, reset_on_collect=False, **labels):
"""
Returns an existing or creates and returns a new timer
:param name: name of the timer
:param reset_on_collect: indicate if the timer should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the timer object
"""
return self._metric(self._timers, Timer, name, reset_on_collect, labels)
def _metric(self, container, metric_class, name, reset_on_collect, labels):
"""
Returns an existing or creates and returns a metric
:param container: the container for the metric
:param metric_class: the class of the metric
:param name: name of the metric
:param reset_on_collect: indicate if the metric should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the metric object
"""
labels = self._labels_to_key(labels)
key = (name, labels)
with self._lock:
if key not in container:
if self._registry._ignore_patterns and any(
pattern.match(name) for pattern in self._registry._ignore_patterns
):
metric = noop_metric
elif len(self._gauges) + len(self._counters) + len(self._timers) >= DISTINCT_LABEL_LIMIT:
if not self._label_limit_logged:
self._label_limit_logged = True
logger.warning(
"The limit of %d metricsets has been reached, no new metricsets will be created."
% DISTINCT_LABEL_LIMIT
)
metric = noop_metric
else:
metric = metric_class(name, reset_on_collect=reset_on_collect)
container[key] = metric
return container[key]
def collect(self):
"""
Collects all metrics attached to this metricset, and returns it as a generator
with one or more elements. More than one element is returned if labels are used.
The format of the return value should be
{
"samples": {"metric.name": {"value": some_float}, ...},
"timestamp": unix epoch in microsecond precision
}
"""
self.before_collect()
timestamp = int(time.time() * 1000000)
samples = defaultdict(dict)
if self._counters:
for (name, labels), c in compat.iteritems(self._counters):
if c is not noop_metric:
val = c.val
if val or not c.reset_on_collect:
samples[labels].update({name: {"value": val}})
if c.reset_on_collect:
c.reset()
if self._gauges:
for (name, labels), g in compat.iteritems(self._gauges):
if g is not noop_metric:
val = g.val
if val or not g.reset_on_collect:
samples[labels].update({name: {"value": val}})
if g.reset_on_collect:
g.reset()
if self._timers:
for (name, labels), t in compat.iteritems(self._timers):
if t is not noop_metric:
val, count = t.val
if val or not t.reset_on_collect:
samples[labels].update({name + ".sum.us": {"value": int(val * 1000000)}})
samples[labels].update({name + ".count": {"value": count}})
if t.reset_on_collect:
t.reset()
if samples:
for labels, sample in compat.iteritems(samples):
result = {"samples": sample, "timestamp": timestamp}
if labels:
result["tags"] = {k: v for k, v in labels}
yield self.before_yield(result)
def before_collect(self):
"""
A method that is called right before collection. Can be used to gather metrics.
:return:
"""
pass
def before_yield(self, data):
return data
def _labels_to_key(self, labels):
return tuple((k, compat.text_type(v)) for k, v in sorted(compat.iteritems(labels)))
class SpanBoundMetricSet(MetricsSet):
def before_yield(self, data):
tags = data.get("tags", None)
if tags:
span_type, span_subtype = tags.pop("span.type", None), tags.pop("span.subtype", "")
if span_type or span_subtype:
data["span"] = {"type": span_type, "subtype": span_subtype}
transaction_name, transaction_type = tags.pop("transaction.name", None), tags.pop("transaction.type", None)
if transaction_name or transaction_type:
data["transaction"] = {"name": transaction_name, "type": transaction_type}
return data
class Counter(object):
__slots__ = ("name", "_lock", "_initial_value", "_val", "reset_on_collect")
def __init__(self, name, initial_value=0, reset_on_collect=False):
"""
Creates a new counter
:param name: name of the counter
:param initial_value: initial value of the counter, defaults to 0
"""
self.name = name
self._lock = threading.Lock()
self._val = self._initial_value = initial_value
self.reset_on_collect = reset_on_collect
def inc(self, delta=1):
"""
Increments the counter. If no delta is provided, it is incremented by one
:param delta: the amount to increment the counter by
:returns the counter itself
"""
with self._lock:
self._val += delta
return self
def dec(self, delta=1):
"""
Decrements the counter. If no delta is provided, it is decremented by one
:param delta: the amount to decrement the counter by
:returns the counter itself
"""
with self._lock:
self._val -= delta
return self
def reset(self):
"""
Reset the counter to the initial value
:returns the counter itself
"""
with self._lock:
self._val = self._initial_value
return self
@property
def val(self):
"""Returns the current value of the counter"""
return self._val
class Gauge(object):
__slots__ = ("name", "_val", "reset_on_collect")
def __init__(self, name, reset_on_collect=False):
"""
Creates a new gauge
:param name: label of the gauge
"""
self.name = name
self._val = None
self.reset_on_collect = reset_on_collect
@property
def val(self):
return self._val
@val.setter
def val(self, value):
self._val = value
def reset(self):
self._val = 0
class Timer(object):
__slots__ = ("name", "_val", "_count", "_lock", "reset_on_collect")
def __init__(self, name=None, reset_on_collect=False):
self.name = name
self._val = 0
self._count = 0
self._lock = threading.Lock()
self.reset_on_collect = reset_on_collect
def update(self, duration, count=1):
with self._lock:
self._val += duration
self._count += count
def reset(self):
with self._lock:
self._val = 0
self._count = 0
@property
def val(self):
with self._lock:
return self._val, self._count
class NoopMetric(object):
"""
A no-op metric that implements the "interface" of both Counter and Gauge.
Note that even when using a no-op metric, the value itself will still be calculated.
"""
def __init__(self, label, initial_value=0):
return
@property
def val(self):
return None
@val.setter
def val(self, value):
return
def inc(self, delta=1):
return
def dec(self, delta=-1):
return
def update(self, duration, count=1):
return
def reset(self):
return
noop_metric = NoopMetric("noop")
class MetricSetNotFound(LookupError):
def __init__(self, class_path):
super(MetricSetNotFound, self).__init__("%s metric set not found" % class_path)
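# --- illustrative usage sketch (not part of the original module) ---
# Direct use of the Counter and Timer primitives defined above; inside the agent
# these are normally reached through a MetricsSet registered with
# MetricsRegistry.register() using an import path.
if __name__ == "__main__":
    requests = Counter("app.requests.count", reset_on_collect=True)
    requests.inc().inc()                  # inc() returns the counter, so calls chain
    latency = Timer("app.request.duration")
    latency.update(0.125)
    total, count = latency.val            # Timer.val returns (sum, count)
    print(requests.val, total, count)     # -> 2 0.125 1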
|
the-stack_0_8839 | # P2P helper functions
# Copyright (c) 2013-2015, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import threading
import time
import Queue
import hwsim_utils
MGMT_SUBTYPE_PROBE_REQ = 4
MGMT_SUBTYPE_ACTION = 13
ACTION_CATEG_PUBLIC = 4
P2P_GO_NEG_REQ = 0
P2P_GO_NEG_RESP = 1
P2P_GO_NEG_CONF = 2
P2P_INVITATION_REQ = 3
P2P_INVITATION_RESP = 4
P2P_DEV_DISC_REQ = 5
P2P_DEV_DISC_RESP = 6
P2P_PROV_DISC_REQ = 7
P2P_PROV_DISC_RESP = 8
P2P_ATTR_STATUS = 0
P2P_ATTR_MINOR_REASON_CODE = 1
P2P_ATTR_CAPABILITY = 2
P2P_ATTR_DEVICE_ID = 3
P2P_ATTR_GROUP_OWNER_INTENT = 4
P2P_ATTR_CONFIGURATION_TIMEOUT = 5
P2P_ATTR_LISTEN_CHANNEL = 6
P2P_ATTR_GROUP_BSSID = 7
P2P_ATTR_EXT_LISTEN_TIMING = 8
P2P_ATTR_INTENDED_INTERFACE_ADDR = 9
P2P_ATTR_MANAGEABILITY = 10
P2P_ATTR_CHANNEL_LIST = 11
P2P_ATTR_NOTICE_OF_ABSENCE = 12
P2P_ATTR_DEVICE_INFO = 13
P2P_ATTR_GROUP_INFO = 14
P2P_ATTR_GROUP_ID = 15
P2P_ATTR_INTERFACE = 16
P2P_ATTR_OPERATING_CHANNEL = 17
P2P_ATTR_INVITATION_FLAGS = 18
P2P_ATTR_OOB_GO_NEG_CHANNEL = 19
P2P_ATTR_SERVICE_HASH = 21
P2P_ATTR_SESSION_INFORMATION_DATA = 22
P2P_ATTR_CONNECTION_CAPABILITY = 23
P2P_ATTR_ADVERTISEMENT_ID = 24
P2P_ATTR_ADVERTISED_SERVICE = 25
P2P_ATTR_SESSION_ID = 26
P2P_ATTR_FEATURE_CAPABILITY = 27
P2P_ATTR_PERSISTENT_GROUP = 28
P2P_ATTR_VENDOR_SPECIFIC = 221
P2P_SC_SUCCESS = 0
P2P_SC_FAIL_INFO_CURRENTLY_UNAVAILABLE = 1
P2P_SC_FAIL_INCOMPATIBLE_PARAMS = 2
P2P_SC_FAIL_LIMIT_REACHED = 3
P2P_SC_FAIL_INVALID_PARAMS = 4
P2P_SC_FAIL_UNABLE_TO_ACCOMMODATE = 5
P2P_SC_FAIL_PREV_PROTOCOL_ERROR = 6
P2P_SC_FAIL_NO_COMMON_CHANNELS = 7
P2P_SC_FAIL_UNKNOWN_GROUP = 8
P2P_SC_FAIL_BOTH_GO_INTENT_15 = 9
P2P_SC_FAIL_INCOMPATIBLE_PROV_METHOD = 10
P2P_SC_FAIL_REJECTED_BY_USER = 11
WSC_ATTR_CONFIG_METHODS = 0x1008
WLAN_EID_SSID = 0
WLAN_EID_SUPP_RATES = 1
WLAN_EID_VENDOR_SPECIFIC = 221
def go_neg_pin_authorized_persistent(i_dev, r_dev, i_intent=None, r_intent=None,
i_method='enter', r_method='display',
test_data=True, r_listen=True):
if r_listen:
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method,
go_intent=r_intent, persistent=True)
if r_listen:
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method,
timeout=20, go_intent=i_intent,
persistent=True)
r_res = r_dev.p2p_go_neg_auth_result()
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
logger.info("Group formed")
if test_data:
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
return [i_res, r_res]
def terminate_group(go, cli):
logger.info("Terminate persistent group")
go.remove_group()
cli.wait_go_ending_session()
def invite(inv, resp, extra=None, persistent_reconnect=True, use_listen=True):
addr = resp.p2p_dev_addr()
if persistent_reconnect:
resp.global_request("SET persistent_reconnect 1")
else:
resp.global_request("SET persistent_reconnect 0")
if use_listen:
resp.p2p_listen()
else:
resp.p2p_find(social=True)
if not inv.discover_peer(addr, social=True):
raise Exception("Peer " + addr + " not found")
inv.dump_monitor()
peer = inv.get_peer(addr)
cmd = "P2P_INVITE persistent=" + peer['persistent'] + " peer=" + addr
if extra:
cmd = cmd + " " + extra
inv.global_request(cmd)
def check_result(go, cli):
ev = go.wait_global_event(["P2P-GROUP-STARTED",
"Failed to start AP functionality"], timeout=30)
if ev is None:
raise Exception("Timeout on group re-invocation (on GO)")
if "P2P-GROUP-STARTED" not in ev:
raise Exception("GO failed to start the group for re-invocation")
if "[PERSISTENT]" not in ev:
raise Exception("Re-invoked group not marked persistent")
go_res = go.group_form_result(ev)
if go_res['role'] != 'GO':
raise Exception("Persistent group GO did not become GO")
if not go_res['persistent']:
raise Exception("Persistent group not re-invoked as persistent (GO)")
ev = cli.wait_global_event(["P2P-GROUP-STARTED"], timeout=30)
if ev is None:
raise Exception("Timeout on group re-invocation (on client)")
if "[PERSISTENT]" not in ev:
raise Exception("Re-invoked group not marked persistent")
cli_res = cli.group_form_result(ev)
if cli_res['role'] != 'client':
raise Exception("Persistent group client did not become client")
if not cli_res['persistent']:
raise Exception("Persistent group not re-invoked as persistent (cli)")
return [go_res, cli_res]
def form(go, cli, test_data=True, reverse_init=False, r_listen=True):
logger.info("Form a persistent group")
if reverse_init:
[i_res, r_res] = go_neg_pin_authorized_persistent(i_dev=cli, i_intent=0,
r_dev=go, r_intent=15,
test_data=test_data,
r_listen=r_listen)
else:
[i_res, r_res] = go_neg_pin_authorized_persistent(i_dev=go, i_intent=15,
r_dev=cli, r_intent=0,
test_data=test_data,
r_listen=r_listen)
if not i_res['persistent'] or not r_res['persistent']:
raise Exception("Formed group was not persistent")
terminate_group(go, cli)
if reverse_init:
return r_res
else:
return i_res
def invite_from_cli(go, cli, terminate=True):
logger.info("Re-invoke persistent group from client")
invite(cli, go)
[go_res, cli_res] = check_result(go, cli)
hwsim_utils.test_connectivity_p2p(go, cli)
if terminate:
terminate_group(go, cli)
return [go_res, cli_res]
def invite_from_go(go, cli, terminate=True, extra=None):
logger.info("Re-invoke persistent group from GO")
invite(go, cli, extra=extra)
[go_res, cli_res] = check_result(go, cli)
hwsim_utils.test_connectivity_p2p(go, cli)
if terminate:
terminate_group(go, cli)
return [go_res, cli_res]
def autogo(go, freq=None, persistent=None):
logger.info("Start autonomous GO " + go.ifname)
res = go.p2p_start_go(freq=freq, persistent=persistent)
logger.debug("res: " + str(res))
return res
def connect_cli(go, client, social=False, freq=None):
logger.info("Try to connect the client to the GO")
pin = client.wps_read_pin()
go.p2p_go_authorize_client(pin)
res = client.p2p_connect_group(go.p2p_dev_addr(), pin, timeout=60,
social=social, freq=freq)
logger.info("Client connected")
hwsim_utils.test_connectivity_p2p(go, client)
return res
def check_grpform_results(i_res, r_res):
if i_res['result'] != 'success' or r_res['result'] != 'success':
raise Exception("Failed group formation")
if i_res['ssid'] != r_res['ssid']:
raise Exception("SSID mismatch")
if i_res['freq'] != r_res['freq']:
raise Exception("freq mismatch")
if 'go_neg_freq' in r_res and i_res['go_neg_freq'] != r_res['go_neg_freq']:
raise Exception("go_neg_freq mismatch")
if i_res['freq'] != i_res['go_neg_freq']:
raise Exception("freq/go_neg_freq mismatch")
if i_res['role'] != i_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if 'go_neg_role' in r_res and r_res['role'] != r_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if i_res['go_dev_addr'] != r_res['go_dev_addr']:
raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent)
logger.debug("i_res: " + str(i_res))
except Exception as e:
i_res = None
logger.info("go_neg_init thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=20)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None,
expect_failure=False, i_go_neg_status=None,
i_method='enter', r_method='display', test_data=True,
i_freq=None, r_freq=None,
i_freq2=None, r_freq2=None,
i_max_oper_chwidth=None, r_max_oper_chwidth=None,
i_ht40=False, i_vht=False, r_ht40=False, r_vht=False):
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method,
go_intent=r_intent, freq=r_freq, freq2=r_freq2,
max_oper_chwidth=r_max_oper_chwidth, ht40=r_ht40,
vht=r_vht)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method,
timeout=20, go_intent=i_intent,
expect_failure=expect_failure, freq=i_freq,
freq2=i_freq2,
max_oper_chwidth=i_max_oper_chwidth,
ht40=i_ht40, vht=i_vht)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if i_go_neg_status:
if i_res['result'] != 'go-neg-failed':
raise Exception("Expected GO Negotiation failure not reported")
if i_res['status'] != i_go_neg_status:
raise Exception("Expected GO Negotiation status not seen")
if expect_failure:
return
logger.info("Group formed")
if test_data:
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
return [i_res, r_res]
def go_neg_init_pbc(i_dev, r_dev, i_intent, res, freq, provdisc):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc",
timeout=20, go_intent=i_intent, freq=freq,
provdisc=provdisc)
logger.debug("i_res: " + str(i_res))
except Exception as e:
i_res = None
logger.info("go_neg_init_pbc thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None, i_freq=None, r_freq=None, provdisc=False, r_listen=False):
if r_listen:
r_dev.p2p_listen()
else:
r_dev.p2p_find(social=True)
i_dev.p2p_find(social=True)
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res, i_freq, provdisc))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
# Allow some time for the GO Neg Resp to go out before initializing new
# GO Negotiation.
time.sleep(0.2)
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, timeout=20, freq=r_freq)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init_pbc thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pbc_authorized(i_dev, r_dev, i_intent=None, r_intent=None,
expect_failure=False, i_freq=None, r_freq=None):
i_dev.p2p_listen()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, freq=r_freq)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc", timeout=20,
go_intent=i_intent,
expect_failure=expect_failure, freq=i_freq)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if expect_failure:
return
logger.info("Group formed")
return [i_res, r_res]
def remove_group(dev1, dev2):
dev1.remove_group()
try:
dev2.remove_group()
except:
pass
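# Example (illustrative): a hwsim test case would typically drive the helpers in
# this module through the framework's WpaSupplicant fixtures, e.g.
#   def test_persistent_group_example(dev):
#       form(dev[0], dev[1])
#       invite_from_go(dev[0], dev[1])
#       invite_from_cli(dev[0], dev[1])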
|
the-stack_0_8840 | # dataset settings
data_source_cfg = dict(type='ImageNet')
# StanfordCars
data_train_labeled_list = 'data/meta/Cars/image_list/train_50.txt' # download from Self-Tuning
data_train_unlabeled_list = 'data/meta/Cars/image_list/unlabeled_50.txt'
data_train_root = 'data/StanfordCars/'
data_test_list = 'data/meta/Cars/image_list/test.txt'
data_test_root = 'data/StanfordCars/'
dataset_type = 'SemiSupervisedDataset'
img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201])
train_pipeline = [
dict(type='Resize', size=256),
dict(type='RandomResizedCrop', size=224, scale=(0.08, 1.)),
dict(type='RandomHorizontalFlip'),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
]
# prefetch
prefetch = True
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
data = dict(
imgs_per_gpu=24, # 24 x 1gpu = 24
workers_per_gpu=4,
drop_last=True, # moco
train=dict(
type=dataset_type,
data_source_labeled=dict(
list_file=data_train_labeled_list, root=data_train_root, **data_source_cfg),
data_source_unlabeled=dict(
list_file=data_train_unlabeled_list, root=data_train_root, **data_source_cfg),
pipeline_labeled=train_pipeline,
pipeline_unlabeled=train_pipeline,
prefetch=prefetch,
),
val=dict(
type='ClassificationDataset',
data_source=dict(
list_file=data_test_list, root=data_test_root, **data_source_cfg),
pipeline=test_pipeline,
prefetch=False,
))
# validation hook
evaluation = dict(
initial=False,
interval=1,
imgs_per_gpu=100,
workers_per_gpu=4,
eval_param=dict(topk=(1, 5)))
# checkpoint
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
|
the-stack_0_8841 | #
# Copyright 2018-2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from abc import abstractmethod
import glob
import logging
import os
import subprocess
import sys
import time
from typing import Any
from typing import Optional
from typing import Type
from typing import TypeVar
from urllib.parse import urlparse
from packaging import version
# Inputs and Outputs separator character. If updated,
# same-named variable in _notebook_op.py must be updated!
INOUT_SEPARATOR = ';'
# Setup forward reference for type hint on return from class factory method. See
# https://stackoverflow.com/questions/39205527/can-you-annotate-return-type-when-value-is-instance-of-cls/39205612#39205612
F = TypeVar('F', bound='FileOpBase')
logger = logging.getLogger('elyra')
enable_pipeline_info = os.getenv('ELYRA_ENABLE_PIPELINE_INFO', 'true').lower() == 'true'
pipeline_name = None # global used in formatted logging
operation_name = None # global used in formatted logging
class FileOpBase(ABC):
"""Abstract base class for file-based operations"""
filepath = None
cos_client = None
cos_bucket = None
@classmethod
def get_instance(cls: Type[F], **kwargs: Any) -> F:
"""Creates an appropriate subclass instance based on the extension of the filepath (-f) argument"""
filepath = kwargs['filepath']
if '.ipynb' in filepath:
return NotebookFileOp(**kwargs)
elif '.py' in filepath:
return PythonFileOp(**kwargs)
elif '.r' in filepath:
return RFileOp(**kwargs)
else:
raise ValueError('Unsupported file type: {}'.format(filepath))
def __init__(self, **kwargs: Any) -> None:
"""Initializes the FileOpBase instance"""
import minio
self.filepath = kwargs['filepath']
self.input_params = kwargs or []
self.cos_endpoint = urlparse(self.input_params.get('cos-endpoint'))
self.cos_bucket = self.input_params.get('cos-bucket')
# Infer secure from the endpoint's scheme.
self.secure = self.cos_endpoint.scheme == 'https'
self.cos_client = minio.Minio(self.cos_endpoint.netloc,
access_key=os.getenv('AWS_ACCESS_KEY_ID'),
secret_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
secure=self.secure)
@abstractmethod
def execute(self) -> None:
"""Execute the operation relative to derived class"""
raise NotImplementedError("Method 'execute()' must be implemented by subclasses!")
def process_dependencies(self) -> None:
"""Process dependencies
If a dependency archive is present, it will be downloaded from object storage
and expanded into the local directory.
This method can be overridden by subclasses, although overrides should first
call the superclass method.
"""
OpUtil.log_operation_info('processing dependencies')
t0 = time.time()
archive_file = self.input_params.get('cos-dependencies-archive')
self.get_file_from_object_storage(archive_file)
inputs = self.input_params.get('inputs')
if inputs:
input_list = inputs.split(INOUT_SEPARATOR)
for file in input_list:
self.get_file_from_object_storage(file.strip())
subprocess.call(['tar', '-zxvf', archive_file])
duration = time.time() - t0
OpUtil.log_operation_info("dependencies processed", duration)
def process_outputs(self) -> None:
"""Process outputs
If outputs have been specified, it will upload the appropriate files to object storage
This method can be overridden by subclasses, although overrides should first
call the superclass method.
"""
OpUtil.log_operation_info('processing outputs')
t0 = time.time()
outputs = self.input_params.get('outputs')
if outputs:
output_list = outputs.split(INOUT_SEPARATOR)
for file in output_list:
self.process_output_file(file.strip())
duration = time.time() - t0
OpUtil.log_operation_info('outputs processed', duration)
else:
OpUtil.log_operation_info('No outputs found in this operation')
def get_object_storage_filename(self, filename: str) -> str:
"""Function to pre-pend cloud storage working dir to file name
:param filename: the local file
:return: the full path of the object storage file
"""
return os.path.join(self.input_params.get('cos-directory', ''), filename)
def get_file_from_object_storage(self, file_to_get: str) -> None:
"""Utility function to get files from an object storage
:param file_to_get: filename
"""
object_to_get = self.get_object_storage_filename(file_to_get)
t0 = time.time()
self.cos_client.fget_object(bucket_name=self.cos_bucket,
object_name=object_to_get,
file_path=file_to_get)
duration = time.time() - t0
OpUtil.log_operation_info(f"downloaded {file_to_get} from bucket: {self.cos_bucket}, object: {object_to_get}",
duration)
def put_file_to_object_storage(self, file_to_upload: str, object_name: Optional[str] = None) -> None:
"""Utility function to put files into an object storage
:param file_to_upload: filename
:param object_name: remote filename (used to rename)
"""
object_to_upload = object_name
if not object_to_upload:
object_to_upload = file_to_upload
object_to_upload = self.get_object_storage_filename(object_to_upload)
t0 = time.time()
self.cos_client.fput_object(bucket_name=self.cos_bucket,
object_name=object_to_upload,
file_path=file_to_upload)
duration = time.time() - t0
OpUtil.log_operation_info(f"uploaded {file_to_upload} to bucket: {self.cos_bucket} object: {object_to_upload}",
duration)
def has_wildcard(self, filename):
wildcards = ['*', '?']
return bool(any(c in filename for c in wildcards))
def process_output_file(self, output_file):
"""Puts the file to object storage. Handles wildcards and directories. """
matched_files = [output_file]
if self.has_wildcard(output_file): # explode the wildcarded file
matched_files = glob.glob(output_file)
for matched_file in matched_files:
if os.path.isdir(matched_file):
for file in os.listdir(matched_file):
self.process_output_file(os.path.join(matched_file, file))
else:
self.put_file_to_object_storage(matched_file)
class NotebookFileOp(FileOpBase):
"""Perform Notebook File Operation"""
def execute(self) -> None:
"""Execute the Notebook and upload results to object storage"""
notebook = os.path.basename(self.filepath)
notebook_name = notebook.replace('.ipynb', '')
notebook_output = notebook_name + '-output.ipynb'
notebook_html = notebook_name + '.html'
try:
OpUtil.log_operation_info(f"executing notebook using 'papermill {notebook} {notebook_output}'")
t0 = time.time()
# Really hate to do this but have to invoke Papermill via library as workaround
import papermill
papermill.execute_notebook(notebook, notebook_output)
duration = time.time() - t0
OpUtil.log_operation_info("notebook execution completed", duration)
NotebookFileOp.convert_notebook_to_html(notebook_output, notebook_html)
self.put_file_to_object_storage(notebook_output, notebook)
self.put_file_to_object_storage(notebook_html)
self.process_outputs()
except Exception as ex:
# log in case of errors
logger.error("Unexpected error: {}".format(sys.exc_info()[0]))
NotebookFileOp.convert_notebook_to_html(notebook_output, notebook_html)
self.put_file_to_object_storage(notebook_output, notebook)
self.put_file_to_object_storage(notebook_html)
raise ex
@staticmethod
def convert_notebook_to_html(notebook_file: str, html_file: str) -> str:
"""Function to convert a Jupyter notebook file (.ipynb) into an html file
:param notebook_file: object storage client
:param html_file: name of what the html output file should be
:return: html_file: the converted notebook in html format
"""
import nbconvert
import nbformat
OpUtil.log_operation_info(f"converting from {notebook_file} to {html_file}")
t0 = time.time()
nb = nbformat.read(notebook_file, as_version=4)
html_exporter = nbconvert.HTMLExporter()
data, resources = html_exporter.from_notebook_node(nb)
with open(html_file, "w") as f:
f.write(data)
f.close()
duration = time.time() - t0
OpUtil.log_operation_info(f"{notebook_file} converted to {html_file}", duration)
return html_file
class PythonFileOp(FileOpBase):
"""Perform Python File Operation"""
def execute(self) -> None:
"""Execute the Python script and upload results to object storage"""
python_script = os.path.basename(self.filepath)
python_script_name = python_script.replace('.py', '')
python_script_output = python_script_name + '.log'
try:
OpUtil.log_operation_info(f"executing python script using "
f"'python3 {python_script}' to '{python_script_output}'")
t0 = time.time()
with open(python_script_output, "w") as log_file:
subprocess.run(['python3', python_script], stdout=log_file, stderr=subprocess.STDOUT, check=True)
duration = time.time() - t0
OpUtil.log_operation_info("python script execution completed", duration)
self.put_file_to_object_storage(python_script_output, python_script_output)
self.process_outputs()
except Exception as ex:
# log in case of errors
logger.error("Unexpected error: {}".format(sys.exc_info()[0]))
logger.error("Error details: {}".format(ex))
self.put_file_to_object_storage(python_script_output, python_script_output)
raise ex
class RFileOp(FileOpBase):
"""Perform R File Operation"""
def execute(self) -> None:
"""Execute the R script and upload results to object storage"""
r_script = os.path.basename(self.filepath)
r_script_name = r_script.replace('.r', '')
r_script_output = r_script_name + '.log'
try:
OpUtil.log_operation_info(f"executing R script using "
f"'Rscript {r_script}' to '{r_script_output}'")
t0 = time.time()
with open(r_script_output, "w") as log_file:
subprocess.run(['Rscript', r_script], stdout=log_file, stderr=subprocess.STDOUT, check=True)
duration = time.time() - t0
OpUtil.log_operation_info("R script execution completed", duration)
self.put_file_to_object_storage(r_script_output, r_script_output)
self.process_outputs()
except Exception as ex:
# log in case of errors
logger.error("Unexpected error: {}".format(sys.exc_info()[0]))
logger.error("Error details: {}".format(ex))
self.put_file_to_object_storage(r_script_output, r_script_output)
raise ex
class OpUtil(object):
"""Utility functions for preparing file execution."""
@classmethod
def package_install(cls) -> None:
OpUtil.log_operation_info("Installing packages")
t0 = time.time()
elyra_packages = cls.package_list_to_dict("requirements-elyra.txt")
current_packages = cls.package_list_to_dict("requirements-current.txt")
to_install_list = []
for package, ver in elyra_packages.items():
if package in current_packages:
if "git+" in current_packages[package]:
logger.warning(f"WARNING: Source package {package} found already installed from "
f"{current_packages[package]}. This may conflict with the required "
f"version: {ver} . Skipping...")
elif isinstance(version.parse(current_packages[package]), version.LegacyVersion):
logger.warning(f"WARNING: Package {package} found with unsupported Legacy version "
f"scheme {current_packages[package]} already installed. Skipping...")
elif version.parse(ver) > version.parse(current_packages[package]):
logger.info(f"Updating {package} package from version {current_packages[package]} to {ver}...")
to_install_list.append(package + '==' + ver)
elif version.parse(ver) < version.parse(current_packages[package]):
logger.info(f"Newer {package} package with version {current_packages[package]} "
f"already installed. Skipping...")
else:
logger.info(f"Package not found. Installing {package} package with version {ver}...")
to_install_list.append(package + '==' + ver)
if to_install_list:
subprocess.run([sys.executable, '-m', 'pip', 'install'] + to_install_list, check=True)
subprocess.run([sys.executable, '-m', 'pip', 'freeze'])
duration = time.time() - t0
OpUtil.log_operation_info("Packages installed", duration)
@classmethod
def package_list_to_dict(cls, filename: str) -> dict:
package_dict = {}
with open(filename) as fh:
for line in fh:
                if line.strip() and not line.startswith('#'):
if " @ " in line:
package_name, package_version = line.strip('\n').split(sep=" @ ")
elif "===" in line:
package_name, package_version = line.strip('\n').split(sep="===")
else:
package_name, package_version = line.strip('\n').split(sep="==")
package_dict[package_name] = package_version
return package_dict
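    # Illustrative behaviour of package_list_to_dict (hypothetical requirement
    # lines, not taken from a real requirements file):
    #   "ipykernel==6.9.1"                       -> {"ipykernel": "6.9.1"}
    #   "minio===7.1.5"                          -> {"minio": "7.1.5"}
    #   "foo @ git+https://example.com/foo.git"  -> {"foo": "git+https://example.com/foo.git"}
    # Blank lines and comment lines starting with '#' are skipped.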
@classmethod
def parse_arguments(cls, args) -> dict:
import argparse
global pipeline_name, operation_name
logger.debug("Parsing Arguments.....")
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--cos-endpoint', dest="cos-endpoint", help='Cloud object storage endpoint',
required=True)
parser.add_argument('-b', '--cos-bucket', dest="cos-bucket", help='Cloud object storage bucket to use',
required=True)
parser.add_argument('-d', '--cos-directory', dest="cos-directory",
help='Working directory in cloud object storage bucket to use', required=True)
parser.add_argument('-t', '--cos-dependencies-archive', dest="cos-dependencies-archive",
help='Archive containing notebook and dependency artifacts', required=True)
parser.add_argument('-f', '--file', dest="filepath", help='File to execute', required=True)
parser.add_argument('-o', '--outputs', dest="outputs", help='Files to output to object store', required=False)
parser.add_argument('-i', '--inputs', dest="inputs", help='Files to pull in from parent node', required=False)
parsed_args = vars(parser.parse_args(args))
# cos-directory is the pipeline name, set as global
pipeline_name = parsed_args.get('cos-directory')
# operation/node name is the basename of the non-suffixed filepath, set as global
operation_name = os.path.basename(os.path.splitext(parsed_args.get('filepath'))[0])
return parsed_args
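    # Example argument list this parser accepts (illustrative values only):
    #   ['-e', 'http://minio-service:9000', '-b', 'my-bucket',
    #    '-d', 'my-pipeline-0815', '-t', 'my-notebook-deps.tar.gz',
    #    '-f', 'notebooks/analysis.ipynb', '-o', 'results.csv']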
@classmethod
def log_operation_info(cls, action_clause: str, duration_secs: Optional[float] = None) -> None:
"""Produces a formatted log INFO message used entirely for support purposes.
This method is intended to be called for any entries that should be captured across aggregated
log files to identify steps within a given pipeline and each of its operations. As a result,
calls to this method should produce single-line entries in the log (no embedded newlines).
Each entry is prefixed with the pipeline name.
General logging should NOT use this method but use logger.<level>() statements directly.
:param action_clause: str representing the action that is being logged
:param duration_secs: optional float value representing the duration of the action being logged
"""
global pipeline_name, operation_name
if enable_pipeline_info:
duration_clause = f"({duration_secs:.3f} secs)" if duration_secs else ""
logger.info(f"'{pipeline_name}':'{operation_name}' - {action_clause} {duration_clause}")
def main():
# Configure logger format, level
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s.%(msecs).03d] %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
# Setup packages and gather arguments
input_params = OpUtil.parse_arguments(sys.argv[1:])
OpUtil.log_operation_info("starting operation")
t0 = time.time()
OpUtil.package_install()
# Create the appropriate instance, process dependencies and execute the operation
file_op = FileOpBase.get_instance(**input_params)
file_op.process_dependencies()
file_op.execute()
duration = time.time() - t0
OpUtil.log_operation_info("operation completed", duration)
if __name__ == '__main__':
main()
|
the-stack_0_8842 | import unittest
from typing import List
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (a positive integer whose only prime
        factors are 2, 3 and 5), using a three-pointer dynamic-programming merge."""
        ugly_nums = [1] * n
        # Pending candidate multiples of 2, 3 and 5, together with the indices
        # of the ugly numbers they were generated from.
        multi_2 = 2
        multi_3 = 3
        multi_5 = 5
        index_multi_2 = 0
        index_multi_3 = 0
        index_multi_5 = 0
        for i_th in range(1, n):
            # The next ugly number is the smallest pending candidate.
            next_ugly_num = min(multi_2, multi_3, multi_5)
            ugly_nums[i_th] = next_ugly_num
            # Advance every pointer that produced this value, so duplicates
            # such as 6 = 2*3 = 3*2 are recorded exactly once.
            if next_ugly_num == multi_2:
                index_multi_2 += 1
                multi_2 = 2 * ugly_nums[index_multi_2]
            if next_ugly_num == multi_3:
                index_multi_3 += 1
                multi_3 = 3 * ugly_nums[index_multi_3]
            if next_ugly_num == multi_5:
                index_multi_5 += 1
                multi_5 = 5 * ugly_nums[index_multi_5]
        return ugly_nums[n-1]
class TestNthUglyNumber(unittest.TestCase):
def setUp(self):
self.sol = Solution()
def test_nth_ugly_number_10(self):
n = 10
ugly_number = self.sol.nthUglyNumber(n)
self.assertEqual(ugly_number, 12)
def test_nth_ugly_number_5(self):
n = 5
ugly_number = self.sol.nthUglyNumber(n)
self.assertEqual(ugly_number, 5)
def test_nth_ugly_number_1(self):
n = 1
ugly_number = self.sol.nthUglyNumber(n)
self.assertEqual(ugly_number, 1)
def test_nth_ugly_number_1690(self):
n = 1690
ugly_number = self.sol.nthUglyNumber(n)
self.assertEqual(ugly_number, 2123366400)
if __name__ == "__main__":
unittest.main() |
the-stack_0_8843 | import unittest
import cupy as cp
import pytest
from skimage import data
from cucim.skimage.filters import LPIFilter2D, inverse, wiener
class TestLPIFilter2D(unittest.TestCase):
img = cp.array(data.camera()[:50, :50])
def filt_func(self, r, c):
return cp.exp(-cp.hypot(r, c) / 1)
def setUp(self):
self.f = LPIFilter2D(self.filt_func)
def tst_shape(self, x):
X = self.f(x)
assert X.shape == x.shape
def test_ip_shape(self):
rows, columns = self.img.shape[:2]
for c_slice in [slice(0, columns), slice(0, columns - 5),
slice(0, columns - 20)]:
yield (self.tst_shape, self.img[:, c_slice])
def test_inverse(self):
F = self.f(self.img)
g = inverse(F, predefined_filter=self.f)
assert g.shape == self.img.shape
g1 = inverse(F[::-1, ::-1], predefined_filter=self.f)
assert (g - g1[::-1, ::-1]).sum() < 55
# test cache
g1 = inverse(F[::-1, ::-1], predefined_filter=self.f)
assert (g - g1[::-1, ::-1]).sum() < 55
g1 = inverse(F[::-1, ::-1], self.filt_func)
assert (g - g1[::-1, ::-1]).sum() < 55
def test_wiener(self):
F = self.f(self.img)
g = wiener(F, predefined_filter=self.f)
assert g.shape == self.img.shape
g1 = wiener(F[::-1, ::-1], predefined_filter=self.f)
assert (g - g1[::-1, ::-1]).sum() < 1
g1 = wiener(F[::-1, ::-1], self.filt_func)
assert (g - g1[::-1, ::-1]).sum() < 1
def test_non_callable(self):
with pytest.raises(ValueError):
LPIFilter2D(None)
|
the-stack_0_8846 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
image1 = vtk.vtkImageCanvasSource2D()
image1.SetNumberOfScalarComponents(3)
image1.SetScalarTypeToUnsignedChar()
image1.SetExtent(0,511,0,511,0,0)
image1.SetDrawColor(255,255,0)
image1.FillBox(0,511,0,511)
pad1 = vtk.vtkImageWrapPad()
pad1.SetInputConnection(image1.GetOutputPort())
pad1.SetOutputWholeExtent(0,511,0,511,0,10)
pad1.Update()
image2 = vtk.vtkImageCanvasSource2D()
image2.SetNumberOfScalarComponents(3)
image2.SetScalarTypeToUnsignedChar()
image2.SetExtent(0,511,0,511,0,0)
image2.SetDrawColor(0,255,255)
image2.FillBox(0,511,0,511)
pad2 = vtk.vtkImageWrapPad()
pad2.SetInputConnection(image2.GetOutputPort())
pad2.SetOutputWholeExtent(0,511,0,511,0,10)
pad2.Update()
checkers = vtk.vtkImageCheckerboard()
checkers.SetInput1Data(pad1.GetOutput())
checkers.SetInput2Data(pad2.GetOutput())
checkers.SetNumberOfDivisions(11,6,0)
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(checkers.GetOutputPort())
viewer.SetZSlice(9)
viewer.SetColorWindow(255)
viewer.SetColorLevel(127.5)
viewer.Render()
# --- end of script --
|
the-stack_0_8848 | import traceback
import backoff
from collections import namedtuple
from collections.abc import AsyncIterable, Awaitable
from pyignite import Client
from pyignite.utils import is_hinted
from pyignite.exceptions import ReconnectError
from ..extensions.context_vars import fabric_service, fabric_execution
from ..protocol import FabricService, FabricExecution, TaskCollection
from contextvars import ContextVar
StartupContext = namedtuple("StartupContext", ["agent", "instance", "task_collection"])
startup_context = ContextVar("startup_context")
def run_init_delegate(function):
init_instance = startup_context.get().instance if startup_context.get().instance is not None else f"{startup_context.get().agent}-Init"
init_execution = FabricExecution(startup_context.get().agent, init_instance, "Init", f"{init_instance}-Init")
startup_context.get().task_collection.add(process_execution(init_execution, function, is_init=True))
def register_delegate(delegate, function):
async def helper():
async for execution in fabric_service.get().enumerate_executions(startup_context.get().agent, startup_context.get().instance, delegate):
startup_context.get().task_collection.add(process_execution(execution, function))
startup_context.get().task_collection.add(helper())
async def process_execution(execution, function, is_init=False):
try:
fabric_execution.set(execution)
parameters = [] if is_init else fabric_service.get().read_execution_parameters(fabric_execution.get().execution)
result = function(*parameters)
if isinstance(result, Awaitable):
result = await result
if not is_init:
if result is None:
fabric_service.get().write_execution_finished(fabric_execution.get().execution)
elif isinstance(result, AsyncIterable):
# NOTE: unlike C# code, here we read the parameters before waiting for a listener.
await fabric_service.get().wait_listener_attached(fabric_execution.get().execution)
async for data in result:
if isinstance(data, tuple) and len(data) == 2 and isinstance(data[0], int):
(key, data) = data
fabric_service.get().write_stream_item(fabric_execution.get().execution, key, data)
else:
fabric_service.get().write_stream_item_realtime(fabric_execution.get().execution, data)
fabric_service.get().write_execution_finished(fabric_execution.get().execution)
elif isinstance(result, tuple) and not is_hinted(result):
fabric_service.get().write_execution_result(fabric_execution.get().execution, list(result))
else:
fabric_service.get().write_execution_result(fabric_execution.get().execution, [result])
except Exception as ex:
print(f"Error while executing {fabric_execution.get().execution}:")
traceback.print_exception(type(ex), ex, ex.__traceback__)
if not is_init:
try:
fabric_service.get().write_execution_exception(fabric_execution.get().execution, ex)
except Exception as ex:
print(f"Error while executing {fabric_execution.get().execution}:")
traceback.print_exception(type(ex), ex, ex.__traceback__)
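# Illustrative registration of a delegate (the function name and signature are
# assumptions for the sketch; startup_context must already be set):
#
#   def add(x: int, y: int) -> int:
#       return x + y
#
#   register_delegate("Add", add)
#   run_init_delegate(lambda: print("initialized"))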
|
the-stack_0_8849 | # -*- coding: utf-8 -*-
u"""
.. module:: common
"""
from django.contrib.auth.models import User
from apps.volontulo.models import Offer
from apps.volontulo.models import Organization
from apps.volontulo.models import UserProfile
COMMON_OFFER_DATA = {
'organization': None,
'description': u'',
'requirements': u'',
'time_commitment': u'',
'benefits': u'',
'location': u'',
'title': u'volontulo offer',
'time_period': u''
}
def initialize_empty_volunteer():
u"""Initialize empty volunteer."""
volunteer_user1 = User.objects.create_user(
'[email protected]',
'[email protected]',
'volunteer1',
first_name=u'Grzegorz',
last_name=u'Brzęczyszczykiewicz',
)
volunteer_user1.save()
userprofile = UserProfile.objects.create(user=volunteer_user1)
userprofile.phone_no = '333666999'
userprofile.save()
return volunteer_user1
def initialize_empty_organization():
u"""Initialize empty organization."""
organization1 = Organization.objects.create(
name=u'Organization 1',
address=u'Organization 1 address',
description=u'Organization 1 description',
)
organization1.save()
organization_user1 = User.objects.create_user(
'[email protected]',
'[email protected]',
'organization1',
first_name=u'Organization1Firstname',
last_name=u'Organization1Lastname',
)
organization_user1.save()
organization_profile1 = UserProfile.objects.create(
user=organization_user1,
)
organization_profile1.organizations.add(organization1)
return organization1
def initialize_filled_volunteer_and_organization():
u"""Initialize volunteer filled with data."""
# create volunteer user
volunteer_user2 = User.objects.create_user(
'[email protected]',
'[email protected]',
'volunteer2'
)
volunteer_user2.save()
UserProfile.objects.create(user=volunteer_user2)
# create organization user to create offers
organization2 = Organization.objects.create(name=u'Organization 2')
organization2.save()
# this is required due to login to this user
organization_user2 = User.objects.create_user(
'[email protected]',
'[email protected]',
'organization2'
)
organization_user2.save()
organization_profile2 = UserProfile.objects.create(
user=organization_user2,
)
organization_profile2.organizations.add(organization2)
# create organization offers and assign volunteer to them
for i in range(11, 15):
offer = Offer.objects.create(
title=u'Title {}'.format(i),
description=u'Description {}'.format(i),
requirements=u'Requirements {}'.format(i),
time_commitment=u'Time commitment {}'.format(i),
benefits=u'Benefits {}'.format(i),
location=u'Location {}'.format(i),
time_period=u'Time period {}'.format(i),
status_old=u'ACTIVE',
votes=True,
started_at='2015-10-05 09:10:11',
finished_at='2015-12-12 12:13:14',
organization=organization2,
offer_status='published',
recruitment_status='open',
action_status='ongoing',
)
offer.volunteers.add(volunteer_user2)
offer.save()
# create additional organization offers for administrator use
for i in range(100, 110):
offer2 = Offer.objects.create(
title=u'Title {}'.format(i),
description=u'Description {}'.format(i),
requirements=u'Requirements {}'.format(i),
time_commitment=u'Time commitment {}'.format(i),
benefits=u'Benefits {}'.format(i),
location=u'Location {}'.format(i),
time_period=u'Time period {}'.format(i),
status_old=u'SUSPENDED' if i % 2 == 0 else u'NEW',
votes=True,
started_at='2015-10-05 09:10:11',
finished_at='2015-12-12 12:13:14',
organization=organization2,
offer_status='unpublished',
recruitment_status='open',
action_status='ongoing',
)
offer2.save()
return volunteer_user2, organization2
def initialize_empty_organizations():
u"""Initialize empty organization."""
for i in range(11, 15):
organization = Organization.objects.create(
id=i,
name=u'Organization {}'.format(i)
)
organization.save()
organization_user = User.objects.create_user(
'organization{}@example.com'.format(i),
'organization{}@example.com'.format(i),
'organization{}'.format(i)
)
organization_user.save()
user_profile = UserProfile.objects.create(
user=organization_user,
)
user_profile.organizations.add(organization)
def initialize_administrator(
username='[email protected]',
email='[email protected]', password='admin_password'):
u"""Initialize administrator user.
:param username: string User username
:param email: string User email
:param password: string User plaintext password
"""
administrator1 = User.objects.create_user(username, email, password)
administrator1.save()
administrator_profile = UserProfile.objects.create(user=administrator1)
administrator_profile.is_administrator = True
administrator_profile.save()
return administrator1
|
the-stack_0_8850 | from modelbasedagent import ModelBasedAgent
import numpy as np
class ThompsonSampAgent(ModelBasedAgent):
def __init__(self, dirichlet_param, reward_param, **kwargs):
super(ThompsonSampAgent, self).__init__(**kwargs)
self.dirichlet_param = dirichlet_param
self.reward_param = reward_param
self.reward = np.full((self.num_states, self.num_actions, self.num_states), self.reward_param)
def reset(self):
super(ThompsonSampAgent, self).reset()
self.reward.fill(self.reward_param)
def interact(self, reward, next_state, next_state_is_terminal, idx):
# Handle start of episode.
if reward is None:
# Return random action since there is no information.
next_action = np.random.randint(self.num_actions)
self.last_state = next_state
self.last_action = next_action
return self.last_action
# Handle completion of episode.
if next_state_is_terminal:
# Proceed as normal.
pass
# Update the reward associated with (s,a,s') if first time.
if self.reward[self.last_state, self.last_action, next_state] == self.reward_param:
self.reward[self.last_state, self.last_action, next_state] = reward
# Update set of states reached by playing a.
self.transition_observations[self.last_state, self.last_action, next_state] += 1
# Update transition probabilities after every T steps
if self.policy_step == self.T:
self.__compute_policy()
# Choose next action according to policy.
next_action = self._argmax_breaking_ties_randomly(self.value_table[next_state])
self.policy_step += 1
self.last_state = next_state
self.last_action = next_action
return self.last_action
def __compute_policy(self):
"""Compute an optimal T-step policy for the current state."""
self.policy_step = 0
transition_probs = np.zeros((self.num_states, self.num_actions, self.num_states))
for s in xrange(self.num_states):
for a in xrange(self.num_actions):
transition_probs[s,a] = np.random.dirichlet(self.transition_observations[s,a] +\
self.dirichlet_param, size=1)
self._value_iteration(transition_probs)
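# Illustrative construction (the keyword arguments forwarded to ModelBasedAgent
# are assumptions, not taken from this file):
#
#   agent = ThompsonSampAgent(dirichlet_param=1.0, reward_param=0.0,
#                             num_states=16, num_actions=4, T=50)
#   action = agent.interact(reward=None, next_state=0,
#                           next_state_is_terminal=False, idx=0)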
|
the-stack_0_8852 | from __future__ import absolute_import
from __future__ import print_function
import os
from distutils.dir_util import remove_tree
from shutil import copyfile
def clean_dir(src_dir, directory):
if os.path.exists(directory):
print("Cleaning directory: " + directory + "\n")
for f in os.listdir(directory):
target_file = os.path.join(directory, f)
if not os.path.isdir(target_file) \
and not f.lower().endswith(".py"):
os.remove(os.path.join(directory, f))
for f in os.listdir(src_dir):
src_file = os.path.join(src_dir, f)
if not os.path.isdir(src_file) and \
not f.lower().endswith(".py") and \
not f.lower().endswith(".pyc"):
copyfile(os.path.join(src_dir, f), os.path.join(directory, f))
print("Starting clean.\n")
DIST_PY_FILE_LOCATION = os.path.dirname(os.path.realpath(__file__))
DIST_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dist")
CONFIG_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "config")
SAMPLE_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "sample")
CONFIG_SRC_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dxlopenc2client",
"_config", "app")
SAMPLE_SRC_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dxlopenc2client",
"_config", "sample")
# Remove the dist directory if it exists
if os.path.exists(DIST_DIRECTORY):
print("Removing dist directory: " + DIST_DIRECTORY + "\n")
remove_tree(DIST_DIRECTORY, verbose=1)
# Clean the config directory
clean_dir(CONFIG_SRC_DIRECTORY, CONFIG_DIRECTORY)
# Clean the samples directory
clean_dir(SAMPLE_SRC_DIRECTORY, SAMPLE_DIRECTORY)
# Clean .pyc files
print("Cleaning .pyc files")
for root, dirs, files in os.walk(DIST_PY_FILE_LOCATION):
for source_file in files:
full_path = os.path.join(root, source_file)
if full_path.lower().endswith(".pyc"):
os.remove(full_path)
|
the-stack_0_8853 | import pytest
import numpy as np
from sklearn.model_selection import train_test_split
from ngboost import NGBClassifier, NGBRegressor
from ngboost.distns import Bernoulli, Normal
def test_classification():
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_auc_score, log_loss
    data, target = load_breast_cancer(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(
data, target, test_size=0.2, random_state=42
)
ngb = NGBClassifier(Dist=Bernoulli, verbose=False)
ngb.fit(x_train, y_train)
preds = ngb.predict(x_test)
score = roc_auc_score(y_test, preds)
assert score >= 0.95
preds = ngb.predict_proba(x_test)
score = log_loss(y_test, preds)
assert score <= 0.20
score = ngb.score(x_test, y_test)
assert score <= 0.20
dist = ngb.pred_dist(x_test)
assert isinstance(dist, Bernoulli)
score = roc_auc_score(y_test, preds[:, 1])
assert score >= 0.95
def test_regression():
from sklearn.datasets import load_boston
from sklearn.metrics import mean_squared_error
    data, target = load_boston(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(
data, target, test_size=0.2, random_state=42
)
ngb = NGBRegressor(verbose=False)
ngb.fit(x_train, y_train)
preds = ngb.predict(x_test)
score = mean_squared_error(y_test, preds)
assert score <= 8.0
score = ngb.score(x_test, y_test)
assert score <= 8.0
dist = ngb.pred_dist(x_test)
assert isinstance(dist, Normal)
score = mean_squared_error(y_test, preds)
assert score <= 8.0
|
the-stack_0_8854 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
class TrajSpeedSubplot:
def __init__(self, ax):
self.ax = ax
self.speed_lines = []
self.speed_lines_size = 30
self.colors = []
self.init_colors()
# self.colors = ['b','r', 'y', 'k']
for i in range(self.speed_lines_size):
line, = ax.plot(
[0], [0],
c=self.colors[i % len(self.colors)],
ls="-",
marker='',
lw=3,
alpha=0.8)
self.speed_lines.append(line)
ax.set_xlabel("t (second)")
# ax.set_xlim([-2, 10])
ax.set_ylim([-1, 25])
self.ax.autoscale_view()
# self.ax.relim()
ax.set_ylabel("speed (m/s)")
ax.set_title("PLANNING SPEED")
self.set_visible(False)
def init_colors(self):
self.colors = []
values = range(self.speed_lines_size)
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def set_visible(self, visible):
for line in self.speed_lines:
line.set_visible(visible)
def show(self, planning):
planning.traj_data_lock.acquire()
for i in range(len(planning.traj_speed_t_history)):
if i >= self.speed_lines_size:
print("WARNING: number of path lines is more than " \
+ str(self.speed_lines_size))
continue
speed_line = self.speed_lines[self.speed_lines_size - i - 1]
speed_line.set_xdata(planning.traj_speed_t_history[i])
speed_line.set_ydata(planning.traj_speed_v_history[i])
# speed_line.set_xdata([1,2,3,4])
# speed_line.set_ydata([1,2,3,4])
# speed_line.set_label(name[0:5])
speed_line.set_visible(True)
# self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
# self.ax.axis('equal')
planning.traj_data_lock.release()
self.ax.autoscale_view()
self.ax.relim()
|
the-stack_0_8858 | import numpy as np
from colour import Color
from svgwrite import Drawing
from map_machine.geometry.flinger import Flinger
from map_machine.osm.osm_reader import Tagged
from map_machine.scheme import Scheme
class Tree(Tagged):
"""Tree on the map."""
def __init__(
self, tags: dict[str, str], coordinates: np.ndarray, point: np.ndarray
) -> None:
super().__init__(tags)
self.coordinates: np.ndarray = coordinates
self.point: np.ndarray = point
def draw(self, svg: Drawing, flinger: Flinger, scheme: Scheme) -> None:
"""Draw crown and trunk."""
scale: float = flinger.get_scale(self.coordinates)
radius: float
        if (diameter_crown := self.get_float("diameter_crown")) is not None:
radius = diameter_crown / 2.0
else:
radius = 2.0
color: Color = scheme.get_color("evergreen_color")
svg.add(svg.circle(self.point, radius * scale, fill=color, opacity=0.3))
if (circumference := self.get_float("circumference")) is not None:
            # Trunk radius derived from the measured circumference.
            radius = circumference / 2.0 / np.pi
svg.add(svg.circle(self.point, radius * scale, fill="#B89A74"))
|
the-stack_0_8859 | import json
from collections import Counter
import jieba
from tqdm import tqdm
from config import *
from utils import parse_user_reviews
def build_wordmap(contents):
word_freq = Counter()
for sentence in tqdm(contents):
seg_list = jieba.cut(sentence.strip())
# Update word frequency
word_freq.update(list(seg_list))
# Create word map
words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
word_map = {k: v + 4 for v, k in enumerate(words)}
word_map['<pad>'] = 0
word_map['<start>'] = 1
word_map['<end>'] = 2
word_map['<unk>'] = 3
print('len(word_map): ' + str(len(word_map)))
print(words[:10])
with open('data/WORDMAP.json', 'w') as file:
json.dump(word_map, file, indent=4)
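# The resulting WORDMAP.json maps tokens to integer ids, e.g. (illustrative):
#   {"<pad>": 0, "<start>": 1, "<end>": 2, "<unk>": 3, "服务": 4, ...}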
if __name__ == '__main__':
user_reviews = parse_user_reviews('train')
build_wordmap(user_reviews['content'])
parse_user_reviews('valid')
|
the-stack_0_8861 | import os
import sys
import numpy as np
from PIL import Image
from skimage import io
from skimage.color import rgb2gray
from torch.utils.data import Dataset
sys.path.append('../')
from research.iqa.cfg import cfg
class ImageQualityDataset(Dataset):
"""
Image Quality Dataset
"""
def __init__(self, type='train', transform=None):
assert type in ['train', 'val']
files = []
types = []
for quality_dir in os.listdir(os.path.join(cfg['iqa_img_base'], type)):
for img_f in os.listdir(os.path.join(cfg['iqa_img_base'], type, quality_dir)):
files.append(os.path.join(cfg['iqa_img_base'], type, quality_dir, img_f))
types.append(0 if quality_dir == 'LR' else 1)
self.files = files
self.types = types
self.transform = transform
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
img_name = self.files[idx]
image = io.imread(img_name)
# if image.shape[-1] > 1:
# image = rgb2gray(image)
sample = {'image': image, "type": self.types[idx], 'filename': img_name}
if self.transform:
sample['image'] = self.transform(Image.fromarray(sample['image'].astype(np.uint8)))
return sample
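# Illustrative usage (assumes cfg['iqa_img_base'] contains train/{LR,HR} folders
# and that the transform resizes images to a common shape):
#
#   from torchvision import transforms
#   from torch.utils.data import DataLoader
#
#   dataset = ImageQualityDataset(type='train', transform=transforms.ToTensor())
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   batch = next(iter(loader))   # batch['image'], batch['type'], batch['filename']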
|
the-stack_0_8862 | import tensorflow as tf
import numpy as np
import pandas as pd
import sklearn as sk
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Flatten
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.callbacks import CSVLogger
import os
class RNN:
def __init__(self):
self.scaler = MinMaxScaler()
def parse_data(self, filename, ticker=None, dummy=False):
df = pd.read_csv(filename)
df = df.drop(df.columns[0],1)
if ticker:
df = df.loc[df["TICKER"] == ticker]
if dummy:
# just take one years worth of data (2017,2018)
df = df[(df['date'] > '2013-01-01') & (df['date'] < '2018-12-31')]
return df
def trim_dataset(self, data, batch_size):
n = len(data)
trim = n % batch_size
return data[:n-trim]
def format_data(self, data, batch_size, test_ratio=0.2, lookback_d=90, prediction_d=30 ):
# note data is already figured by ticker
lookback_days = lookback_d # number of days we want to base our prediction on
prediction_days = prediction_d # number of days we want to predict
X = []
Y = []
for i in range(len(data)-lookback_days-prediction_days):
# for debugging purposes this data can be generated with date column
# xi = data[['date','PRC','VOL']][i:i+lookback_days]
# yi = data[['date','PRC']][i+lookback_days:i+lookback_days+prediction_days]
xi = data[['PRC','VOL']][i:i+lookback_days].to_numpy()
yi = data[['PRC']][i+lookback_days:i+lookback_days+prediction_days].to_numpy()
X.append(xi)
Y.append(yi)
X = np.array(X)
y = np.array(Y)
X_tr, X_ts, y_tr, y_ts = train_test_split(X, y, train_size=(1-test_ratio), test_size=test_ratio,shuffle=False)
N, T, D = X_tr.shape
X_tr_d2 = X_tr.reshape((N, T*D)) #have to scale a 2d array
X_tr_d2 = self.scaler.fit_transform(X_tr_d2)
X_tr = X_tr_d2.reshape((N, T, D))
n, t, d = X_ts.shape
X_ts_d2 = X_ts.reshape((n, t*d))
X_ts_d2 = self.scaler.transform(X_ts_d2)
X_ts = X_ts_d2.reshape((n, t, d))
X_tr = self.trim_dataset(X_tr, batch_size)
y_tr = self.trim_dataset(y_tr, batch_size)
X_ts = self.trim_dataset(X_ts, batch_size)
y_ts = self.trim_dataset(y_ts, batch_size)
return X_tr, X_ts, y_tr, y_ts
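    # Shape sketch (illustrative): each X sample is (lookback_days, 2) holding
    # PRC and VOL, each y sample is (prediction_days, 1) of future PRC values;
    # both splits are then trimmed to a multiple of batch_size.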
def init_model(self, batch_size, lookback_days, D, prediction_days, lr):
self.model = Sequential()
self.model.add(LSTM(100, batch_input_shape=(batch_size, lookback_days, D), dropout=0.0, recurrent_dropout=0.0, stateful=True, kernel_initializer='random_uniform'))
self.model.add(Dense(prediction_days))
        optimizer = tf.optimizers.RMSprop(learning_rate=lr)
self.model.compile(loss='mean_squared_error', optimizer=optimizer)
def run_model(self, batch_size, epochs, X_tr, X_ts, y_tr, y_ts):
y_tr = y_tr.reshape((y_tr.shape[0], y_tr.shape[1]))
y_ts = y_ts.reshape((y_ts.shape[0], y_ts.shape[1]))
csv_logger = CSVLogger(os.path.join('/Users/Sai/Desktop/566/Financial-DL/runs/', 'sample' + '.log'), append=True)
history = self.model.fit(X_tr, y_tr, epochs=epochs, verbose=2, batch_size=batch_size, validation_data=(X_ts, y_ts), shuffle=False, callbacks=[csv_logger])
batch_size = 100
lookback_days = 150
prediction_days = 30
dimensions = 2
epochs = 100
rnn = RNN()
df = rnn.parse_data("../data/pre_data_10years", "BAC")
X_tr, X_ts, y_tr, y_ts = rnn.format_data(df, 100, lookback_d=lookback_days, prediction_d=prediction_days)
rnn.init_model(batch_size, lookback_days, dimensions, prediction_days, 0.6)
rnn.run_model(batch_size, epochs, X_tr, X_ts, y_tr, y_ts) |
the-stack_0_8863 | comando = input ("Ingrese los comandos deseados: ")
comando = list(comando)
comando = "".join(comando)
comando = comando.split("|")
print (comando)
intercambio = []
cadena_comparadora= "abcdefghijklmnñopqrstuvwxyzABCDEFGHIJKLMNÑOPQRSTUVWXYZ .,_1234567890><!#$%&/()=?¡¿´+*[]{}_:;áéíóú"
lista_abc = list(cadena_comparadora)
for i in lista_abc:
for j in lista_abc:
intercambio.append (str(str(i)+">"+str(j)))
lista_de_comandos = ["mM","Mm","aA","-espacio","mas>+","cif x","decif x"]+ intercambio
verificador = 0
while verificador < len(comando):
for i in range (len(comando)):
if comando[i] in lista_de_comandos:
verificador += 1
else:
print ("Error: Verifique los comandos ingresados")
comando = input ("Ingrese los comandos deseados: ")
comando = list(comando)
comando = "".join(comando)
comando = comando.split("|")
print(intercambio)
|
the-stack_0_8865 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of the Cloud Datastore V1 API.
This implementation forwards directly to the v3 service."""
import collections
from google.appengine.datastore import entity_pb
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import cloud_datastore_validator
from google.appengine.runtime import apiproxy_errors
_CLOUD_DATASTORE_ENABLED = datastore_pbs._CLOUD_DATASTORE_ENABLED
if _CLOUD_DATASTORE_ENABLED:
from datastore_pbs import googledatastore
SERVICE_NAME = 'cloud_datastore_v1'
V3_SERVICE_NAME = 'datastore_v3'
_NO_VERSION = 0
_MINIMUM_VERSION = 1
class _StubIdResolver(datastore_pbs.IdResolver):
"""A IdResolver that converts all project_ids to dev~project_id.
Users can provide a list of app_ids to override the conversions.
"""
def __init__(self, app_ids=None):
"""Create a _StubIdResolver.
Optionally, can provide a list of application ids.
"""
super(_StubIdResolver, self).__init__(app_ids)
def resolve_app_id(self, project_id):
"""Resolve the project id. Defaults to dev~project_id."""
try:
return super(_StubIdResolver, self).resolve_app_id(project_id)
except datastore_pbs.InvalidConversionError:
return 'dev~%s' % project_id
class CloudDatastoreV1Stub(apiproxy_stub.APIProxyStub):
"""Implementation of the Cloud Datastore V1 API.
This proxies requests to the v3 service."""
THREADSAFE = False
def __init__(self, app_id):
assert _CLOUD_DATASTORE_ENABLED, (
'Cannot initialize the Cloud Datastore'
' stub without installing the Cloud'
' Datastore client libraries.')
apiproxy_stub.APIProxyStub.__init__(self, SERVICE_NAME)
self.__app_id = app_id
self._id_resolver = _StubIdResolver([app_id])
self.__entity_converter = datastore_pbs.get_entity_converter(
self._id_resolver)
self.__service_converter = datastore_stub_util.get_service_converter(
self._id_resolver)
self.__service_validator = cloud_datastore_validator.get_service_validator(
self._id_resolver)
def _Dynamic_BeginTransaction(self, req, resp):
try:
self.__service_validator.validate_begin_transaction_req(req)
v3_req = self.__service_converter.v1_to_v3_begin_transaction_req(
self.__app_id, req)
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
except cloud_datastore_validator.ValidationError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
v3_resp = datastore_pb.Transaction()
self.__make_v3_call('BeginTransaction', v3_req, v3_resp)
try:
v1_resp = self.__service_converter.v3_to_v1_begin_transaction_resp(
v3_resp)
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.INTERNAL_ERROR,
str(e))
resp.CopyFrom(v1_resp)
def _Dynamic_Rollback(self, req, unused_resp):
try:
self.__service_validator.validate_rollback_req(req)
v3_req = self.__service_converter.v1_rollback_req_to_v3_txn(req)
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
except cloud_datastore_validator.ValidationError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
self.__make_v3_call('Rollback', v3_req, api_base_pb.VoidProto())
def _Dynamic_Commit(self, req, resp):
try:
self.__service_validator.validate_commit_req(req)
except cloud_datastore_validator.ValidationError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
single_use_txn = None
if req.WhichOneof('transaction_selector') == 'single_use_transaction':
single_use_txn = self.__begin_adhoc_txn(req)
try:
try:
if req.transaction or single_use_txn:
self.__commit(req.mutations, req.transaction or single_use_txn, resp)
else:
v3_txn_req = datastore_pb.BeginTransactionRequest()
v3_txn_req.set_app(self.__app_id)
for mutation in req.mutations:
v3_txn = datastore_pb.Transaction()
self.__make_v3_call('BeginTransaction', v3_txn_req, v3_txn)
v1_txn = self.__service_converter._v3_to_v1_txn(v3_txn)
commit_resp = googledatastore.CommitResponse()
self.__commit([mutation], v1_txn, commit_resp)
resp.index_updates += commit_resp.index_updates
mutation_result = commit_resp.mutation_results[0]
resp.mutation_results.add().CopyFrom(mutation_result)
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
except:
if single_use_txn:
self.__rollback_adhoc_txn(req, single_use_txn)
raise
def _Dynamic_RunQuery(self, req, resp):
self.__normalize_v1_run_query_request(req)
snapshot_version = None
txn = None
txn_to_cleanup = None
new_txn = None
try:
try:
self.__service_validator.validate_run_query_req(req)
if req.read_options.WhichOneof('consistency_type') == 'new_transaction':
new_txn = self.__begin_adhoc_txn(req)
v3_req = self.__service_converter.v1_run_query_req_to_v3_query(
req, new_txn=new_txn)
if new_txn:
txn = new_txn
txn_to_cleanup = new_txn
elif req.read_options.transaction:
txn = req.read_options.transaction
elif (v3_req.has_ancestor() and
req.read_options.read_consistency
!= googledatastore.ReadOptions.EVENTUAL and
v3_req.kind != '__property__'):
txn = self.__begin_adhoc_txn(req)
txn_to_cleanup = txn
v3_req.transaction = txn
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
except cloud_datastore_validator.ValidationError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
v3_resp = datastore_pb.QueryResult()
self.__make_v3_call('RunQuery', v3_req, v3_resp)
if txn:
lookup = googledatastore.LookupRequest()
lookup.project_id = req.partition_id.project_id
lookup.database_id = req.partition_id.database_id
lookup.read_options.transaction = txn
key = lookup.keys.add()
key.partition_id.CopyFrom(req.partition_id)
key.partition_id.database_id = req.database_id
path = key.path.add()
path.kind = '__none__'
path.id = 1
lookup_response = googledatastore.LookupResponse()
self._Dynamic_Lookup(lookup, lookup_response)
snapshot_version = lookup_response.missing[0].version
try:
v1_resp = self.__service_converter.v3_to_v1_run_query_resp(
v3_resp, new_txn=new_txn)
if req.query.projection:
if (len(req.query.projection) == 1 and
req.query.projection[0].property.name == '__key__'):
result_type = googledatastore.EntityResult.KEY_ONLY
else:
result_type = googledatastore.EntityResult.PROJECTION
v1_resp.batch.entity_result_type = result_type
if snapshot_version:
v1_resp.batch.snapshot_version = snapshot_version
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.INTERNAL_ERROR, str(e))
except:
if txn_to_cleanup:
self.__rollback_adhoc_txn(req, txn_to_cleanup)
raise
resp.CopyFrom(v1_resp)
def _Dynamic_Lookup(self, req, resp):
new_txn = None
try:
try:
self.__service_validator.validate_lookup_req(req)
if req.read_options.WhichOneof('consistency_type') == 'new_transaction':
new_txn = self.__begin_adhoc_txn(req)
v3_req = self.__service_converter.v1_to_v3_get_req(req, new_txn=new_txn)
except (cloud_datastore_validator.ValidationError,
datastore_pbs.InvalidConversionError) as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
v3_resp = datastore_pb.GetResponse()
self.__make_v3_call('Get', v3_req, v3_resp)
try:
v1_resp = self.__service_converter.v3_to_v1_lookup_resp(v3_resp,
new_txn=new_txn)
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.INTERNAL_ERROR,
str(e))
except:
if new_txn:
self.__rollback_adhoc_txn(req, new_txn)
raise
resp.CopyFrom(v1_resp)
def _Dynamic_AllocateIds(self, req, resp):
v3_stub = apiproxy_stub_map.apiproxy.GetStub(V3_SERVICE_NAME)
v3_refs = None
try:
self.__service_validator.validate_allocate_ids_req(req)
if req.keys:
v3_refs = self.__entity_converter.v1_to_v3_references(req.keys)
except cloud_datastore_validator.ValidationError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
str(e))
if v3_refs:
v3_full_refs = v3_stub._AllocateIds(v3_refs)
try:
resp.keys.extend(
self.__entity_converter.v3_to_v1_keys(v3_full_refs))
except datastore_pbs.InvalidConversionError as e:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.INTERNAL_ERROR, str(e))
def __begin_adhoc_txn(self, request):
"""Begins a new transaction as part of another request and returns it.
Args:
request: the request that asked for a new transaction to be created.
Returns:
a new v1 transaction.
"""
v1_txn_req = googledatastore.BeginTransactionRequest()
v1_txn_req.project_id = request.project_id
v1_txn_resp = googledatastore.BeginTransactionResponse()
self._Dynamic_BeginTransaction(v1_txn_req, v1_txn_resp)
return v1_txn_resp.transaction
def __rollback_adhoc_txn(self, request, v1_transaction):
"""Rolls back a transaction that was created as part of another request.
This is best effort only, so any error occuring during the rollback will be
silenced.
Args:
request: the request that asked for a new transaction to be created.
v1_transaction: the transaction that was created and needs to be rolled
back.
"""
try:
v1_rollback_req = googledatastore.RollbackRequest()
v1_rollback_req.project_id = request.project_id
v1_rollback_req.transaction = v1_transaction
self._Dynamic_Rollback(v1_rollback_req,
googledatastore.RollbackResponse())
except apiproxy_errors.ApplicationError as e:
pass
def __commit(self, v1_mutations, v1_txn, resp):
"""Commits a list of v1 mutations.
Args:
v1_mutations: the list of mutations to apply and commit
v1_txn: required v1 transaction handle in which to apply the mutations
resp: a v1 CommitResponse to update with the result of this commit
"""
mutation_keys = []
seen_keys = set()
allocated_keys = {}
conflict_cache = {}
version_cache = {}
for i, mutation in enumerate(v1_mutations):
v1_key, v1_entity = datastore_pbs.get_v1_mutation_key_and_entity(mutation)
key = datastore_types.ReferenceToKeyValue(v1_key, self._id_resolver)
if not datastore_pbs.is_complete_v1_key(v1_key):
v1_key = self.__put_v1_entity(v1_entity, v1_txn)
key = datastore_types.ReferenceToKeyValue(v1_key, self._id_resolver)
allocated_keys[key] = v1_key
elif key not in conflict_cache:
base_version = None
if mutation.HasField('base_version') and key not in seen_keys:
base_version = mutation.base_version
conflict_version = self.__apply_v1_mutation(mutation, base_version,
v1_txn, version_cache)
if conflict_version is not None:
conflict_cache[key] = conflict_version
mutation_keys.append(key)
seen_keys.add(key)
v3_txn = datastore_pb.Transaction()
self.__service_converter.v1_to_v3_txn(v1_txn, v3_txn)
v3_resp = datastore_pb.CommitResponse()
self.__make_v3_call('Commit', v3_txn, v3_resp)
resp.index_updates = v3_resp.cost().index_writes()
mutation_versions = {}
for version in v3_resp.version_list():
key = datastore_types.ReferenceToKeyValue(version.root_entity_key())
mutation_versions[key] = version.version()
for key in mutation_keys:
mutation_result = resp.mutation_results.add()
if key in allocated_keys:
mutation_result.key.CopyFrom(allocated_keys[key])
if key in conflict_cache:
mutation_result.conflict_detected = True
mutation_result.version = conflict_cache[key]
else:
mutation_result.version = mutation_versions[key]
def __apply_v1_mutation(self, v1_mutation, base_version, v1_txn,
version_cache):
"""Applies a v1 Mutation in a transaction.
Args:
v1_mutation: a googledatastore.Mutation, must be for a complete key.
base_version: optional, the version the entity is expected to be at. If
the entity has a different version number, the mutation does not
apply. If None, then this check is skipped.
v1_txn: a v1 transaction handle
version_cache: a cache of entity keys to version, for entities that have
been mutated previously in this transaction.
"""
v1_key, v1_entity = datastore_pbs.get_v1_mutation_key_and_entity(
v1_mutation)
key = datastore_types.ReferenceToKeyValue(v1_key, self._id_resolver)
if (v1_mutation.HasField('insert') or v1_mutation.HasField('update') or
base_version is not None) and key not in version_cache:
version_cache[key] = self.__get_v1_entity_version(v1_key, v1_txn)
if v1_mutation.HasField('insert'):
if base_version is not None and base_version != _NO_VERSION:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Cannot insert an entity with a '
'base version greater than zero')
elif version_cache[key] != _NO_VERSION:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Entity already exists.')
elif v1_mutation.HasField('update'):
if base_version is not None and base_version == _NO_VERSION:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Cannot update an entity with a '
'base version set to zero')
elif version_cache[key] == _NO_VERSION:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Entity does not exist.')
if base_version is not None:
persisted_version = version_cache[key]
if persisted_version != _NO_VERSION and persisted_version < base_version:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Invalid base version, it is '
'greater than the stored '
'version')
if persisted_version != base_version:
return persisted_version
if v1_mutation.HasField('delete'):
self.__delete_v1_key(v1_key, v1_txn)
version_cache[key] = _NO_VERSION
else:
self.__put_v1_entity(v1_entity, v1_txn)
version_cache[key] = _MINIMUM_VERSION
def __get_v1_entity_version(self, v1_key, v1_txn):
"""Returns the version of an entity, or _NO_VERSION if it does not exist.
Args:
v1_key: the key of the entity to lookup.
v1_txn: the transaction to use when retrieving the entity.
Returns:
the version number of the entity if it was found, or _NO_VERSION
otherwise.
"""
v3_key = entity_pb.Reference()
self.__entity_converter.v1_to_v3_reference(v1_key, v3_key)
v3_txn = datastore_pb.Transaction()
self.__service_converter.v1_to_v3_txn(v1_txn, v3_txn)
v3_get_req = datastore_pb.GetRequest()
v3_get_req.mutable_transaction().CopyFrom(v3_txn)
v3_get_req.key_list().append(v3_key)
v3_get_resp = datastore_pb.GetResponse()
self.__make_v3_call('Get', v3_get_req, v3_get_resp)
if v3_get_resp.entity(0).has_entity():
return v3_get_resp.entity(0).version()
return _NO_VERSION
def __put_v1_entity(self, v1_entity, v1_txn):
"""Writes a v1 entity to the datastore in a transaction and return its key.
Args:
v1_entity: the entity to write
v1_txn: the transaction in which to write the entity.
Returns:
the key of the entity, which may have been allocated.
"""
v3_entity = entity_pb.EntityProto()
self.__entity_converter.v1_to_v3_entity(v1_entity, v3_entity)
v3_txn = datastore_pb.Transaction()
self.__service_converter.v1_to_v3_txn(v1_txn, v3_txn)
v3_put_req = datastore_pb.PutRequest()
v3_put_req.mutable_transaction().CopyFrom(v3_txn)
v3_put_req.entity_list().append(v3_entity)
v3_put_resp = datastore_pb.PutResponse()
self.__make_v3_call('Put', v3_put_req, v3_put_resp)
v3_key = v3_put_resp.key(0)
v1_key = googledatastore.Key()
self.__entity_converter.v3_to_v1_key(v3_key, v1_key)
return v1_key
def __delete_v1_key(self, v1_key, v1_txn):
"""Deletes an entity from a v1 key in a transaction."""
v3_key = entity_pb.Reference()
self.__entity_converter.v1_to_v3_reference(v1_key, v3_key)
v3_txn = datastore_pb.Transaction()
self.__service_converter.v1_to_v3_txn(v1_txn, v3_txn)
v3_delete_req = datastore_pb.DeleteRequest()
v3_delete_req.mutable_transaction().CopyFrom(v3_txn)
v3_delete_req.add_key().CopyFrom(v3_key)
v3_delete_resp = datastore_pb.DeleteResponse()
self.__make_v3_call('Delete', v3_delete_req, v3_delete_resp)
def __normalize_v1_run_query_request(self, v1_req):
pass
def __make_v3_call(self, method, v3_req, v3_resp):
apiproxy_stub_map.MakeSyncCall(V3_SERVICE_NAME, method, v3_req, v3_resp)
|
the-stack_0_8866 | # coding: utf-8
from __future__ import unicode_literals
import re
import requests
from bs4 import BeautifulSoup
class BaseSpider(object):
def __init__(self, url):
super(BaseSpider, self).__init__()
self.url = url
self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36",
'Content-Type': 'text/html'}
self.cookies = None
self.content = None
self.addr_list = list()
def run(self):
session = requests.session()
res = session.get(self.url, headers=self.headers)
self.content = res.text
return self._extract_address()
def _extract_address(self):
pass
class XiCiSpider(BaseSpider):
def __init__(self, url):
super(XiCiSpider, self).__init__(url)
def _extract_address(self):
        soup = BeautifulSoup(self.content, "html.parser")
tr_res = soup.findAll('tr')
for i in range(1, len(tr_res)):
td_res = tr_res[i].findAll('td')
ip = '{0}:{1}'.format(str(td_res[2].string), str(td_res[3].string))
self.addr_list.append(ip)
return self.addr_list
class KuaiSpider(BaseSpider):
def __init__(self, url):
super(KuaiSpider, self).__init__(url)
def _extract_address(self):
        soup = BeautifulSoup(self.content, "html.parser")
tr_res = soup.findAll('tr')
for i in range(1, len(tr_res)):
td_res = tr_res[i].findAll('td')
ip = '{0}:{1}'.format(str(td_res[0].string), str(td_res[1].string))
self.addr_list.append(ip)
return self.addr_list
class LiuLiuSpider(BaseSpider):
def __init__(self, url):
super(LiuLiuSpider, self).__init__(url)
def _extract_address(self):
match_res = re.findall(r'\d+\.\d+\.\d+\.\d+:\d+', self.content)
for itm in match_res:
self.addr_list.append(itm)
return self.addr_list
class SpiderFactory(object):
def __init__(self):
super(SpiderFactory, self).__init__()
def create_spider(self, resource):
spider_type = resource['type'] - 1
spider_tuple = (XiCiSpider, KuaiSpider, LiuLiuSpider)
return spider_tuple[spider_type](resource['url'])
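# Illustrative usage (the resource dict layout follows create_spider above; the
# URL is a placeholder):
#
#   factory = SpiderFactory()
#   spider = factory.create_spider({'type': 1, 'url': 'http://example.com/proxylist'})
#   proxy_addresses = spider.run()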
|
the-stack_0_8867 | """
Helpers for XAI
"""
import altair as alt
import numpy as np
import pandas as pd
import streamlit as st
from pdpbox import pdp
@st.cache(allow_output_mutation=True)
def compute_pdp_isolate(model, dataset, model_features, feature):
pdp_isolate_out = pdp.pdp_isolate(
model=model,
dataset=dataset,
model_features=model_features,
feature=feature,
num_grid_points=15,
)
return pdp_isolate_out
def pdp_chart(pdp_isolate_out, feature_name):
"""Plot pdp charts."""
source = pd.DataFrame({
"feature": pdp_isolate_out.feature_grids,
"value": pdp_isolate_out.pdp,
})
if pdp_isolate_out.feature_type == "numeric":
base = alt.Chart(source).encode(
x=alt.X("feature", title=feature_name),
y=alt.Y("value", title=""),
tooltip=["feature", "value"],
)
line = base.mark_line()
scatter = base.mark_circle(size=60)
chart = line + scatter
else:
source["feature"] = source["feature"].astype(str)
chart = alt.Chart(source).mark_bar().encode(
x=alt.X("value", title=""),
y=alt.Y("feature", title=feature_name, sort="-x"),
tooltip=["feature", "value"],
)
return chart
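# Illustrative Streamlit usage (model, dataframe and feature names are
# assumptions for the sketch):
#
#   pdp_out = compute_pdp_isolate(model, df, feature_cols, "age")
#   st.altair_chart(pdp_chart(pdp_out, "age"), use_container_width=True)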
@st.cache(allow_output_mutation=True)
def compute_pdp_interact(model, dataset, model_features, features):
pdp_interact_out = pdp.pdp_interact(
model=model,
dataset=dataset,
model_features=model_features,
features=features,
)
return pdp_interact_out
def pdp_heatmap(pdp_interact_out, feature_names):
"""Plot pdp heatmap."""
source = pdp_interact_out.pdp
for i in [0, 1]:
if pdp_interact_out.feature_types[i] == "onehot":
value_vars = pdp_interact_out.feature_grids[i]
id_vars = list(set(source.columns) - set(value_vars))
source = pd.melt(
source, value_vars=value_vars,
id_vars=id_vars, var_name=feature_names[i])
source = source[source["value"] == 1].drop(columns=["value"])
elif pdp_interact_out.feature_types[i] == "binary":
source[feature_names[i]] = source[feature_names[i]].astype(str)
chart = alt.Chart(source).mark_rect().encode(
x=feature_names[0],
y=feature_names[1],
color="preds",
tooltip=feature_names + ["preds"]
)
return chart
def _convert_name(ind, feature_names):
"""Get index of feature name if it is given."""
if isinstance(ind, str):
return np.where(np.array(feature_names) == ind)[0][0]
return ind
def make_source_dp(shap_values, features, feature_names, feature):
ind = _convert_name(feature, feature_names)
# randomize the ordering so plotting overlaps are not related to
# data ordering
oinds = np.arange(shap_values.shape[0])
np.random.shuffle(oinds)
return pd.DataFrame({
feature: features[oinds, ind],
"value": shap_values[oinds, ind],
})
def _is_numeric(series, max_unique=16):
"""Flag if series is numeric."""
if len(set(series.values[:3000])) > max_unique:
return True
return False
def dependence_chart(source, feat_col, val_col="value"):
if _is_numeric(source[feat_col]):
scatterplot = alt.Chart(source).mark_circle(size=8).encode(
x=alt.X(f"{feat_col}:Q"),
y=alt.Y(f"{val_col}:Q", title="SHAP value"),
)
return scatterplot
stripplot = alt.Chart(source, width=40).mark_circle(size=8).encode(
x=alt.X(
"jitter:Q",
title=None,
axis=alt.Axis(values=[0], ticks=True, grid=False, labels=False),
scale=alt.Scale(),
),
y=alt.Y(f"{val_col}:Q", title="SHAP value"),
color=alt.Color(f"{feat_col}:N", legend=None),
column=alt.Column(
f"{feat_col}:N",
header=alt.Header(
labelAngle=-90,
titleOrient="top",
labelOrient="bottom",
labelAlign="right",
labelPadding=3,
),
),
).transform_calculate(
# Generate Gaussian jitter with a Box-Muller transform
jitter="sqrt(-2*log(random()))*cos(2*PI*random())"
).configure_facet(
spacing=0
).configure_view(
stroke=None
)
return stripplot
|
the-stack_0_8868 | print("How old are you?", end=' ')
age = input()
print('How tall are you?', end=' ')
height= input()
print(f"So, you're {age} old, and {height} tall.")
print('Let\'s practice everything.')
print('You\'d need to know about escapes'+
'with \\ that do \n newlines and \t tabs.')
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \nthe needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print("--------------")
print(poem)
print("--------------")
five = 10 - (2 + 3)
print(f"This should be five: {five}")
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
boxes = jars / 100
return jelly_beans, jars, boxes
start_point = 10000
beans, jars ,boxes = secret_formula(start_point)
# remember that this is another way to format a string
print("With a starting point of: {}".format(start_point))
# it's just like with an f"" string
print(f"We'd have {beans} beans, {jars} jars, and {boxes} boxes of jars.")
start_point = start_point / 10
print("We can also do that this way:")
formula = secret_formula(start_point)
# this is an easy way to apply a list to a format string
print("We'd have {} beans, {} jars, and {} boxes of jars.".format(*formula))
people = 20
cats = 30
dogs = 15
if people < cats:
print ("Too many cats! The world is doomed!")
if people > cats:
print("Not many cats! The world is saved!")
if people < dogs:
print("The world is drooled on!")
if people > dogs:
print("The world is dry!")
dogs += 5
if people >= dogs:
print("People are greater than or equal to dogs.")
if people <= dogs:
print("People are less than or equal to dogs.")
if people == dogs:
print("People are equal to dogs.")
|
the-stack_0_8870 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize
class FixApply(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'apply'
trailer<
'('
arglist<
(not argument<NAME '=' any>) func=any ','
(not argument<NAME '=' any>) args=any [','
(not argument<NAME '=' any>) kwds=any] [',']
>
')'
>
>
"""
def transform(self, node, results):
syms = self.syms
assert results
func = results["func"]
args = results["args"]
kwds = results.get("kwds")
# I feel like we should be able to express this logic in the
# PATTERN above but I don't know how to do it so...
if args:
if args.type == self.syms.star_expr:
return # Make no change.
if (args.type == self.syms.argument and
args.children[0].value == '**'):
return # Make no change.
if kwds and (kwds.type == self.syms.argument and
kwds.children[0].value == '**'):
return # Make no change.
prefix = node.prefix
func = func.clone()
if (func.type not in (token.NAME, syms.atom) and
(func.type != syms.power or
func.children[-2].type == token.DOUBLESTAR)):
# Need to parenthesize
func = parenthesize(func)
func.prefix = ""
args = args.clone()
args.prefix = ""
if kwds is not None:
kwds = kwds.clone()
kwds.prefix = ""
l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
if kwds is not None:
l_newargs.extend([Comma(),
pytree.Leaf(token.DOUBLESTAR, u"**"),
kwds])
l_newargs[-2].prefix = u" " # that's the ** token
# XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
# can be translated into f(x, y, *t) instead of f(*(x, y) + t)
#new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
return Call(func, l_newargs, prefix=prefix)
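# Illustrative before/after of the rewrite this fixer performs when driven by
# a lib2to3-based tool (the names f, args and kw below are placeholders):
#
#     apply(f, args)         ->  f(*args)
#     apply(f, args, kw)     ->  f(*args, **kw)
#     apply(a.b, (x,), kw)   ->  a.b(*(x,), **kw)
#
# Calls that already pass * or ** arguments are left unchanged by transform()
# above.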
|
the-stack_0_8873 | # Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Run vanilla AGBM, save the results and plot the figures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import sklearn.datasets
from sklearn.model_selection import train_test_split
from agbt_b import AGBTB
import functional as F
from tree import Dataset
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('data_folder', None, 'The directory of datasets.')
flags.DEFINE_enum('dataset_name', 'all_datasets', [
'all_datasets', 'a1a', 'w1a', 'housing', 'w8a', 'a9a', 'colon', 'Year',
'rcv1'
], ('The name of instances.'
'`all_datasets` means all of the instances in the folder.'))
flags.DEFINE_enum('loss', 'L2Loss', ['L2Loss', 'LogisticLoss'],
'The loss function.')
flags.DEFINE_integer(
'early_stopping_rounds', 100000,
('Stop the algorithm if the validation loss does not improve after this'
'number of iterations.'))
flags.DEFINE_float(
'z_shrinkage_parameter', 0.1,
'The shrinkage parameter in the z-update in accelerated method.')
flags.DEFINE_string('output_dir', None,
'The directory where output will be written.')
flags.DEFINE_integer('max_depth', 4, 'Maximal depth of a tree.')
flags.DEFINE_integer('num_trees', 20, 'Number of boosting iterations.')
flags.DEFINE_float('min_split_gain', 0.01, 'Minimal gain for splitting a leaf.')
flags.DEFINE_float('learning_rate', 0.3, 'Learning rate.')
flags.DEFINE_float('regularizer_const', 1, 'Regularizer constant.')
flags.DEFINE_boolean('use_hessian', False, 'Whether to use Hessian.')
TEST_SIZE = 0.2
RANDOM_STATE = 1
LOSS = {'L2Loss': F.L2Loss, 'LogisticLoss': F.LogisticLoss}
def set_up_data(data_folder, dataset_name):
path = os.path.join(data_folder, dataset_name + '.txt')
data = sklearn.datasets.load_svmlight_file(gfile.Open(path, mode='rb'))
x = np.asarray(data[0].todense())
y = np.array(data[1])
return train_test_split(x, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
def save_output(output_dict, name, params):
dir = os.path.join(FLAGS.output_dir, 'output')
if not gfile.Exists(dir):
gfile.MakeDirs(dir)
matfile_path = dir + '/VAGBM_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}.mat'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
)
scipy.io.savemat(gfile.Open(matfile_path, 'wb'), mdict=output_dict)
return 0
def plot_figures(output_dict, name, params):
"""Plots the figure from the output."""
figure_dir = os.path.join(FLAGS.output_dir, 'figures')
if not gfile.Exists(figure_dir):
gfile.MakeDirs(figure_dir)
fig = plt.figure()
plt.plot(output_dict['agbt_b_train_losses_1'], label='agbt_b lr=1')
plt.plot(output_dict['agbt_b_train_losses_2'], label='agbt_b lr=0.1')
plt.plot(output_dict['agbt_b_train_losses_3'], label='agbt_b lr=0.01')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'/train_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['agbt_b_test_losses_1'], label='agbt_b lr=1')
plt.plot(output_dict['agbt_b_test_losses_2'], label='agbt_b lr=0.1')
plt.plot(output_dict['agbt_b_test_losses_3'], label='agbt_b lr=0.01')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'/test_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['agbt_b_train_losses_1'], label='agbt_b lr=1')
plt.plot(output_dict['agbt_b_train_losses_2'], label='agbt_b lr=0.1')
plt.plot(output_dict['agbt_b_train_losses_3'], label='agbt_b lr=0.01')
plt.yscale('log')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'/log_train_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['agbt_b_test_losses_1'], label='agbt_b lr=1')
plt.plot(output_dict['agbt_b_test_losses_2'], label='agbt_b lr=0.1')
plt.plot(output_dict['agbt_b_test_losses_3'], label='agbt_b lr=0.01')
plt.yscale('log')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'/log_test_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
def main(argv):
del argv
if FLAGS.data_folder is None:
raise ValueError('Directory with downloaded datasets must be provided.')
if FLAGS.dataset_name == 'all_datasets':
names = ['a1a', 'w1a', 'housing']
else:
names = [FLAGS.dataset_name]
if FLAGS.output_dir is None:
raise ValueError('Output directory must be provided.')
for name in names:
x_train, x_test, y_train, y_test = set_up_data(FLAGS.data_folder, name)
train_data = Dataset(x_train, y_train)
test_data = Dataset(x_test, y_test)
GBTParams = collections.namedtuple('GBTParams', [
'regularizer_const', 'min_split_gain', 'max_depth', 'learning_rate',
'num_trees', 'early_stopping_rounds', 'loss', 'use_hessian',
'z_shrinkage_parameter'
])
params = GBTParams(
regularizer_const=FLAGS.regularizer_const,
min_split_gain=FLAGS.min_split_gain,
max_depth=FLAGS.max_depth,
learning_rate=FLAGS.learning_rate,
num_trees=FLAGS.num_trees,
early_stopping_rounds=FLAGS.early_stopping_rounds,
loss=FLAGS.loss,
use_hessian=FLAGS.use_hessian,
z_shrinkage_parameter=FLAGS.z_shrinkage_parameter)
params = params._replace(learning_rate=1)
agbt_b_method_1 = AGBTB(params)
agbt_b_train_losses_1, agbt_b_test_losses_1 = (
agbt_b_method_1.train(train_data, valid_set=test_data))
for i in range(len(agbt_b_train_losses_1)):
if agbt_b_train_losses_1[i] > 1e8:
agbt_b_train_losses_1[i] = 1e8
if agbt_b_test_losses_1[i] > 1e8:
agbt_b_test_losses_1[i] = 1e8
params = params._replace(learning_rate=0.1)
agbt_b_method_2 = AGBTB(params)
agbt_b_train_losses_2, agbt_b_test_losses_2 = (
agbt_b_method_2.train(train_data, valid_set=test_data))
params = params._replace(learning_rate=0.01)
agbt_b_method_3 = AGBTB(params)
agbt_b_train_losses_3, agbt_b_test_losses_3 = (
agbt_b_method_3.train(train_data, valid_set=test_data))
output_dict = {
'agbt_b_train_losses_1': agbt_b_train_losses_1,
'agbt_b_test_losses_1': agbt_b_test_losses_1,
'agbt_b_train_losses_2': agbt_b_train_losses_2,
'agbt_b_test_losses_2': agbt_b_test_losses_2,
'agbt_b_train_losses_3': agbt_b_train_losses_3,
'agbt_b_test_losses_3': agbt_b_test_losses_3
}
save_output(output_dict, name, params)
# plot_figures(output_dict, name, params)
if __name__ == '__main__':
app.run(main)
|
the-stack_0_8875 | import tkinter as tk
from PIL import ImageTk, Image # pip3 install Pillow
from tkinter import filedialog
import engine.torch as tengine
import matplotlib.pyplot as plt
def upload(): # THE IMAGE IS UPLOADED HERE
filename = filedialog.askopenfilename(title='open', filetypes=[("Images", ".jpg")])
img = Image.open(filename)
ph = ImageTk.PhotoImage(img)
print(filename)
global current_image_path
current_image_path = filename
tk_img = img.resize((256, 256), Image.ANTIALIAS)
tk_img = ImageTk.PhotoImage(tk_img)
panel = tk.Label(mainWindow, image=tk_img)
panel.image = tk_img
panel.pack()
panel.place(x=400, y=50)
def process_img(): # THE MODEL IS CALLED HERE TO ANALYZE THE IMAGE
global current_image_path
img = Image.open(current_image_path)
img, covidPositive = trunner.predict(img, current_image_path)
textResult = "El individuo no presenta COVID-19"
if covidPositive:
textResult = "El individuo si presenta COVID-19"
plt.show()
result = tk.Label(mainWindow, text=textResult)
result.pack(anchor=tk.NW)
result.config(fg="red", bg="#c3d6ff", font=("Arial", 14))
result.place(x=25, y=150)
trunner = tengine.TorchEngine()
mainWindow = tk.Tk()
mainWindow.title("Deteccion de COVID-19")
mainWindow.geometry("700x400")
mainWindow.config(bg="#c3d6ff")
title = tk.Label(mainWindow, text="Deteccion de COVID-19")
title.pack(anchor=tk.NW)
title.config(fg="red", bg="#c3d6ff", font=("Arial", 22))
title.place(x=25)
uploadButton = tk.Button(mainWindow, text="Subir rayos x...", height=2, width=20, command=upload)
uploadButton.pack(anchor=tk.NW)
uploadButton.config(bg="#c0c0c0", font=("Arial", 9))
uploadButton.place(x=25, y=50)
processButton = tk.Button(mainWindow, text="Procesar", height=2, width=20, command=process_img)
processButton.pack(anchor=tk.NW)
processButton.config(bg="#c0c0c0", font=("Arial", 9))
processButton.place(x=200, y=50)
current_image_path = None
mainWindow.mainloop()
|
the-stack_0_8876 | """Builder class used to transform a mypy AST to the IR form.
The IRBuilder class maintains transformation state and provides access
to various helpers used to implement the transform.
The top-level transform control logic is in mypyc.irbuild.main.
mypyc.irbuild.visitor.IRBuilderVisitor is used to dispatch based on mypy
AST node type to code that actually does the bulk of the work. For
example, expressions are transformed in mypyc.irbuild.expression and
functions are transformed in mypyc.irbuild.function.
"""
from typing import Callable, Dict, List, Tuple, Optional, Union, Sequence, Set, Any
from typing_extensions import overload
from mypy.ordered_dict import OrderedDict
from mypy.build import Graph
from mypy.nodes import (
MypyFile, SymbolNode, Statement, OpExpr, IntExpr, NameExpr, LDEF, Var, UnaryExpr,
CallExpr, IndexExpr, Expression, MemberExpr, RefExpr, Lvalue, TupleExpr,
TypeInfo, Decorator, OverloadedFuncDef, StarExpr, GDEF, ARG_POS, ARG_NAMED
)
from mypy.types import (
Type, Instance, TupleType, UninhabitedType, get_proper_type
)
from mypy.maptype import map_instance_to_supertype
from mypy.visitor import ExpressionVisitor, StatementVisitor
from mypy.util import split_target
from mypyc.common import TEMP_ATTR_NAME
from mypyc.irbuild.prebuildvisitor import PreBuildVisitor
from mypyc.ir.ops import (
BasicBlock, AssignmentTarget, AssignmentTargetRegister, AssignmentTargetIndex,
AssignmentTargetAttr, AssignmentTargetTuple, Environment, LoadInt, Value,
Register, Op, Assign, Branch, Unreachable, TupleGet, GetAttr, SetAttr, LoadStatic,
InitStatic, PrimitiveOp, OpDescription, NAMESPACE_MODULE, RaiseStandardError,
)
from mypyc.ir.rtypes import (
RType, RTuple, RInstance, int_rprimitive, dict_rprimitive,
none_rprimitive, is_none_rprimitive, object_rprimitive, is_object_rprimitive,
str_rprimitive,
)
from mypyc.ir.func_ir import FuncIR, INVALID_FUNC_DEF
from mypyc.ir.class_ir import ClassIR, NonExtClassInfo
from mypyc.primitives.registry import func_ops, CFunctionDescription, c_function_ops
from mypyc.primitives.list_ops import list_len_op, to_list, list_pop_last
from mypyc.primitives.dict_ops import dict_get_item_op, dict_set_item_op
from mypyc.primitives.generic_ops import py_setattr_op, iter_op, next_op
from mypyc.primitives.misc_ops import true_op, false_op, import_op
from mypyc.crash import catch_errors
from mypyc.options import CompilerOptions
from mypyc.errors import Errors
from mypyc.irbuild.nonlocalcontrol import (
NonlocalControl, BaseNonlocalControl, LoopNonlocalControl, GeneratorNonlocalControl
)
from mypyc.irbuild.context import FuncInfo, ImplicitClass
from mypyc.irbuild.mapper import Mapper
from mypyc.irbuild.ll_builder import LowLevelIRBuilder
from mypyc.irbuild.util import is_constant
class IRVisitor(ExpressionVisitor[Value], StatementVisitor[None]):
pass
class UnsupportedException(Exception):
pass
class IRBuilder:
def __init__(self,
current_module: str,
types: Dict[Expression, Type],
graph: Graph,
errors: Errors,
mapper: Mapper,
pbv: PreBuildVisitor,
visitor: IRVisitor,
options: CompilerOptions) -> None:
self.builder = LowLevelIRBuilder(current_module, mapper)
self.builders = [self.builder]
self.current_module = current_module
self.mapper = mapper
self.types = types
self.graph = graph
self.ret_types = [] # type: List[RType]
self.functions = [] # type: List[FuncIR]
self.classes = [] # type: List[ClassIR]
self.final_names = [] # type: List[Tuple[str, RType]]
self.callable_class_names = set() # type: Set[str]
self.options = options
# These variables keep track of the number of lambdas, implicit indices, and implicit
# iterators instantiated so we avoid name conflicts. The indices and iterators are
# instantiated from for-loops.
self.lambda_counter = 0
self.temp_counter = 0
# These variables are populated from the first-pass PreBuildVisitor.
self.free_variables = pbv.free_variables
self.prop_setters = pbv.prop_setters
self.encapsulating_funcs = pbv.encapsulating_funcs
self.nested_fitems = pbv.nested_funcs.keys()
self.fdefs_to_decorators = pbv.funcs_to_decorators
self.visitor = visitor
# This list operates similarly to a function call stack for nested functions. Whenever a
# function definition begins to be generated, a FuncInfo instance is added to the stack,
# and information about that function (e.g. whether it is nested, its environment class to
# be generated) is stored in that FuncInfo instance. When the function is done being
# generated, its corresponding FuncInfo is popped off the stack.
self.fn_info = FuncInfo(INVALID_FUNC_DEF, '', '')
self.fn_infos = [self.fn_info] # type: List[FuncInfo]
# This list operates as a stack of constructs that modify the
# behavior of nonlocal control flow constructs.
self.nonlocal_control = [] # type: List[NonlocalControl]
self.errors = errors
# Notionally a list of all of the modules imported by the
# module being compiled, but stored as an OrderedDict so we
# can also do quick lookups.
self.imports = OrderedDict() # type: OrderedDict[str, None]
# High-level control
def set_module(self, module_name: str, module_path: str) -> None:
"""Set the name and path of the current module.
This must be called before transforming any AST nodes.
"""
self.module_name = module_name
self.module_path = module_path
@overload
def accept(self, node: Expression) -> Value: ...
@overload
def accept(self, node: Statement) -> None: ...
def accept(self, node: Union[Statement, Expression]) -> Optional[Value]:
"""Transform an expression or a statement."""
with self.catch_errors(node.line):
if isinstance(node, Expression):
try:
res = node.accept(self.visitor)
res = self.coerce(res, self.node_type(node), node.line)
# If we hit an error during compilation, we want to
# keep trying, so we can produce more error
# messages. Generate a temp of the right type to keep
# from causing more downstream trouble.
except UnsupportedException:
res = self.alloc_temp(self.node_type(node))
return res
else:
try:
node.accept(self.visitor)
except UnsupportedException:
pass
return None
# Pass through methods for the most common low-level builder ops, for convenience.
def add(self, op: Op) -> Value:
return self.builder.add(op)
def goto(self, target: BasicBlock) -> None:
self.builder.goto(target)
def activate_block(self, block: BasicBlock) -> None:
self.builder.activate_block(block)
def goto_and_activate(self, block: BasicBlock) -> None:
self.builder.goto_and_activate(block)
def alloc_temp(self, type: RType) -> Register:
return self.builder.alloc_temp(type)
def py_get_attr(self, obj: Value, attr: str, line: int) -> Value:
return self.builder.py_get_attr(obj, attr, line)
def load_static_unicode(self, value: str) -> Value:
return self.builder.load_static_unicode(value)
def primitive_op(self, desc: OpDescription, args: List[Value], line: int) -> Value:
return self.builder.primitive_op(desc, args, line)
def unary_op(self, lreg: Value, expr_op: str, line: int) -> Value:
return self.builder.unary_op(lreg, expr_op, line)
def binary_op(self, lreg: Value, rreg: Value, expr_op: str, line: int) -> Value:
return self.builder.binary_op(lreg, rreg, expr_op, line)
def coerce(self, src: Value, target_type: RType, line: int, force: bool = False) -> Value:
return self.builder.coerce(src, target_type, line, force)
def none_object(self) -> Value:
return self.builder.none_object()
def py_call(self,
function: Value,
arg_values: List[Value],
line: int,
arg_kinds: Optional[List[int]] = None,
arg_names: Optional[Sequence[Optional[str]]] = None) -> Value:
return self.builder.py_call(function, arg_values, line, arg_kinds, arg_names)
def add_bool_branch(self, value: Value, true: BasicBlock, false: BasicBlock) -> None:
self.builder.add_bool_branch(value, true, false)
def load_native_type_object(self, fullname: str) -> Value:
return self.builder.load_native_type_object(fullname)
def gen_method_call(self,
base: Value,
name: str,
arg_values: List[Value],
result_type: Optional[RType],
line: int,
arg_kinds: Optional[List[int]] = None,
arg_names: Optional[List[Optional[str]]] = None) -> Value:
return self.builder.gen_method_call(
base, name, arg_values, result_type, line, arg_kinds, arg_names
)
def load_module(self, name: str) -> Value:
return self.builder.load_module(name)
def call_c(self, desc: CFunctionDescription, args: List[Value], line: int) -> Value:
return self.builder.call_c(desc, args, line)
def binary_int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: int) -> Value:
return self.builder.binary_int_op(type, lhs, rhs, op, line)
def compare_tagged(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
return self.builder.compare_tagged(lhs, rhs, op, line)
@property
def environment(self) -> Environment:
return self.builder.environment
# Helpers for IR building
def add_to_non_ext_dict(self, non_ext: NonExtClassInfo,
key: str, val: Value, line: int) -> None:
# Add an attribute entry into the class dict of a non-extension class.
key_unicode = self.load_static_unicode(key)
self.call_c(dict_set_item_op, [non_ext.dict, key_unicode, val], line)
def gen_import(self, id: str, line: int) -> None:
self.imports[id] = None
needs_import, out = BasicBlock(), BasicBlock()
first_load = self.load_module(id)
comparison = self.binary_op(first_load, self.none_object(), 'is not', line)
self.add_bool_branch(comparison, out, needs_import)
self.activate_block(needs_import)
value = self.primitive_op(import_op, [self.load_static_unicode(id)], line)
self.add(InitStatic(value, id, namespace=NAMESPACE_MODULE))
self.goto_and_activate(out)
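# Rough Python-level shape of the IR emitted by gen_import above (a sketch for
# orientation only, not the literal generated code): the module static acts as
# a cache so the import machinery runs only on first use.
#
#     if _module_static is not None:          # first_load / comparison
#         pass                                # jump straight to `out`
#     else:
#         _module_static = __import__(id)     # import_op + InitStatic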
def assign_if_null(self, target: AssignmentTargetRegister,
get_val: Callable[[], Value], line: int) -> None:
"""Generate blocks for registers that NULL values."""
error_block, body_block = BasicBlock(), BasicBlock()
self.add(Branch(target.register, error_block, body_block, Branch.IS_ERROR))
self.activate_block(error_block)
self.add(Assign(target.register, self.coerce(get_val(), target.register.type, line)))
self.goto(body_block)
self.activate_block(body_block)
def maybe_add_implicit_return(self) -> None:
if is_none_rprimitive(self.ret_types[-1]) or is_object_rprimitive(self.ret_types[-1]):
self.add_implicit_return()
else:
self.add_implicit_unreachable()
def add_implicit_return(self) -> None:
block = self.builder.blocks[-1]
if not block.terminated:
retval = self.coerce(self.builder.none(), self.ret_types[-1], -1)
self.nonlocal_control[-1].gen_return(self, retval, self.fn_info.fitem.line)
def add_implicit_unreachable(self) -> None:
block = self.builder.blocks[-1]
if not block.terminated:
self.add(Unreachable())
def disallow_class_assignments(self, lvalues: List[Lvalue], line: int) -> None:
# Some best-effort attempts to disallow assigning to class
# variables that aren't marked ClassVar, since we blatantly
# miscompile the interaction between instance and class
# variables.
for lvalue in lvalues:
if (isinstance(lvalue, MemberExpr)
and isinstance(lvalue.expr, RefExpr)
and isinstance(lvalue.expr.node, TypeInfo)):
var = lvalue.expr.node[lvalue.name].node
if isinstance(var, Var) and not var.is_classvar:
self.error(
"Only class variables defined as ClassVar can be assigned to",
line)
def non_function_scope(self) -> bool:
# Currently the stack always has at least two items: dummy and top-level.
return len(self.fn_infos) <= 2
def init_final_static(self, lvalue: Lvalue, rvalue_reg: Value,
class_name: Optional[str] = None) -> None:
assert isinstance(lvalue, NameExpr)
assert isinstance(lvalue.node, Var)
if lvalue.node.final_value is None:
if class_name is None:
name = lvalue.name
else:
name = '{}.{}'.format(class_name, lvalue.name)
assert name is not None, "Full name not set for variable"
self.final_names.append((name, rvalue_reg.type))
self.add(InitStatic(rvalue_reg, name, self.module_name))
def load_final_static(self, fullname: str, typ: RType, line: int,
error_name: Optional[str] = None) -> Value:
split_name = split_target(self.graph, fullname)
assert split_name is not None
module, name = split_name
return self.builder.load_static_checked(
typ, name, module, line=line,
error_msg='value for final name "{}" was not set'.format(error_name))
def load_final_literal_value(self, val: Union[int, str, bytes, float, bool],
line: int) -> Value:
"""Load value of a final name or class-level attribute."""
if isinstance(val, bool):
if val:
return self.primitive_op(true_op, [], line)
else:
return self.primitive_op(false_op, [], line)
elif isinstance(val, int):
# TODO: take care of negative integer initializers
# (probably easier to fix this in mypy itself).
return self.builder.load_static_int(val)
elif isinstance(val, float):
return self.builder.load_static_float(val)
elif isinstance(val, str):
return self.builder.load_static_unicode(val)
elif isinstance(val, bytes):
return self.builder.load_static_bytes(val)
else:
assert False, "Unsupported final literal value"
def get_assignment_target(self, lvalue: Lvalue,
line: int = -1) -> AssignmentTarget:
if isinstance(lvalue, NameExpr):
# If we are visiting a decorator, then the SymbolNode we really want to be looking at
# is the function that is decorated, not the entire Decorator node itself.
symbol = lvalue.node
if isinstance(symbol, Decorator):
symbol = symbol.func
if symbol is None:
# New semantic analyzer doesn't create ad-hoc Vars for special forms.
assert lvalue.is_special_form
symbol = Var(lvalue.name)
if lvalue.kind == LDEF:
if symbol not in self.environment.symtable:
# If the function is a generator function, then first define a new variable
# in the current function's environment class. Next, define a target that
# refers to the newly defined variable in that environment class. Add the
# target to the table containing class environment variables, as well as the
# current environment.
if self.fn_info.is_generator:
return self.add_var_to_env_class(symbol, self.node_type(lvalue),
self.fn_info.generator_class,
reassign=False)
# Otherwise define a new local variable.
return self.environment.add_local_reg(symbol, self.node_type(lvalue))
else:
# Assign to a previously defined variable.
return self.environment.lookup(symbol)
elif lvalue.kind == GDEF:
globals_dict = self.load_globals_dict()
name = self.load_static_unicode(lvalue.name)
return AssignmentTargetIndex(globals_dict, name)
else:
assert False, lvalue.kind
elif isinstance(lvalue, IndexExpr):
# Indexed assignment x[y] = e
base = self.accept(lvalue.base)
index = self.accept(lvalue.index)
return AssignmentTargetIndex(base, index)
elif isinstance(lvalue, MemberExpr):
# Attribute assignment x.y = e
obj = self.accept(lvalue.expr)
return AssignmentTargetAttr(obj, lvalue.name)
elif isinstance(lvalue, TupleExpr):
# Multiple assignment a, ..., b = e
star_idx = None # type: Optional[int]
lvalues = []
for idx, item in enumerate(lvalue.items):
targ = self.get_assignment_target(item)
lvalues.append(targ)
if isinstance(item, StarExpr):
if star_idx is not None:
self.error("Two starred expressions in assignment", line)
star_idx = idx
return AssignmentTargetTuple(lvalues, star_idx)
elif isinstance(lvalue, StarExpr):
return self.get_assignment_target(lvalue.expr)
assert False, 'Unsupported lvalue: %r' % lvalue
def read(self, target: Union[Value, AssignmentTarget], line: int = -1) -> Value:
if isinstance(target, Value):
return target
if isinstance(target, AssignmentTargetRegister):
return target.register
if isinstance(target, AssignmentTargetIndex):
reg = self.gen_method_call(
target.base, '__getitem__', [target.index], target.type, line)
if reg is not None:
return reg
assert False, target.base.type
if isinstance(target, AssignmentTargetAttr):
if isinstance(target.obj.type, RInstance) and target.obj.type.class_ir.is_ext_class:
return self.add(GetAttr(target.obj, target.attr, line))
else:
return self.py_get_attr(target.obj, target.attr, line)
assert False, 'Unsupported lvalue: %r' % target
def assign(self, target: Union[Register, AssignmentTarget],
rvalue_reg: Value, line: int) -> None:
if isinstance(target, Register):
self.add(Assign(target, rvalue_reg))
elif isinstance(target, AssignmentTargetRegister):
rvalue_reg = self.coerce(rvalue_reg, target.type, line)
self.add(Assign(target.register, rvalue_reg))
elif isinstance(target, AssignmentTargetAttr):
if isinstance(target.obj_type, RInstance):
rvalue_reg = self.coerce(rvalue_reg, target.type, line)
self.add(SetAttr(target.obj, target.attr, rvalue_reg, line))
else:
key = self.load_static_unicode(target.attr)
boxed_reg = self.builder.box(rvalue_reg)
self.add(PrimitiveOp([target.obj, key, boxed_reg], py_setattr_op, line))
elif isinstance(target, AssignmentTargetIndex):
target_reg2 = self.gen_method_call(
target.base, '__setitem__', [target.index, rvalue_reg], None, line)
assert target_reg2 is not None, target.base.type
elif isinstance(target, AssignmentTargetTuple):
if isinstance(rvalue_reg.type, RTuple) and target.star_idx is None:
rtypes = rvalue_reg.type.types
assert len(rtypes) == len(target.items)
for i in range(len(rtypes)):
item_value = self.add(TupleGet(rvalue_reg, i, line))
self.assign(target.items[i], item_value, line)
else:
self.process_iterator_tuple_assignment(target, rvalue_reg, line)
else:
assert False, 'Unsupported assignment target'
def process_iterator_tuple_assignment_helper(self,
litem: AssignmentTarget,
ritem: Value, line: int) -> None:
error_block, ok_block = BasicBlock(), BasicBlock()
self.add(Branch(ritem, error_block, ok_block, Branch.IS_ERROR))
self.activate_block(error_block)
self.add(RaiseStandardError(RaiseStandardError.VALUE_ERROR,
'not enough values to unpack', line))
self.add(Unreachable())
self.activate_block(ok_block)
self.assign(litem, ritem, line)
def process_iterator_tuple_assignment(self,
target: AssignmentTargetTuple,
rvalue_reg: Value,
line: int) -> None:
iterator = self.primitive_op(iter_op, [rvalue_reg], line)
# This may be the whole lvalue list if there is no starred value
split_idx = target.star_idx if target.star_idx is not None else len(target.items)
# Assign values before the first starred value
for litem in target.items[:split_idx]:
ritem = self.primitive_op(next_op, [iterator], line)
error_block, ok_block = BasicBlock(), BasicBlock()
self.add(Branch(ritem, error_block, ok_block, Branch.IS_ERROR))
self.activate_block(error_block)
self.add(RaiseStandardError(RaiseStandardError.VALUE_ERROR,
'not enough values to unpack', line))
self.add(Unreachable())
self.activate_block(ok_block)
self.assign(litem, ritem, line)
# Assign the starred value and all values after it
if target.star_idx is not None:
post_star_vals = target.items[split_idx + 1:]
iter_list = self.call_c(to_list, [iterator], line)
iter_list_len = self.primitive_op(list_len_op, [iter_list], line)
post_star_len = self.add(LoadInt(len(post_star_vals)))
condition = self.binary_op(post_star_len, iter_list_len, '<=', line)
error_block, ok_block = BasicBlock(), BasicBlock()
self.add(Branch(condition, ok_block, error_block, Branch.BOOL_EXPR))
self.activate_block(error_block)
self.add(RaiseStandardError(RaiseStandardError.VALUE_ERROR,
'not enough values to unpack', line))
self.add(Unreachable())
self.activate_block(ok_block)
for litem in reversed(post_star_vals):
ritem = self.call_c(list_pop_last, [iter_list], line)
self.assign(litem, ritem, line)
# Assign the starred value
self.assign(target.items[target.star_idx], iter_list, line)
# There is no starred value, so check if there are extra values in rhs that
# have not been assigned.
else:
extra = self.primitive_op(next_op, [iterator], line)
error_block, ok_block = BasicBlock(), BasicBlock()
self.add(Branch(extra, ok_block, error_block, Branch.IS_ERROR))
self.activate_block(error_block)
self.add(RaiseStandardError(RaiseStandardError.VALUE_ERROR,
'too many values to unpack', line))
self.add(Unreachable())
self.activate_block(ok_block)
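# The block structure built above mirrors CPython's semantics for iterator
# unpacking, roughly (sketch only), for a target list like `a, *b, c = rhs`:
#
#     it = iter(rhs)
#     a = next(it)         # StopIteration -> "not enough values to unpack"
#     rest = list(it)
#     c = rest.pop()       # targets after the star are popped from the end
#     b = rest             # the starred target receives whatever is left
#
# When there is no starred target, one extra next(it) must fail, otherwise a
# "too many values to unpack" error is raised.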
def push_loop_stack(self, continue_block: BasicBlock, break_block: BasicBlock) -> None:
self.nonlocal_control.append(
LoopNonlocalControl(self.nonlocal_control[-1], continue_block, break_block))
def pop_loop_stack(self) -> None:
self.nonlocal_control.pop()
def spill(self, value: Value) -> AssignmentTarget:
"""Moves a given Value instance into the generator class' environment class."""
name = '{}{}'.format(TEMP_ATTR_NAME, self.temp_counter)
self.temp_counter += 1
target = self.add_var_to_env_class(Var(name), value.type, self.fn_info.generator_class)
# Shouldn't be able to fail, so -1 for line
self.assign(target, value, -1)
return target
def maybe_spill(self, value: Value) -> Union[Value, AssignmentTarget]:
"""
Moves a given Value instance into the environment class for generator functions. For
non-generator functions, leaves the Value instance as it is.
Returns an AssignmentTarget associated with the Value for generator functions and the
original Value itself for non-generator functions.
"""
if self.fn_info.is_generator:
return self.spill(value)
return value
def maybe_spill_assignable(self, value: Value) -> Union[Register, AssignmentTarget]:
"""
Moves a given Value instance into the environment class for generator functions. For
non-generator functions, allocate a temporary Register.
Returns an AssignmentTarget associated with the Value for generator functions and an
assignable Register for non-generator functions.
"""
if self.fn_info.is_generator:
return self.spill(value)
if isinstance(value, Register):
return value
# Allocate a temporary register for the assignable value.
reg = self.alloc_temp(value.type)
self.assign(reg, value, -1)
return reg
def extract_int(self, e: Expression) -> Optional[int]:
if isinstance(e, IntExpr):
return e.value
elif isinstance(e, UnaryExpr) and e.op == '-' and isinstance(e.expr, IntExpr):
return -e.expr.value
else:
return None
def get_sequence_type(self, expr: Expression) -> RType:
target_type = get_proper_type(self.types[expr])
assert isinstance(target_type, Instance)
if target_type.type.fullname == 'builtins.str':
return str_rprimitive
else:
return self.type_to_rtype(target_type.args[0])
def get_dict_base_type(self, expr: Expression) -> Instance:
"""Find dict type of a dict-like expression.
This is useful for dict subclasses like SymbolTable.
"""
target_type = get_proper_type(self.types[expr])
assert isinstance(target_type, Instance)
dict_base = next(base for base in target_type.type.mro
if base.fullname == 'builtins.dict')
return map_instance_to_supertype(target_type, dict_base)
def get_dict_key_type(self, expr: Expression) -> RType:
dict_base_type = self.get_dict_base_type(expr)
return self.type_to_rtype(dict_base_type.args[0])
def get_dict_value_type(self, expr: Expression) -> RType:
dict_base_type = self.get_dict_base_type(expr)
return self.type_to_rtype(dict_base_type.args[1])
def get_dict_item_type(self, expr: Expression) -> RType:
key_type = self.get_dict_key_type(expr)
value_type = self.get_dict_value_type(expr)
return RTuple([key_type, value_type])
def _analyze_iterable_item_type(self, expr: Expression) -> Type:
"""Return the item type given by 'expr' in an iterable context."""
# This logic is copied from mypy's TypeChecker.analyze_iterable_item_type.
iterable = get_proper_type(self.types[expr])
echk = self.graph[self.module_name].type_checker().expr_checker
iterator = echk.check_method_call_by_name('__iter__', iterable, [], [], expr)[0]
from mypy.join import join_types
if isinstance(iterable, TupleType):
joined = UninhabitedType() # type: Type
for item in iterable.items:
joined = join_types(joined, item)
return joined
else:
# Non-tuple iterable.
return echk.check_method_call_by_name('__next__', iterator, [], [], expr)[0]
def is_native_module(self, module: str) -> bool:
"""Is the given module one compiled by mypyc?"""
return module in self.mapper.group_map
def is_native_ref_expr(self, expr: RefExpr) -> bool:
if expr.node is None:
return False
if '.' in expr.node.fullname:
return self.is_native_module(expr.node.fullname.rpartition('.')[0])
return True
def is_native_module_ref_expr(self, expr: RefExpr) -> bool:
return self.is_native_ref_expr(expr) and expr.kind == GDEF
def is_synthetic_type(self, typ: TypeInfo) -> bool:
"""Is a type something other than just a class we've created?"""
return typ.is_named_tuple or typ.is_newtype or typ.typeddict_type is not None
def get_final_ref(self, expr: MemberExpr) -> Optional[Tuple[str, Var, bool]]:
"""Check if `expr` is a final attribute.
This needs to be done differently for class and module attributes to
correctly determine fully qualified name. Return a tuple that consists of
the qualified name, the corresponding Var node, and a flag indicating whether
the final name was defined in a compiled module. Return None if `expr` does not
refer to a final attribute.
"""
final_var = None
if isinstance(expr.expr, RefExpr) and isinstance(expr.expr.node, TypeInfo):
# a class attribute
sym = expr.expr.node.get(expr.name)
if sym and isinstance(sym.node, Var):
# Enum attributes are treated as final since they are added to the global cache
expr_fullname = expr.expr.node.bases[0].type.fullname
is_final = sym.node.is_final or expr_fullname == 'enum.Enum'
if is_final:
final_var = sym.node
fullname = '{}.{}'.format(sym.node.info.fullname, final_var.name)
native = self.is_native_module(expr.expr.node.module_name)
elif self.is_module_member_expr(expr):
# a module attribute
if isinstance(expr.node, Var) and expr.node.is_final:
final_var = expr.node
fullname = expr.node.fullname
native = self.is_native_ref_expr(expr)
if final_var is not None:
return fullname, final_var, native
return None
def emit_load_final(self, final_var: Var, fullname: str,
name: str, native: bool, typ: Type, line: int) -> Optional[Value]:
"""Emit code for loading value of a final name (if possible).
Args:
final_var: Var corresponding to the final name
fullname: its qualified name
name: shorter name to show in errors
native: whether the name was defined in a compiled module
typ: its type
line: line number where loading occurs
"""
if final_var.final_value is not None: # this is safe even for non-native names
return self.load_final_literal_value(final_var.final_value, line)
elif native:
return self.load_final_static(fullname, self.mapper.type_to_rtype(typ),
line, name)
else:
return None
def is_module_member_expr(self, expr: MemberExpr) -> bool:
return isinstance(expr.expr, RefExpr) and isinstance(expr.expr.node, MypyFile)
def call_refexpr_with_args(
self, expr: CallExpr, callee: RefExpr, arg_values: List[Value]) -> Value:
# Handle data-driven special-cased primitive call ops.
if callee.fullname is not None and expr.arg_kinds == [ARG_POS] * len(arg_values):
call_c_ops_candidates = c_function_ops.get(callee.fullname, [])
target = self.builder.matching_call_c(call_c_ops_candidates, arg_values,
expr.line, self.node_type(expr))
if target:
return target
ops = func_ops.get(callee.fullname, [])
target = self.builder.matching_primitive_op(
ops, arg_values, expr.line, self.node_type(expr)
)
if target:
return target
# Standard native call if signature and fullname are good and all arguments are positional
# or named.
callee_node = callee.node
if isinstance(callee_node, OverloadedFuncDef):
callee_node = callee_node.impl
if (callee_node is not None
and callee.fullname is not None
and callee_node in self.mapper.func_to_decl
and all(kind in (ARG_POS, ARG_NAMED) for kind in expr.arg_kinds)):
decl = self.mapper.func_to_decl[callee_node]
return self.builder.call(decl, arg_values, expr.arg_kinds, expr.arg_names, expr.line)
# Fall back to a Python call
function = self.accept(callee)
return self.py_call(function, arg_values, expr.line,
arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
def shortcircuit_expr(self, expr: OpExpr) -> Value:
return self.builder.shortcircuit_helper(
expr.op, self.node_type(expr),
lambda: self.accept(expr.left),
lambda: self.accept(expr.right),
expr.line
)
# Conditional expressions
def process_conditional(self, e: Expression, true: BasicBlock, false: BasicBlock) -> None:
if isinstance(e, OpExpr) and e.op in ['and', 'or']:
if e.op == 'and':
# Short circuit 'and' in a conditional context.
new = BasicBlock()
self.process_conditional(e.left, new, false)
self.activate_block(new)
self.process_conditional(e.right, true, false)
else:
# Short circuit 'or' in a conditional context.
new = BasicBlock()
self.process_conditional(e.left, true, new)
self.activate_block(new)
self.process_conditional(e.right, true, false)
elif isinstance(e, UnaryExpr) and e.op == 'not':
self.process_conditional(e.expr, false, true)
# Catch-all for arbitrary expressions.
else:
reg = self.accept(e)
self.add_bool_branch(reg, true, false)
def flatten_classes(self, arg: Union[RefExpr, TupleExpr]) -> Optional[List[ClassIR]]:
"""Flatten classes in isinstance(obj, (A, (B, C))).
If at least one item is not a reference to a native class, return None.
"""
if isinstance(arg, RefExpr):
if isinstance(arg.node, TypeInfo) and self.is_native_module_ref_expr(arg):
ir = self.mapper.type_to_ir.get(arg.node)
if ir:
return [ir]
return None
else:
res = [] # type: List[ClassIR]
for item in arg.items:
if isinstance(item, (RefExpr, TupleExpr)):
item_part = self.flatten_classes(item)
if item_part is None:
return None
res.extend(item_part)
else:
return None
return res
# Basic helpers
def enter(self, fn_info: Union[FuncInfo, str] = '') -> None:
if isinstance(fn_info, str):
fn_info = FuncInfo(name=fn_info)
self.builder = LowLevelIRBuilder(self.current_module, self.mapper)
self.builders.append(self.builder)
self.fn_info = fn_info
self.fn_infos.append(self.fn_info)
self.ret_types.append(none_rprimitive)
if fn_info.is_generator:
self.nonlocal_control.append(GeneratorNonlocalControl())
else:
self.nonlocal_control.append(BaseNonlocalControl())
self.activate_block(BasicBlock())
def leave(self) -> Tuple[List[BasicBlock], Environment, RType, FuncInfo]:
builder = self.builders.pop()
ret_type = self.ret_types.pop()
fn_info = self.fn_infos.pop()
self.nonlocal_control.pop()
self.builder = self.builders[-1]
self.fn_info = self.fn_infos[-1]
return builder.blocks, builder.environment, ret_type, fn_info
def type_to_rtype(self, typ: Optional[Type]) -> RType:
return self.mapper.type_to_rtype(typ)
def node_type(self, node: Expression) -> RType:
if isinstance(node, IntExpr):
# TODO: Don't special case IntExpr
return int_rprimitive
if node not in self.types:
return object_rprimitive
mypy_type = self.types[node]
return self.type_to_rtype(mypy_type)
def add_var_to_env_class(self,
var: SymbolNode,
rtype: RType,
base: Union[FuncInfo, ImplicitClass],
reassign: bool = False) -> AssignmentTarget:
# First, define the variable name as an attribute of the environment class, and then
# construct a target for that attribute.
self.fn_info.env_class.attributes[var.name] = rtype
attr_target = AssignmentTargetAttr(base.curr_env_reg, var.name)
if reassign:
# Read the local definition of the variable, and set the corresponding attribute of
# the environment class' variable to be that value.
reg = self.read(self.environment.lookup(var), self.fn_info.fitem.line)
self.add(SetAttr(base.curr_env_reg, var.name, reg, self.fn_info.fitem.line))
# Override the local definition of the variable to instead point at the variable in
# the environment class.
return self.environment.add_target(var, attr_target)
def is_builtin_ref_expr(self, expr: RefExpr) -> bool:
assert expr.node, "RefExpr not resolved"
return '.' in expr.node.fullname and expr.node.fullname.split('.')[0] == 'builtins'
def load_global(self, expr: NameExpr) -> Value:
"""Loads a Python-level global.
This takes a NameExpr and uses its name as a key to retrieve the corresponding PyObject *
from the _globals dictionary in the C-generated code.
"""
# If the global is from 'builtins', turn it into a module attr load instead
if self.is_builtin_ref_expr(expr):
assert expr.node, "RefExpr not resolved"
return self.load_module_attr_by_fullname(expr.node.fullname, expr.line)
if (self.is_native_module_ref_expr(expr) and isinstance(expr.node, TypeInfo)
and not self.is_synthetic_type(expr.node)):
assert expr.fullname is not None
return self.load_native_type_object(expr.fullname)
return self.load_global_str(expr.name, expr.line)
def load_global_str(self, name: str, line: int) -> Value:
_globals = self.load_globals_dict()
reg = self.load_static_unicode(name)
return self.call_c(dict_get_item_op, [_globals, reg], line)
def load_globals_dict(self) -> Value:
return self.add(LoadStatic(dict_rprimitive, 'globals', self.module_name))
def load_module_attr_by_fullname(self, fullname: str, line: int) -> Value:
module, _, name = fullname.rpartition('.')
left = self.load_module(module)
return self.py_get_attr(left, name, line)
# Lacks a good type because there wasn't a reasonable type in 3.5 :(
def catch_errors(self, line: int) -> Any:
return catch_errors(self.module_path, line)
def warning(self, msg: str, line: int) -> None:
self.errors.warning(msg, self.module_path, line)
def error(self, msg: str, line: int) -> None:
self.errors.error(msg, self.module_path, line)
def gen_arg_defaults(builder: IRBuilder) -> None:
"""Generate blocks for arguments that have default values.
If the passed value is an error value, then assign the default
value to the argument.
"""
fitem = builder.fn_info.fitem
for arg in fitem.arguments:
if arg.initializer:
target = builder.environment.lookup(arg.variable)
def get_default() -> Value:
assert arg.initializer is not None
# If it is constant, don't bother storing it
if is_constant(arg.initializer):
return builder.accept(arg.initializer)
# Because gen_arg_defaults runs before calculate_arg_defaults, we
# add the static/attribute to final_names/the class here.
elif not builder.fn_info.is_nested:
name = fitem.fullname + '.' + arg.variable.name
builder.final_names.append((name, target.type))
return builder.add(LoadStatic(target.type, name, builder.module_name))
else:
name = arg.variable.name
builder.fn_info.callable_class.ir.attributes[name] = target.type
return builder.add(
GetAttr(builder.fn_info.callable_class.self_reg, name, arg.line))
assert isinstance(target, AssignmentTargetRegister)
builder.assign_if_null(target, get_default, arg.initializer.line)
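# Conceptually (a sketch, not the emitted IR), the generated default-argument
# blocks behave like:
#
#     if arg is <unset/error marker>:
#         arg = <default>   # read back from a static or a callable-class
#                           # attribute unless the default is a constant
#
# so non-constant defaults are evaluated once and then reused, matching
# Python's evaluate-defaults-at-definition-time behaviour.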
|
the-stack_0_8877 | import pathlib
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import *
from torchvision.utils import save_image
import numpy as np
from tqdm import tqdm
from skimage import feature
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn')
from coco import COCOKeypoint
class PoseModel(nn.Module):
def __init__(self):
super().__init__()
densenet = densenet121(pretrained=True)
self.backbone = densenet.features
self.decode = nn.Sequential(
nn.ConvTranspose2d(1024, 512, (2, 2), stride=2),
nn.BatchNorm2d(512),
nn.LeakyReLU(),
nn.ConvTranspose2d(512, 256, (2, 2), stride=2),
nn.BatchNorm2d(256),
nn.LeakyReLU(),
nn.ConvTranspose2d(256, 64, (2, 2), stride=2),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 20, (1, 1)),
nn.BatchNorm2d(20),
)
def forward(self, x):
feature = self.backbone(x)
output = self.decode(feature)
lbl = F.tanh(output[:, :17, ...])
tag = F.tanh(output[:, 17:, ...])
return lbl, tag
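# Shape sketch (assuming the 384x384 inputs and 96x96 labels used in __main__
# below): a (B, 3, 384, 384) batch passes through the DenseNet-121 features
# (overall stride 32) giving (B, 1024, 12, 12); the three transposed
# convolutions upsample 8x to (B, 20, 96, 96), which is split into 17 keypoint
# heatmap channels (`lbl`) and 3 embedding/tag channels (`tag`).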
class LblLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred_batch, true_batch):
wgt = torch.ones_like(pred_batch)
wgt[true_batch > 0] = 100
dis = (pred_batch - true_batch)**2
return (dis * wgt).mean()
class TagLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred_batch, kpt_batch, vis_batch, tag_batch):
batch_size, D, lblH, lblW = pred_batch.size()
device = pred_batch.device
losses = torch.zeros(batch_size, dtype=torch.float, device=device)
unnorm_term = torch.tensor([lblW, lblH], dtype=torch.float, device=device)
for i in range(batch_size):
pred = pred_batch[i] # (D, dstH, dstW)
viss = vis_batch[i].to(device) # (n_people * 17,)
tags = tag_batch[i].to(device) # (n_people * 17,)
kpts = kpt_batch[i].to(device) # (n_people * 17, 2)
kpts = kpts[viss > 0] * unnorm_term
kpts = torch.floor(kpts).long() # Don't use round -> index out of range
true_ebd = tags[viss > 0]
pred_ebd = pred[:, kpts[:, 0], kpts[:, 1]]
K = true_ebd.size(0)
A = true_ebd.expand(K, K) # (K, K)
B = A.t() # (K, K)
true_similarity = (A == B).float() # (K, K)
A = pred_ebd.unsqueeze(1) # (D, 1, K)
A = A.expand(D, K, K) # (D, K, K)
B = pred_ebd.unsqueeze(2) # (D, K, 1)
B = B.expand(D, K, K) # (D, K, K)
exponent = ((A - B)**2).mean(dim=0) # (K, K)
pred_similarity = 2 / (1 + torch.exp(exponent))
wgt = torch.zeros(K, K, dtype=torch.float, device=device)
wgt[(true_similarity > 0) | (pred_similarity > 0)] = 10.0
dis = (pred_similarity - true_similarity)**2
losses[i] = (dis * wgt).mean()
return torch.mean(losses)
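# Toy illustration (hypothetical numbers) of the grouping objective above: for
# K = 3 visible keypoints whose person tags are [7, 7, 9], true_similarity is
#
#     [[1, 1, 0],
#      [1, 1, 0],
#      [0, 0, 1]]
#
# while pred_similarity = 2 / (1 + exp(mean_d (e_i - e_j)**2)) is 1 for
# identical embeddings and decays toward 0 as they separate, so the weighted
# squared difference pulls same-person embeddings together and pushes
# different people apart (an associative-embedding style loss).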
class RunningAverage(object):
def __init__(self):
super().__init__()
self.iter = 0
self.avg = 0.0
def update(self, x):
self.avg = (self.avg * self.iter + x) / (self.iter + 1)
self.iter += 1
def __str__(self):
if self.iter == 0:
return '-'
return f'{self.avg:.4f}'
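# Minimal usage sketch: the tracker keeps an incremental mean so per-batch
# metrics can be reported without storing every value.
#
#     avg = RunningAverage()
#     for x in (1.0, 2.0, 3.0):
#         avg.update(x)
#     str(avg)  # -> '2.0000'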
class PoseEstimator:
def __init__(self, log_dir, device):
self.device = device
self.log_dir = pathlib.Path(log_dir)
self.log_dir.mkdir(exist_ok=True)
self.model = PoseModel().to(device)
self.optim = torch.optim.Adam(self.model.parameters(), lr=0.01)
self.decay = torch.optim.lr_scheduler.StepLR(self.optim, step_size=10)
self.lbl_criterion = LblLoss()
self.tag_criterion = TagLoss()
def fit(self, train_set, valid_set, vis_set, epoch=100):
self.train_loader = DataLoader(train_set, batch_size=16,
shuffle=True, collate_fn=COCOKeypoint.collate_fn, num_workers=4)
self.valid_loader = DataLoader(valid_set, batch_size=16,
shuffle=False, collate_fn=COCOKeypoint.collate_fn, num_workers=4)
self.vis_loader = DataLoader(vis_set, batch_size=16,
shuffle=False, collate_fn=COCOKeypoint.collate_fn, num_workers=4)
self.log = pd.DataFrame()
for self.ep in range(epoch):
self.epoch_dir = (self.log_dir / f'{self.ep:03d}')
self.epoch_dir.mkdir()
self.msg = dict()
tqdm_args = {
'total': len(train_set) + len(valid_set),
'desc': f'Epoch {self.ep:03d}',
'ascii': True,
}
with tqdm(**tqdm_args) as self.pbar:
self.decay.step()
self._train()
with torch.no_grad():
self._valid()
self._vis()
self._log()
def _train(self):
self.msg.update({
'loss': RunningAverage(),
'lbl_loss': RunningAverage(),
'tag_loss': RunningAverage()
})
self.model.train()
for img_batch, lbl_batch, kpt_batch, \
vis_batch, tag_batch, box_batch in iter(self.train_loader):
img_batch = img_batch.to(self.device)
lbl_batch = lbl_batch.to(self.device)
self.optim.zero_grad()
pred_lbl, pred_tag = self.model(img_batch)
lbl_loss = self.lbl_criterion(pred_lbl, lbl_batch)
tag_loss = self.tag_criterion(pred_tag, kpt_batch, vis_batch, tag_batch) * 0.005
loss = lbl_loss + tag_loss
loss.backward()
self.optim.step()
self.msg['loss'].update(loss.item())
self.msg['lbl_loss'].update(lbl_loss.item())
self.msg['tag_loss'].update(tag_loss.item())
self.pbar.set_postfix(self.msg)
self.pbar.update(len(img_batch))
def _valid(self):
self.msg.update({
'val_loss': RunningAverage(),
'val_lbl_loss': RunningAverage(),
'val_tag_loss': RunningAverage()
})
self.model.eval()
for img_batch, lbl_batch, kpt_batch, \
vis_batch, tag_batch, box_batch in iter(self.valid_loader):
img_batch = img_batch.to(self.device)
lbl_batch = lbl_batch.to(self.device)
pred_lbl, pred_tag = self.model(img_batch)
lbl_loss = self.lbl_criterion(pred_lbl, lbl_batch)
tag_loss = self.tag_criterion(pred_tag, kpt_batch, vis_batch, tag_batch) * 0.005
loss = lbl_loss + tag_loss
self.msg['val_loss'].update(loss.item())
self.msg['val_lbl_loss'].update(lbl_loss.item())
self.msg['val_tag_loss'].update(tag_loss.item())
self.pbar.update(len(img_batch))
self.pbar.set_postfix(self.msg)
def _vis(self):
self.model.eval()
idx = 0
for img_batch, lbl_batch, kpt_batch, \
vis_batch, tag_batch, box_batch in iter(self.vis_loader):
pred_lbl, pred_tag = self.model(img_batch.to(self.device))
pred_lbl = pred_lbl.cpu()
pred_tag = pred_tag.cpu()
pred_lbl = F.sigmoid(pred_lbl)
pred_tag = F.sigmoid(pred_tag)
batch_size, _, H, W = img_batch.size()
pred_tag = F.upsample(pred_tag, (H, W))
for i in range(batch_size):
img = img_batch[i]
vis_lbl = torch.cat((lbl_batch[i], pred_lbl[i]), dim=0).unsqueeze(1)
vis_tag = pred_tag[i] * 0.7 + 0.3 * img
save_image(img, f'{self.epoch_dir}/{idx:05d}.jpg')
save_image(vis_lbl, f'{self.epoch_dir}/{idx:05d}_lbl.jpg', nrow=17, pad_value=1)
save_image(vis_tag, f'{self.epoch_dir}/{idx:05d}_tag.jpg')
idx += 1
def _log(self):
new_row = dict((k, v.avg) for k, v in self.msg.items())
self.log = self.log.append(new_row, ignore_index=True)
self.log.to_csv(str(self.log_dir / 'log.csv'))
# plot loss
fig, ax = plt.subplots(1, 3, dpi=100)
self.log[['loss', 'val_loss']].plot(ax=ax[0])
self.log[['lbl_loss', 'val_lbl_loss']].plot(ax=ax[1])
self.log[['tag_loss', 'val_tag_loss']].plot(ax=ax[2])
fig.tight_layout()
fig.savefig(str(self.log_dir / 'loss.jpg'))
plt.close() # Close plot to prevent RE
# model
torch.save(self.model, str(self.epoch_dir / 'model.pth'))
if __name__ == '__main__':
img_dir = '/store/COCO/val2017/'
anno_path = '/store/COCO/annotations/person_keypoints_val2017.json'
ds = COCOKeypoint(img_dir, anno_path, img_size=(384, 384), lbl_size=(96, 96))
dl = DataLoader(ds, batch_size=16, shuffle=True, collate_fn=ds.collate_fn, num_workers=1)
device = torch.device('cuda')
model = PoseModel().to(device)
model = model.train()
optim = torch.optim.Adam(model.parameters(), lr=0.001)
pbar = tqdm(total=len(ds), ascii=True)
for img_batch, lbl_batch, kpt_batch, vis_batch, tag_batch, box_batch in dl:
img_batch = img_batch.to(device)
lbl_batch = lbl_batch.to(device)
pred_lbl, pred_tag = model(img_batch)
optim.zero_grad()
lbl_loss = LblLoss()(pred_lbl, lbl_batch)
tag_loss = TagLoss()(pred_tag, kpt_batch, vis_batch, tag_batch)
loss = lbl_loss + tag_loss
loss.backward()
optim.step()
pbar.update(len(img_batch))
pbar.set_postfix({
'loss': loss.item(),
'lbl_loss': lbl_loss.item(),
'tag_loss': tag_loss.item()
})
pbar.close()
|
the-stack_0_8878 | # TODO(matt): Reformat script.
"""
Big Data Training
=================
"""
###############################################################################
# train
###############################################################################
import argparse
import collections
import os
import sys
import time
from typing import Tuple
import boto3
import mlflow
import pandas as pd
import ray
import torch
import torch.nn as nn
import torch.optim as optim
from ray import train
from ray.data.aggregate import Mean, Std
from ray.train import Trainer
from ray.train.callbacks.logging import MLflowLoggerCallback
from ray.train.callbacks import TBXLoggerCallback
from torch.nn.parallel import DistributedDataParallel
def make_and_upload_dataset(dir_path):
import random
import os
import pandas as pd
import sklearn.datasets
NUM_EXAMPLES = 2_000_000
NUM_FEATURES = 20
PARQUET_FILE_CHUNK_SIZE = 50_000
NUM_FILES = NUM_EXAMPLES // PARQUET_FILE_CHUNK_SIZE
def create_data_chunk(n, d, seed, include_label=False):
X, y = sklearn.datasets.make_classification(
n_samples=n,
n_features=d,
n_informative=10,
n_redundant=2,
n_repeated=0,
n_classes=2,
n_clusters_per_class=3,
weights=None,
flip_y=0.03,
class_sep=0.8,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=False,
random_state=seed,
)
# turn into dataframe with column names
col_names = ["feature_%0d" % i for i in range(1, d + 1, 1)]
df = pd.DataFrame(X)
df.columns = col_names
# add some bogus categorical data columns
options = ["apple", "banana", "orange"]
df["fruit"] = df.feature_1.map(
lambda x: random.choice(options)
) # bogus, but nice to test categoricals
# add some nullable columns
options = [None, 1, 2]
df["nullable_feature"] = df.feature_1.map(
lambda x: random.choice(options)
) # bogus, but nice to test categoricals
# add label column
if include_label:
df["label"] = y
return df
# create data files
print("Creating synthetic dataset...")
data_path = os.path.join(dir_path, "data")
os.makedirs(data_path, exist_ok=True)
for i in range(NUM_FILES):
path = os.path.join(data_path, f"data_{i:05d}.parquet.snappy")
if not os.path.exists(path):
tmp_df = create_data_chunk(
n=PARQUET_FILE_CHUNK_SIZE, d=NUM_FEATURES, seed=i, include_label=True
)
tmp_df.to_parquet(path, compression="snappy", index=False)
print(f"Wrote {path} to disk...")
# todo: at large enough scale we might want to upload the rest after
# first N files rather than write to disk
# to simulate a user with local copy of subset of data
print("Creating synthetic inference dataset...")
inference_path = os.path.join(dir_path, "inference")
os.makedirs(inference_path, exist_ok=True)
for i in range(NUM_FILES):
path = os.path.join(inference_path, f"data_{i:05d}.parquet.snappy")
if not os.path.exists(path):
tmp_df = create_data_chunk(
n=PARQUET_FILE_CHUNK_SIZE, d=NUM_FEATURES, seed=i, include_label=False
)
tmp_df.to_parquet(path, compression="snappy", index=False)
print(f"Wrote {path} to disk...")
# todo: at large enough scale we might want to upload the rest after
# first N files rather than write to disk
# to simulate a user with local copy of subset of data
# os.system("aws s3 sync ./data s3://cuj-big-data/data")
# os.system("aws s3 sync ./inference s3://cuj-big-data/inference")
def read_dataset(path: str) -> ray.data.Dataset:
print(f"reading data from {path}")
return ray.data.read_parquet(path).random_shuffle()
class DataPreprocessor:
"""A Datasets-based preprocessor that fits scalers/encoders to the training
dataset and transforms the training, testing, and inference datasets using
those fitted scalers/encoders.
"""
def __init__(self):
# List of present fruits, used for one-hot encoding of fruit column.
self.fruits = None
# Mean and stddev stats used for standard scaling of the feature
# columns.
self.standard_stats = None
def preprocess_train_data(
self, ds: ray.data.Dataset
) -> Tuple[ray.data.Dataset, ray.data.Dataset]:
print("\n\nPreprocessing training dataset.\n")
return self._preprocess(ds, False)
def preprocess_inference_data(self, df: ray.data.Dataset) -> ray.data.Dataset:
print("\n\nPreprocessing inference dataset.\n")
return self._preprocess(df, True)[0]
def _preprocess(
self, ds: ray.data.Dataset, inferencing: bool
) -> Tuple[ray.data.Dataset, ray.data.Dataset]:
print("\nStep 1: Dropping nulls, creating new_col, updating feature_1\n")
def batch_transformer(df: pd.DataFrame):
# Disable chained assignment warning.
pd.options.mode.chained_assignment = None
# Drop nulls.
df = df.dropna(subset=["nullable_feature"])
# Add new column.
df["new_col"] = (
df["feature_1"] - 2 * df["feature_2"] + df["feature_3"]
) / 3.0
# Transform column.
df["feature_1"] = 2.0 * df["feature_1"] + 0.1
return df
ds = ds.map_batches(batch_transformer, batch_format="pandas")
print(
"\nStep 2: Precalculating fruit-grouped mean for new column and "
"for one-hot encoding (latter only uses fruit groups)\n"
)
agg_ds = ds.groupby("fruit").mean("feature_1")
fruit_means = {r["fruit"]: r["mean(feature_1)"] for r in agg_ds.take_all()}
print(
"\nStep 3: create mean_by_fruit as mean of feature_1 groupby "
"fruit; one-hot encode fruit column\n"
)
if inferencing:
assert self.fruits is not None
else:
assert self.fruits is None
self.fruits = list(fruit_means.keys())
fruit_one_hots = {
            fruit: collections.defaultdict(int, {fruit: 1}) for fruit in self.fruits
}
def batch_transformer(df: pd.DataFrame):
# Add column containing the feature_1-mean of the fruit groups.
df["mean_by_fruit"] = df["fruit"].map(fruit_means)
# One-hot encode the fruit column.
for fruit, one_hot in fruit_one_hots.items():
df[f"fruit_{fruit}"] = df["fruit"].map(one_hot)
# Drop the fruit column, which is no longer needed.
df.drop(columns="fruit", inplace=True)
return df
ds = ds.map_batches(batch_transformer, batch_format="pandas")
if inferencing:
print("\nStep 4: Standardize inference dataset\n")
assert self.standard_stats is not None
else:
assert self.standard_stats is None
print("\nStep 4a: Split training dataset into train-test split\n")
# Split into train/test datasets.
split_index = int(0.9 * ds.count())
# Split into 90% training set, 10% test set.
train_ds, test_ds = ds.split_at_indices([split_index])
print(
"\nStep 4b: Precalculate training dataset stats for "
"standard scaling\n"
)
# Calculate stats needed for standard scaling feature columns.
feature_columns = [col for col in train_ds.schema().names if col != "label"]
standard_aggs = [
agg(on=col) for col in feature_columns for agg in (Mean, Std)
]
self.standard_stats = train_ds.aggregate(*standard_aggs)
print("\nStep 4c: Standardize training dataset\n")
# Standard scaling of feature columns.
standard_stats = self.standard_stats
def batch_standard_scaler(df: pd.DataFrame):
def column_standard_scaler(s: pd.Series):
if s.name == "label":
# Don't scale the label column.
return s
s_mean = standard_stats[f"mean({s.name})"]
s_std = standard_stats[f"std({s.name})"]
return (s - s_mean) / s_std
return df.transform(column_standard_scaler)
if inferencing:
# Apply standard scaling to inference dataset.
inference_ds = ds.map_batches(batch_standard_scaler, batch_format="pandas")
return inference_ds, None
else:
# Apply standard scaling to both training dataset and test dataset.
train_ds = train_ds.map_batches(
batch_standard_scaler, batch_format="pandas"
)
test_ds = test_ds.map_batches(batch_standard_scaler, batch_format="pandas")
return train_ds, test_ds
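# Illustrative sketch (not called anywhere; not part of the original script):
# the intended fit-then-transform flow of DataPreprocessor. The path arguments
# are assumptions for the example, and Ray must already be initialized.
def _example_preprocess_flow(train_path: str, infer_path: str):
    prep = DataPreprocessor()
    # Fitting happens on the training data: the fruit groups and the standard
    # scaling stats are stored on the preprocessor instance.
    train_ds, test_ds = prep.preprocess_train_data(read_dataset(train_path))
    # The inference data reuses the fitted fruit list and scaling stats.
    infer_ds = prep.preprocess_inference_data(read_dataset(infer_path))
    return train_ds, test_ds, infer_ds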
def inference(
dataset, model_cls: type, batch_size: int, result_path: str, use_gpu: bool
):
print("inferencing...")
num_gpus = 1 if use_gpu else 0
dataset.map_batches(
model_cls,
compute="actors",
batch_size=batch_size,
batch_format="pandas",
num_gpus=num_gpus,
num_cpus=0,
).write_parquet(result_path)
"""
TODO: Define neural network code in pytorch
P0:
1. can take arguments to change size of net arbitrarily so we can stress test
against distributed training on cluster
2. has a network (nn.module?), optimizer, and loss function for binary
classification
3. has some semblance of regularization (i.e. via dropout) so that this
artificially gigantic net doesn't just overfit horrendously
4. works well with pytorch dataset we'll create from Ray data
.to_torch_dataset()
P1:
1. also tracks AUC for training and testing sets and records it to tensorboard
"""
class Net(nn.Module):
def __init__(self, n_layers, n_features, num_hidden, dropout_every, drop_prob):
super().__init__()
self.n_layers = n_layers
self.dropout_every = dropout_every
self.drop_prob = drop_prob
self.fc_input = nn.Linear(n_features, num_hidden)
self.relu_input = nn.ReLU()
for i in range(self.n_layers):
layer = nn.Linear(num_hidden, num_hidden)
relu = nn.ReLU()
dropout = nn.Dropout(p=self.drop_prob)
setattr(self, f"fc_{i}", layer)
setattr(self, f"relu_{i}", relu)
if i % self.dropout_every == 0:
# only apply every few layers
setattr(self, f"drop_{i}", dropout)
self.add_module(f"drop_{i}", dropout)
self.add_module(f"fc_{i}", layer)
self.fc_output = nn.Linear(num_hidden, 1)
def forward(self, x):
x = self.fc_input(x)
x = self.relu_input(x)
for i in range(self.n_layers):
x = getattr(self, f"fc_{i}")(x)
x = getattr(self, f"relu_{i}")(x)
if i % self.dropout_every == 0:
x = getattr(self, f"drop_{i}")(x)
x = self.fc_output(x)
return x
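# Illustrative sketch (not called anywhere; not part of the original script):
# build a tiny Net and run a single forward pass on random data. All sizes
# below are assumptions chosen for the example.
def _example_net_forward():
    net = Net(n_layers=4, n_features=8, num_hidden=16, dropout_every=2, drop_prob=0.2)
    x = torch.randn(5, 8)  # batch of 5 rows with 8 features
    logits = net(x)        # shape (5, 1); apply torch.sigmoid for probabilities
    return logits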
def train_epoch(dataset, model, device, criterion, optimizer):
num_correct = 0
num_total = 0
running_loss = 0.0
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
loss.backward()
optimizer.step()
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"training batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def test_epoch(dataset, model, device, criterion):
num_correct = 0
num_total = 0
running_loss = 0.0
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"testing batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def train_func(config):
use_gpu = config["use_gpu"]
num_epochs = config["num_epochs"]
batch_size = config["batch_size"]
num_layers = config["num_layers"]
num_hidden = config["num_hidden"]
dropout_every = config["dropout_every"]
dropout_prob = config["dropout_prob"]
num_features = config["num_features"]
print("Defining model, loss, and optimizer...")
# Setup device.
device = torch.device(
f"cuda:{train.local_rank()}" if use_gpu and torch.cuda.is_available() else "cpu"
)
print(f"Device: {device}")
# Setup data.
train_dataset_pipeline = train.get_dataset_shard("train_dataset")
train_dataset_epoch_iterator = train_dataset_pipeline.iter_epochs()
test_dataset = train.get_dataset_shard("test_dataset")
test_torch_dataset = test_dataset.to_torch(
label_column="label", batch_size=batch_size
)
net = Net(
n_layers=num_layers,
n_features=num_features,
num_hidden=num_hidden,
dropout_every=dropout_every,
drop_prob=dropout_prob,
).to(device)
print(net.parameters)
net = train.torch.prepare_model(net)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(net.parameters(), weight_decay=0.0001)
print("Starting training...")
for epoch in range(num_epochs):
train_dataset = next(train_dataset_epoch_iterator)
train_torch_dataset = train_dataset.to_torch(
label_column="label", batch_size=batch_size
)
train_running_loss, train_num_correct, train_num_total = train_epoch(
train_torch_dataset, net, device, criterion, optimizer
)
train_acc = train_num_correct / train_num_total
print(
f"epoch [{epoch + 1}]: training accuracy: "
f"{train_num_correct} / {train_num_total} = {train_acc:.4f}"
)
test_running_loss, test_num_correct, test_num_total = test_epoch(
test_torch_dataset, net, device, criterion
)
test_acc = test_num_correct / test_num_total
print(
f"epoch [{epoch + 1}]: testing accuracy: "
f"{test_num_correct} / {test_num_total} = {test_acc:.4f}"
)
# Record and log stats.
train.report(
train_acc=train_acc,
train_loss=train_running_loss,
test_acc=test_acc,
test_loss=test_running_loss,
)
# Checkpoint model.
module = net.module if isinstance(net, DistributedDataParallel) else net
train.save_checkpoint(model_state_dict=module.state_dict())
if train.world_rank() == 0:
return module.cpu()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir-path", default=".", type=str, help="Path to read and write data from"
)
parser.add_argument(
"--use-s3",
action="store_true",
default=False,
help="Use data from s3 for testing.",
)
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.",
)
parser.add_argument(
"--address",
required=False,
type=str,
        help="The address to use for Ray. `auto` if running through `ray submit`.",
)
parser.add_argument(
"--num-workers",
default=1,
type=int,
help="The number of Ray workers to use for distributed training",
)
parser.add_argument(
"--large-dataset", action="store_true", default=False, help="Use 500GB dataset"
)
parser.add_argument(
"--use-gpu", action="store_true", default=False, help="Use GPU for training."
)
parser.add_argument(
"--mlflow-register-model",
action="store_true",
help="Whether to use mlflow model registry. If set, a local MLflow "
"tracking server is expected to have already been started.",
)
args = parser.parse_args()
smoke_test = args.smoke_test
address = args.address
num_workers = args.num_workers
use_gpu = args.use_gpu
use_s3 = args.use_s3
dir_path = args.dir_path
large_dataset = args.large_dataset
if large_dataset:
assert use_s3, "--large-dataset requires --use-s3 to be set."
start_time = time.time()
ray.init(address=address)
make_and_upload_dataset(dir_path)
# Setup MLflow.
# By default, all metrics & artifacts for each run will be saved to disk
# in ./mlruns directory. Uncomment the below lines if you want to change
# the URI for the tracking uri.
# TODO: Use S3 backed tracking server for golden notebook.
if args.mlflow_register_model:
# MLflow model registry does not work with a local file system backend.
# Have to start a mlflow tracking server on localhost
mlflow.set_tracking_uri("http://127.0.0.1:5000")
# Set the experiment. This will create the experiment if not already
# exists.
mlflow.set_experiment("cuj-big-data-training")
if use_s3:
# Check if s3 data is populated.
BUCKET_NAME = "cuj-big-data"
FOLDER_NAME = "data/"
s3_resource = boto3.resource("s3")
bucket = s3_resource.Bucket(BUCKET_NAME)
count = bucket.objects.filter(Prefix=FOLDER_NAME)
if len(list(count)) == 0:
print("please run `python make_and_upload_dataset.py` first")
sys.exit(1)
data_path = (
"s3://cuj-big-data/big-data/"
if large_dataset
else "s3://cuj-big-data/data/"
)
inference_path = "s3://cuj-big-data/inference/"
inference_output_path = "s3://cuj-big-data/output/"
else:
data_path = os.path.join(dir_path, "data")
inference_path = os.path.join(dir_path, "inference")
inference_output_path = "/tmp"
if len(os.listdir(data_path)) <= 1 or len(os.listdir(inference_path)) <= 1:
print("please run `python make_and_upload_dataset.py` first")
sys.exit(1)
if smoke_test:
# Only read a single file.
data_path = os.path.join(data_path, "data_00000.parquet.snappy")
inference_path = os.path.join(inference_path, "data_00000.parquet.snappy")
preprocessor = DataPreprocessor()
train_dataset, test_dataset = preprocessor.preprocess_train_data(
read_dataset(data_path)
)
num_columns = len(train_dataset.schema().names)
# remove label column.
num_features = num_columns - 1
NUM_EPOCHS = 2
BATCH_SIZE = 512
NUM_HIDDEN = 50 # 200
NUM_LAYERS = 3 # 15
DROPOUT_EVERY = 5
DROPOUT_PROB = 0.2
# Random global shuffle
train_dataset_pipeline = train_dataset.repeat().random_shuffle_each_window()
del train_dataset
datasets = {"train_dataset": train_dataset_pipeline, "test_dataset": test_dataset}
config = {
"use_gpu": use_gpu,
"num_epochs": NUM_EPOCHS,
"batch_size": BATCH_SIZE,
"num_hidden": NUM_HIDDEN,
"num_layers": NUM_LAYERS,
"dropout_every": DROPOUT_EVERY,
"dropout_prob": DROPOUT_PROB,
"num_features": num_features,
}
# Create 2 callbacks: one for TensorBoard Logging and one for MLflow
# logging. Pass these into Trainer, and all results that are
# reported by ``train.report()`` will be logged to these 2 places.
# TODO: TBXLoggerCallback should create nonexistent logdir
# and should also create 1 directory per file.
tbx_logdir = "./runs"
os.makedirs(tbx_logdir, exist_ok=True)
callbacks = [
TBXLoggerCallback(logdir=tbx_logdir),
MLflowLoggerCallback(
experiment_name="cuj-big-data-training", save_artifact=True
),
]
# Remove CPU resource so Datasets can be scheduled.
resources_per_worker = {"CPU": 0, "GPU": 1} if use_gpu else None
trainer = Trainer(
backend="torch",
num_workers=num_workers,
use_gpu=use_gpu,
resources_per_worker=resources_per_worker,
)
trainer.start()
results = trainer.run(
train_func=train_func, config=config, callbacks=callbacks, dataset=datasets
)
model = results[0]
trainer.shutdown()
if args.mlflow_register_model:
mlflow.pytorch.log_model(
model, artifact_path="models", registered_model_name="torch_model"
)
# Get the latest model from mlflow model registry.
client = mlflow.tracking.MlflowClient()
registered_model_name = "torch_model"
# Get the info for the latest model.
# By default, registered models are in stage "None".
latest_model_info = client.get_latest_versions(
registered_model_name, stages=["None"]
)[0]
latest_version = latest_model_info.version
def load_model_func():
model_uri = f"models:/torch_model/{latest_version}"
return mlflow.pytorch.load_model(model_uri)
else:
state_dict = model.state_dict()
def load_model_func():
num_layers = config["num_layers"]
num_hidden = config["num_hidden"]
dropout_every = config["dropout_every"]
dropout_prob = config["dropout_prob"]
num_features = config["num_features"]
model = Net(
n_layers=num_layers,
n_features=num_features,
num_hidden=num_hidden,
dropout_every=dropout_every,
drop_prob=dropout_prob,
)
model.load_state_dict(state_dict)
return model
class BatchInferModel:
def __init__(self, load_model_func):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = load_model_func().to(self.device)
def __call__(self, batch) -> "pd.DataFrame":
tensor = torch.FloatTensor(batch.values).to(self.device)
return pd.DataFrame(
self.model(tensor).cpu().detach().numpy(), columns=["value"]
)
inference_dataset = preprocessor.preprocess_inference_data(
read_dataset(inference_path)
)
inference(
inference_dataset,
BatchInferModel(load_model_func),
100,
inference_output_path,
use_gpu,
)
end_time = time.time()
total_time = end_time - start_time
print(f"Job finished in {total_time} seconds.")
|
the-stack_0_8881 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.google.cloud.transfers.azure_fileshare_to_gcs import AzureFileShareToGCSOperator
TASK_ID = 'test-azure-fileshare-to-gcs'
AZURE_FILESHARE_SHARE = 'test-share'
AZURE_FILESHARE_DIRECTORY_NAME = '/path/to/dir'
GCS_PATH_PREFIX = 'gs://gcs-bucket/data/'
MOCK_FILES = ["TEST1.csv", "TEST2.csv", "TEST3.csv"]
WASB_CONN_ID = 'wasb_default'
GCS_CONN_ID = 'google_cloud_default'
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
class TestAzureFileShareToGCSOperator(unittest.TestCase):
def test_init(self):
"""Test AzureFileShareToGCSOperator instance is properly initialized."""
operator = AzureFileShareToGCSOperator(
task_id=TASK_ID,
share_name=AZURE_FILESHARE_SHARE,
directory_name=AZURE_FILESHARE_DIRECTORY_NAME,
wasb_conn_id=WASB_CONN_ID,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
self.assertEqual(operator.task_id, TASK_ID)
self.assertEqual(operator.share_name, AZURE_FILESHARE_SHARE)
self.assertEqual(operator.directory_name, AZURE_FILESHARE_DIRECTORY_NAME)
self.assertEqual(operator.wasb_conn_id, WASB_CONN_ID)
self.assertEqual(operator.gcp_conn_id, GCS_CONN_ID)
self.assertEqual(operator.dest_gcs, GCS_PATH_PREFIX)
self.assertEqual(operator.google_impersonation_chain, IMPERSONATION_CHAIN)
@mock.patch('airflow.providers.google.cloud.transfers.azure_fileshare_to_gcs.AzureFileShareHook')
@mock.patch('airflow.providers.google.cloud.transfers.azure_fileshare_to_gcs.GCSHook')
def test_execute(self, gcs_mock_hook, azure_fileshare_mock_hook):
"""Test the execute function when the run is successful."""
operator = AzureFileShareToGCSOperator(
task_id=TASK_ID,
share_name=AZURE_FILESHARE_SHARE,
directory_name=AZURE_FILESHARE_DIRECTORY_NAME,
wasb_conn_id=WASB_CONN_ID,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
azure_fileshare_mock_hook.return_value.list_files.return_value = MOCK_FILES
uploaded_files = operator.execute(None)
gcs_mock_hook.return_value.upload.assert_has_calls(
[
mock.call('gcs-bucket', 'data/TEST1.csv', mock.ANY, gzip=False),
mock.call('gcs-bucket', 'data/TEST3.csv', mock.ANY, gzip=False),
mock.call('gcs-bucket', 'data/TEST2.csv', mock.ANY, gzip=False),
],
any_order=True,
)
azure_fileshare_mock_hook.assert_called_once_with(WASB_CONN_ID)
gcs_mock_hook.assert_called_once_with(
google_cloud_storage_conn_id=GCS_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
self.assertEqual(sorted(MOCK_FILES), sorted(uploaded_files))
@mock.patch('airflow.providers.google.cloud.transfers.azure_fileshare_to_gcs.AzureFileShareHook')
@mock.patch('airflow.providers.google.cloud.transfers.azure_fileshare_to_gcs.GCSHook')
def test_execute_with_gzip(self, gcs_mock_hook, azure_fileshare_mock_hook):
"""Test the execute function when the run is successful."""
operator = AzureFileShareToGCSOperator(
task_id=TASK_ID,
share_name=AZURE_FILESHARE_SHARE,
directory_name=AZURE_FILESHARE_DIRECTORY_NAME,
wasb_conn_id=WASB_CONN_ID,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
google_impersonation_chain=IMPERSONATION_CHAIN,
gzip=True,
)
azure_fileshare_mock_hook.return_value.list_files.return_value = MOCK_FILES
operator.execute(None)
gcs_mock_hook.return_value.upload.assert_has_calls(
[
mock.call('gcs-bucket', 'data/TEST1.csv', mock.ANY, gzip=True),
mock.call('gcs-bucket', 'data/TEST3.csv', mock.ANY, gzip=True),
mock.call('gcs-bucket', 'data/TEST2.csv', mock.ANY, gzip=True),
],
any_order=True,
)
|
the-stack_0_8882 | """Configuration file."""
import numpy as np
import mne
# Empty config
CONFIG = dict()
# Supported MNE types
MNE_EPOCHS_TYPE = (mne.Epochs, mne.EpochsArray, mne.epochs.EpochsFIF,
mne.epochs.BaseEpochs)
CONFIG["MNE_EPOCHS_TYPE"] = MNE_EPOCHS_TYPE
CONFIG["MNE_EPOCHSTFR_TYPE"] = (mne.time_frequency.EpochsTFR)
# Int and Float types
INT_DTYPE = (int, np.int8, np.int16, np.int32, np.int64)
FLOAT_DTYPE = (float, np.float16, np.float32, np.float64)
STR_DTYPE = (str, np.string_)
CONFIG['INT_DTYPE'] = INT_DTYPE
CONFIG['FLOAT_DTYPE'] = FLOAT_DTYPE
CONFIG['STR_DTYPE'] = STR_DTYPE
# gcmi configuration
CONFIG["KW_GCMI"] = dict(shape_checking=False, biascorrect=True,
demeaned=False, mvaxis=-2, traxis=-1)
# copula name conversion
CONFIG["COPULA_CONV"] = dict(cc='gg', cd='gd', ccd='ggd')
# mi types table
CONFIG['MI_TABLE'] = {
'int': {
'none': 'cd',
'int': 'cd',
'float': 'none'
},
'float': {
'none': 'cc',
'int': 'ccd',
'float': 'ccc'
},
'none': {
'none': 'none',
'int': 'none',
'float': 'none',
}
}
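# Illustrative lookups (added for readability; which variable maps to the outer
# vs. inner key is an assumption here — see the frites dataset code for the
# authoritative usage):
# CONFIG['MI_TABLE']['float']['int'] -> 'ccd'
# CONFIG['MI_TABLE']['int']['none'] -> 'cd'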
# mi type full description
CONFIG['MI_REPR'] = {
'none': 'none',
'cc': 'I(x; y (continuous))',
    'cd': 'I(x; y (discrete))',
    'ccd': 'I(x; y (continuous)) | z (discrete)',
'ccc': 'I(x; y (continuous)) | z (continuous)',
}
# general joblib config
CONFIG["JOBLIB_CFG"] = dict()
"""
shuffling method for computing the gcmi_stats_ccd. Use:
* 'c' : shuffle only the continuous variable
* 'd' : shuffle only the discrete variable
* 'cd' : shuffle both the continuous and discrete variables (default)
"""
CONFIG["MI_PERM_CCD"] = 'cd'
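# For example (hypothetical override, shown for readability):
# CONFIG["MI_PERM_CCD"] = 'c'  # shuffle only the continuous variable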
"""
Several functions can be compiled using Numba. Use this argument to specify if
Numba compilation should be used or not
"""
CONFIG['USE_NUMBA'] = True
"""
MarsAtlas region of interest names
"""
CONFIG['MA_NAMES'] = [
'L_VCcm', 'L_VCl', 'L_VCs', 'L_Cu', 'L_VCrm', 'L_ITCm', 'L_ITCr', 'L_MTCc',
'L_STCc', 'L_STCr', 'L_MTCr', 'L_ICC', 'L_IPCv', 'L_IPCd', 'L_SPC',
'L_SPCm', 'L_PCm', 'L_PCC', 'L_Sv', 'L_Sdl', 'L_Sdm', 'L_Mv', 'L_Mdl',
'L_Mdm', 'L_PMrv', 'L_PMdl', 'L_PMdm', 'L_PFcdl', 'L_PFcdm', 'L_MCC',
'L_PFrvl', 'L_Pfrdli', 'L_Pfrdls', 'L_PFrd', 'L_PFrm', 'L_OFCvl', 'L_OFCv',
'L_OFCvm', 'L_PFCvm', 'L_ACC', 'L_Insula', 'R_VCcm', 'R_VCl', 'R_VCs',
'R_Cu', 'R_VCrm', 'R_ITCm', 'R_ITCr', 'R_MTCc', 'R_STCc', 'R_STCr',
'R_MTCr', 'R_ICC', 'R_IPCv', 'R_IPCd', 'R_SPC', 'R_SPCm', 'R_PCm', 'R_PCC',
'R_Sv', 'R_Sdl', 'R_Sdm', 'R_Mv', 'R_Mdl', 'R_Mdm', 'R_PMrv', 'R_PMdl',
'R_PMdm', 'R_PFcdl', 'R_PFcdm', 'R_MCC', 'R_PFrvl', 'R_Pfrdli', 'R_Pfrdls',
'R_PFrd', 'R_PFrm', 'R_OFCvl', 'R_OFCv', 'R_OFCvm', 'R_PFCvm', 'R_ACC',
'R_Insula', 'L_Thal', 'L_Cd', 'L_Put', 'L_GP', 'L_Hipp', 'L_Amyg', 'L_NAc',
'R_Thal', 'R_Cd', 'R_Put', 'R_GP', 'R_Hipp', 'R_Amyg', 'R_NAc']
"""
Convert the CONFIG dict so that the user cannot add new keys but can still
change existing values.
"""
class FixedDict(dict):
"""Dictionary with fixed keys."""
def __init__(self, dictionary):
dict.__init__(self)
for key in dictionary.keys():
dict.__setitem__(self, key, dictionary[key])
def __setitem__(self, key, item):
if key not in self:
raise IOError("New CONFIG keys are not allowed.")
dict.__setitem__(self, key, item)
CONFIG = FixedDict(CONFIG)
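# Behaviour sketch (comments only, not executed): existing values can still be
# overridden, but adding a new key raises.
# CONFIG['USE_NUMBA'] = False   # OK: key already exists
# CONFIG['NEW_OPTION'] = 1      # raises IOError("New CONFIG keys are not allowed.")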
|
the-stack_0_8883 | import math
import operator
import sys
import pickle
import multiprocessing
import ctypes
import warnings
from distutils.version import LooseVersion
import re
import numpy as np
from numba import njit, jit, vectorize, guvectorize, objmode
from numba.core import types, errors, typing, compiler, cgutils
from numba.core.typed_passes import type_inference_stage
from numba.core.registry import cpu_target
from numba.core.compiler import compile_isolated
from numba.tests.support import (
TestCase,
captured_stdout,
temp_directory,
override_config,
run_in_new_process_in_cache_dir,
)
from numba.core.errors import LoweringError
import unittest
from numba.extending import (
typeof_impl,
type_callable,
lower_builtin,
lower_cast,
overload,
overload_attribute,
overload_method,
models,
register_model,
box,
unbox,
NativeValue,
intrinsic,
_Intrinsic,
register_jitable,
get_cython_function_address,
is_jitted,
)
from numba.core.typing.templates import (
ConcreteTemplate,
signature,
infer,
infer_global,
AbstractTemplate,
)
# Pandas-like API implementation
from .pdlike_usecase import Index, Series
try:
import scipy
if LooseVersion(scipy.__version__) < "0.19":
sc = None
else:
import scipy.special.cython_special as sc
except ImportError:
sc = None
# -----------------------------------------------------------------------
# Define a custom type and an implicit cast on it
class MyDummy(object):
pass
class MyDummyType(types.Opaque):
def can_convert_to(self, context, toty):
if isinstance(toty, types.Number):
from numba.core.typeconv import Conversion
return Conversion.safe
mydummy_type = MyDummyType("mydummy")
mydummy = MyDummy()
@typeof_impl.register(MyDummy)
def typeof_mydummy(val, c):
return mydummy_type
@lower_cast(MyDummyType, types.Number)
def mydummy_to_number(context, builder, fromty, toty, val):
"""
Implicit conversion from MyDummy to int.
"""
return context.get_constant(toty, 42)
def get_dummy():
return mydummy
register_model(MyDummyType)(models.OpaqueModel)
@unbox(MyDummyType)
def unbox_index(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a second custom type but w/o implicit cast to Number
def base_dummy_type_factory(name):
class DynType(object):
pass
class DynTypeType(types.Opaque):
pass
dyn_type_type = DynTypeType(name)
@typeof_impl.register(DynType)
def typeof_mydummy(val, c):
return dyn_type_type
register_model(DynTypeType)(models.OpaqueModel)
return DynTypeType, DynType, dyn_type_type
MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2")
@unbox(MyDummyType2)
def unbox_index2(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a function's typing and implementation using the classical
# two-step API
def func1(x=None):
raise NotImplementedError
def type_func1_(context):
def typer(x=None):
if x in (None, types.none):
# 0-arg or 1-arg with None
return types.int32
elif isinstance(x, types.Float):
# 1-arg with float
return x
return typer
type_func1 = type_callable(func1)(type_func1_)
@lower_builtin(func1)
@lower_builtin(func1, types.none)
def func1_nullary(context, builder, sig, args):
return context.get_constant(sig.return_type, 42)
@lower_builtin(func1, types.Float)
def func1_unary(context, builder, sig, args):
def func1_impl(x):
return math.sqrt(2 * x)
return context.compile_internal(builder, func1_impl, sig, args)
# We can do the same for a known internal operation, here "print_item"
# which we extend to support MyDummyType.
@infer
class PrintDummy(ConcreteTemplate):
key = "print_item"
cases = [signature(types.none, mydummy_type)]
@lower_builtin("print_item", MyDummyType)
def print_dummy(context, builder, sig, args):
[x] = args
pyapi = context.get_python_api(builder)
strobj = pyapi.unserialize(pyapi.serialize_object("hello!"))
pyapi.print_object(strobj)
pyapi.decref(strobj)
return context.get_dummy_value()
# -----------------------------------------------------------------------
# Define an overloaded function (combined API)
def where(cond, x, y):
raise NotImplementedError
def np_where(cond, x, y):
"""
Wrap np.where() to allow for keyword arguments
"""
return np.where(cond, x, y)
def call_where(cond, x, y):
return where(cond, y=y, x=x)
@overload(where)
def overload_where_arrays(cond, x, y):
"""
Implement where() for arrays.
"""
# Choose implementation based on argument types.
if isinstance(cond, types.Array):
if x.dtype != y.dtype:
raise errors.TypingError("x and y should have the same dtype")
# Array where() => return an array of the same shape
if all(ty.layout == "C" for ty in (cond, x, y)):
def where_impl(cond, x, y):
"""
Fast implementation for C-contiguous arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
"""
Generic implementation for other arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
return where_impl
# We can define another overload function for the same function, they
# will be tried in turn until one succeeds.
@overload(where)
def overload_where_scalars(cond, x, y):
"""
Implement where() for scalars.
"""
if not isinstance(cond, types.Array):
if x != y:
raise errors.TypingError("x and y should have the same type")
def where_impl(cond, x, y):
"""
Scalar where() => return a 0-dim array
"""
scal = x if cond else y
# Can't use full_like() on Numpy < 1.8
arr = np.empty_like(scal)
arr[()] = scal
return arr
return where_impl
# -----------------------------------------------------------------------
# Overload an already defined built-in function, extending it for new types.
@overload(len)
def overload_len_dummy(arg):
if isinstance(arg, MyDummyType):
def len_impl(arg):
return 13
return len_impl
@overload(operator.add)
def overload_add_dummy(arg1, arg2):
if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
arg2, (MyDummyType, MyDummyType2)
):
def dummy_add_impl(arg1, arg2):
return 42
return dummy_add_impl
@overload(operator.delitem)
def overload_dummy_delitem(obj, idx):
if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
def dummy_delitem_impl(obj, idx):
print("del", obj, idx)
return dummy_delitem_impl
@overload(operator.getitem)
def overload_dummy_getitem(obj, idx):
if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
def dummy_getitem_impl(obj, idx):
return idx + 123
return dummy_getitem_impl
@overload(operator.setitem)
def overload_dummy_setitem(obj, idx, val):
if all(
[
isinstance(obj, MyDummyType),
isinstance(idx, types.Integer),
isinstance(val, types.Integer),
]
):
def dummy_setitem_impl(obj, idx, val):
print(idx, val)
return dummy_setitem_impl
def call_add_operator(arg1, arg2):
return operator.add(arg1, arg2)
def call_add_binop(arg1, arg2):
return arg1 + arg2
@overload(operator.iadd)
def overload_iadd_dummy(arg1, arg2):
if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
arg2, (MyDummyType, MyDummyType2)
):
def dummy_iadd_impl(arg1, arg2):
return 42
return dummy_iadd_impl
def call_iadd_operator(arg1, arg2):
return operator.add(arg1, arg2)
def call_iadd_binop(arg1, arg2):
arg1 += arg2
return arg1
def call_delitem(obj, idx):
del obj[idx]
def call_getitem(obj, idx):
return obj[idx]
def call_setitem(obj, idx, val):
obj[idx] = val
@overload_method(MyDummyType, "length")
def overload_method_length(arg):
def imp(arg):
return len(arg)
return imp
def cache_overload_method_usecase(x):
return x.length()
def call_func1_nullary():
return func1()
def call_func1_unary(x):
return func1(x)
def len_usecase(x):
return len(x)
def print_usecase(x):
print(x)
def getitem_usecase(x, key):
return x[key]
def npyufunc_usecase(x):
return np.cos(np.sin(x))
def get_data_usecase(x):
return x._data
def get_index_usecase(x):
return x._index
def is_monotonic_usecase(x):
return x.is_monotonic_increasing
def make_series_usecase(data, index):
return Series(data, index)
def clip_usecase(x, lo, hi):
return x.clip(lo, hi)
# -----------------------------------------------------------------------
def return_non_boxable():
return np
@overload(return_non_boxable)
def overload_return_non_boxable():
def imp():
return np
return imp
def non_boxable_ok_usecase(sz):
mod = return_non_boxable()
return mod.arange(sz)
def non_boxable_bad_usecase():
return return_non_boxable()
def mk_func_input(f):
pass
@infer_global(mk_func_input)
class MkFuncTyping(AbstractTemplate):
def generic(self, args, kws):
assert isinstance(args[0], types.MakeFunctionLiteral)
return signature(types.none, *args)
def mk_func_test_impl():
mk_func_input(lambda a: a)
# -----------------------------------------------------------------------
@overload(np.exp)
def overload_np_exp(obj):
if isinstance(obj, MyDummyType):
def imp(obj):
# Returns a constant if a MyDummyType is seen
return 0xDEADBEEF
return imp
class TestLowLevelExtending(TestCase):
"""
Test the low-level two-tier extension API.
"""
# We check with both @jit and compile_isolated(), to exercise the
# registration logic.
def test_func1(self):
pyfunc = call_func1_nullary
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), 42)
pyfunc = call_func1_unary
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(None), 42)
self.assertPreciseEqual(cfunc(18.0), 6.0)
def test_func1_isolated(self):
pyfunc = call_func1_nullary
cr = compile_isolated(pyfunc, ())
self.assertPreciseEqual(cr.entry_point(), 42)
pyfunc = call_func1_unary
cr = compile_isolated(pyfunc, (types.float64,))
self.assertPreciseEqual(cr.entry_point(18.0), 6.0)
def test_type_callable_keeps_function(self):
self.assertIs(type_func1, type_func1_)
self.assertIsNotNone(type_func1)
def test_cast_mydummy(self):
pyfunc = get_dummy
cr = compile_isolated(pyfunc, (), types.float64)
self.assertPreciseEqual(cr.entry_point(), 42.0)
def test_mk_func_literal(self):
"""make sure make_function is passed to typer class as a literal
"""
test_ir = compiler.run_frontend(mk_func_test_impl)
typingctx = cpu_target.typing_context
typingctx.refresh()
typemap, _, _ = type_inference_stage(typingctx, test_ir, (), None)
self.assertTrue(
any(
isinstance(a, types.MakeFunctionLiteral)
for a in typemap.values()
)
)
class TestPandasLike(TestCase):
"""
Test implementing a pandas-like Index object.
Also stresses most of the high-level API.
"""
def test_index_len(self):
i = Index(np.arange(3))
cfunc = jit(nopython=True)(len_usecase)
self.assertPreciseEqual(cfunc(i), 3)
def test_index_getitem(self):
i = Index(np.int32([42, 8, -5]))
cfunc = jit(nopython=True)(getitem_usecase)
self.assertPreciseEqual(cfunc(i, 1), 8)
ii = cfunc(i, slice(1, None))
self.assertIsInstance(ii, Index)
self.assertEqual(list(ii), [8, -5])
def test_index_ufunc(self):
"""
Check Numpy ufunc on an Index object.
"""
i = Index(np.int32([42, 8, -5]))
cfunc = jit(nopython=True)(npyufunc_usecase)
ii = cfunc(i)
self.assertIsInstance(ii, Index)
self.assertPreciseEqual(ii._data, np.cos(np.sin(i._data)))
def test_index_get_data(self):
# The _data attribute is exposed with make_attribute_wrapper()
i = Index(np.int32([42, 8, -5]))
cfunc = jit(nopython=True)(get_data_usecase)
data = cfunc(i)
self.assertIs(data, i._data)
def test_index_is_monotonic(self):
# The is_monotonic_increasing attribute is exposed with
# overload_attribute()
cfunc = jit(nopython=True)(is_monotonic_usecase)
for values, expected in [
([8, 42, 5], False),
([5, 8, 42], True),
([], True),
]:
i = Index(np.int32(values))
got = cfunc(i)
self.assertEqual(got, expected)
def test_series_len(self):
i = Index(np.int32([2, 4, 3]))
s = Series(np.float64([1.5, 4.0, 2.5]), i)
cfunc = jit(nopython=True)(len_usecase)
self.assertPreciseEqual(cfunc(s), 3)
def test_series_get_index(self):
i = Index(np.int32([2, 4, 3]))
s = Series(np.float64([1.5, 4.0, 2.5]), i)
cfunc = jit(nopython=True)(get_index_usecase)
got = cfunc(s)
self.assertIsInstance(got, Index)
self.assertIs(got._data, i._data)
def test_series_ufunc(self):
"""
Check Numpy ufunc on an Series object.
"""
i = Index(np.int32([42, 8, -5]))
s = Series(np.int64([1, 2, 3]), i)
cfunc = jit(nopython=True)(npyufunc_usecase)
ss = cfunc(s)
self.assertIsInstance(ss, Series)
self.assertIsInstance(ss._index, Index)
self.assertIs(ss._index._data, i._data)
self.assertPreciseEqual(ss._values, np.cos(np.sin(s._values)))
def test_series_constructor(self):
i = Index(np.int32([42, 8, -5]))
d = np.float64([1.5, 4.0, 2.5])
cfunc = jit(nopython=True)(make_series_usecase)
got = cfunc(d, i)
self.assertIsInstance(got, Series)
self.assertIsInstance(got._index, Index)
self.assertIs(got._index._data, i._data)
self.assertIs(got._values, d)
def test_series_clip(self):
i = Index(np.int32([42, 8, -5]))
s = Series(np.float64([1.5, 4.0, 2.5]), i)
cfunc = jit(nopython=True)(clip_usecase)
ss = cfunc(s, 1.6, 3.0)
self.assertIsInstance(ss, Series)
self.assertIsInstance(ss._index, Index)
self.assertIs(ss._index._data, i._data)
self.assertPreciseEqual(ss._values, np.float64([1.6, 3.0, 2.5]))
class TestHighLevelExtending(TestCase):
"""
Test the high-level combined API.
"""
def test_where(self):
"""
Test implementing a function with @overload.
"""
pyfunc = call_where
cfunc = jit(nopython=True)(pyfunc)
def check(*args, **kwargs):
expected = np_where(*args, **kwargs)
got = cfunc(*args, **kwargs)
self.assertPreciseEqual(expected, got)
check(x=3, cond=True, y=8)
check(True, 3, 8)
check(
np.bool_([True, False, True]),
np.int32([1, 2, 3]),
np.int32([4, 5, 5]),
)
# The typing error is propagated
with self.assertRaises(errors.TypingError) as raises:
cfunc(np.bool_([]), np.int32([]), np.int64([]))
self.assertIn(
"x and y should have the same dtype", str(raises.exception)
)
def test_len(self):
"""
Test re-implementing len() for a custom type with @overload.
"""
cfunc = jit(nopython=True)(len_usecase)
self.assertPreciseEqual(cfunc(MyDummy()), 13)
self.assertPreciseEqual(cfunc([4, 5]), 2)
def test_print(self):
"""
Test re-implementing print() for a custom type with @overload.
"""
cfunc = jit(nopython=True)(print_usecase)
with captured_stdout():
cfunc(MyDummy())
self.assertEqual(sys.stdout.getvalue(), "hello!\n")
def test_add_operator(self):
"""
Test re-implementing operator.add() for a custom type with @overload.
"""
pyfunc = call_add_operator
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_add_binop(self):
"""
Test re-implementing '+' for a custom type via @overload(operator.add).
"""
pyfunc = call_add_binop
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_iadd_operator(self):
"""
Test re-implementing operator.add() for a custom type with @overload.
"""
pyfunc = call_iadd_operator
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_iadd_binop(self):
"""
Test re-implementing '+' for a custom type via @overload(operator.add).
"""
pyfunc = call_iadd_binop
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_delitem(self):
pyfunc = call_delitem
cfunc = jit(nopython=True)(pyfunc)
obj = MyDummy()
e = None
with captured_stdout() as out:
try:
cfunc(obj, 321)
except Exception as exc:
e = exc
if e is not None:
raise e
self.assertEqual(out.getvalue(), "del hello! 321\n")
def test_getitem(self):
pyfunc = call_getitem
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(MyDummy(), 321), 321 + 123)
def test_setitem(self):
pyfunc = call_setitem
cfunc = jit(nopython=True)(pyfunc)
obj = MyDummy()
e = None
with captured_stdout() as out:
try:
cfunc(obj, 321, 123)
except Exception as exc:
e = exc
if e is not None:
raise e
self.assertEqual(out.getvalue(), "321 123\n")
def test_no_cpython_wrapper(self):
"""
Test overloading whose return value cannot be represented in CPython.
"""
# Test passing Module type from a @overload implementation to ensure
# that the *no_cpython_wrapper* flag works
ok_cfunc = jit(nopython=True)(non_boxable_ok_usecase)
n = 10
got = ok_cfunc(n)
expect = non_boxable_ok_usecase(n)
np.testing.assert_equal(expect, got)
# Verify that the Module type cannot be returned to CPython
bad_cfunc = jit(nopython=True)(non_boxable_bad_usecase)
with self.assertRaises(TypeError) as raises:
bad_cfunc()
errmsg = str(raises.exception)
expectmsg = "cannot convert native Module"
self.assertIn(expectmsg, errmsg)
def test_typing_vs_impl_signature_mismatch_handling(self):
"""
Tests that an overload which has a differing typing and implementing
signature raises an exception.
"""
def gen_ol(impl=None):
def myoverload(a, b, c, kw=None):
pass
@overload(myoverload)
def _myoverload_impl(a, b, c, kw=None):
return impl
@jit(nopython=True)
def foo(a, b, c, d):
myoverload(a, b, c, kw=d)
return foo
sentinel = "Typing and implementation arguments differ in"
# kwarg value is different
def impl1(a, b, c, kw=12):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl1)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument default values", msg)
self.assertIn('<Parameter "kw=12">', msg)
self.assertIn('<Parameter "kw=None">', msg)
# kwarg name is different
def impl2(a, b, c, kwarg=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl2)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument names", msg)
self.assertIn('<Parameter "kwarg=None">', msg)
self.assertIn('<Parameter "kw=None">', msg)
# arg name is different
def impl3(z, b, c, kw=None):
if a > 10: # noqa: F821
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl3)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "a">', msg)
self.assertIn('<Parameter "z">', msg)
from .overload_usecases import impl4, impl5
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl4)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn("First difference: 'z'", msg)
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl5)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "a">', msg)
self.assertIn('<Parameter "z">', msg)
# too many args
def impl6(a, b, c, d, e, kw=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl6)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "d">', msg)
self.assertIn('<Parameter "e">', msg)
# too few args
def impl7(a, b, kw=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl7)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "c">', msg)
# too many kwargs
def impl8(a, b, c, kw=None, extra_kwarg=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl8)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument names", msg)
self.assertIn('<Parameter "extra_kwarg=None">', msg)
# too few kwargs
def impl9(a, b, c):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl9)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument names", msg)
self.assertIn('<Parameter "kw=None">', msg)
def test_typing_vs_impl_signature_mismatch_handling_var_positional(self):
"""
Tests that an overload which has a differing typing and implementing
signature raises an exception and uses VAR_POSITIONAL (*args) in typing
"""
def myoverload(a, kw=None):
pass
from .overload_usecases import var_positional_impl
overload(myoverload)(var_positional_impl)
@jit(nopython=True)
def foo(a, b):
return myoverload(a, b, 9, kw=11)
with self.assertRaises(errors.TypingError) as e:
foo(1, 5)
msg = str(e.exception)
self.assertIn("VAR_POSITIONAL (e.g. *args) argument kind", msg)
self.assertIn("offending argument name is '*star_args_token'", msg)
def test_typing_vs_impl_signature_mismatch_handling_var_keyword(self):
"""
        Tests that an overload which uses **kwargs (VAR_KEYWORD) in either the
        typing or implementing signature is rejected, unless strictness is disabled.
"""
def gen_ol(impl, strict=True):
def myoverload(a, kw=None):
pass
overload(myoverload, strict=strict)(impl)
@jit(nopython=True)
def foo(a, b):
return myoverload(a, kw=11)
return foo
# **kwargs in typing
def ol1(a, **kws):
def impl(a, kw=10):
return a
return impl
gen_ol(ol1, False)(1, 2) # no error if strictness not enforced
with self.assertRaises(errors.TypingError) as e:
gen_ol(ol1)(1, 2)
msg = str(e.exception)
self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg)
self.assertIn("offending argument name is '**kws'", msg)
# **kwargs in implementation
def ol2(a, kw=0):
def impl(a, **kws):
return a
return impl
with self.assertRaises(errors.TypingError) as e:
gen_ol(ol2)(1, 2)
msg = str(e.exception)
self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg)
self.assertIn("offending argument name is '**kws'", msg)
def test_overload_method_kwargs(self):
# Issue #3489
@overload_method(types.Array, "foo")
def fooimpl(arr, a_kwarg=10):
def impl(arr, a_kwarg=10):
return a_kwarg
return impl
@njit
def bar(A):
return A.foo(), A.foo(20), A.foo(a_kwarg=30)
Z = np.arange(5)
self.assertEqual(bar(Z), (10, 20, 30))
def test_overload_method_literal_unpack(self):
# Issue #3683
@overload_method(types.Array, "litfoo")
def litfoo(arr, val):
# Must be an integer
if isinstance(val, types.Integer):
# Must not be literal
if not isinstance(val, types.Literal):
def impl(arr, val):
return val
return impl
@njit
def bar(A):
return A.litfoo(0xCAFE)
A = np.zeros(1)
bar(A)
self.assertEqual(bar(A), 0xCAFE)
def test_overload_ufunc(self):
# Issue #4133.
# Use an extended type (MyDummyType) to use with a customized
# ufunc (np.exp).
@njit
def test():
return np.exp(mydummy)
self.assertEqual(test(), 0xDEADBEEF)
def test_overload_method_stararg(self):
@overload_method(MyDummyType, "method_stararg")
def _ov_method_stararg(obj, val, val2, *args):
def get(obj, val, val2, *args):
return (val, val2, args)
return get
@njit
def foo(obj, *args):
# Test with expanding stararg
return obj.method_stararg(*args)
obj = MyDummy()
self.assertEqual(foo(obj, 1, 2), (1, 2, ()))
self.assertEqual(foo(obj, 1, 2, 3), (1, 2, (3,)))
self.assertEqual(foo(obj, 1, 2, 3, 4), (1, 2, (3, 4)))
@njit
def bar(obj):
# Test with explicit argument
return (
obj.method_stararg(1, 2),
obj.method_stararg(1, 2, 3),
obj.method_stararg(1, 2, 3, 4),
)
self.assertEqual(
bar(obj), ((1, 2, ()), (1, 2, (3,)), (1, 2, (3, 4))),
)
# Check cases that put tuple type into stararg
# NOTE: the expected result has an extra tuple because of stararg.
self.assertEqual(
foo(obj, 1, 2, (3,)), (1, 2, ((3,),)),
)
self.assertEqual(
foo(obj, 1, 2, (3, 4)), (1, 2, ((3, 4),)),
)
self.assertEqual(
foo(obj, 1, 2, (3, (4, 5))), (1, 2, ((3, (4, 5)),)),
)
def _assert_cache_stats(cfunc, expect_hit, expect_misses):
hit = cfunc._cache_hits[cfunc.signatures[0]]
if hit != expect_hit:
raise AssertionError("cache not used")
miss = cfunc._cache_misses[cfunc.signatures[0]]
if miss != expect_misses:
raise AssertionError("cache not used")
class TestOverloadMethodCaching(TestCase):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
def test_caching_overload_method(self):
self._cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", self._cache_dir):
self.run_caching_overload_method()
def run_caching_overload_method(self):
cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase)
self.assertPreciseEqual(cfunc(MyDummy()), 13)
_assert_cache_stats(cfunc, 0, 1)
llvmir = cfunc.inspect_llvm((mydummy_type,))
# Ensure the inner method is not a declaration
decls = [
ln
for ln in llvmir.splitlines()
if ln.startswith("declare") and "overload_method_length" in ln
]
self.assertEqual(len(decls), 0)
# Test in a separate process
try:
ctx = multiprocessing.get_context("spawn")
except AttributeError:
ctx = multiprocessing
q = ctx.Queue()
p = ctx.Process(
target=run_caching_overload_method, args=(q, self._cache_dir)
)
p.start()
q.put(MyDummy())
p.join()
# Ensure subprocess exited normally
self.assertEqual(p.exitcode, 0)
res = q.get(timeout=1)
self.assertEqual(res, 13)
def run_caching_overload_method(q, cache_dir):
"""
Used by TestOverloadMethodCaching.test_caching_overload_method
"""
with override_config("CACHE_DIR", cache_dir):
arg = q.get()
cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase)
res = cfunc(arg)
q.put(res)
# Check cache stat
_assert_cache_stats(cfunc, 1, 0)
class TestIntrinsic(TestCase):
def test_void_return(self):
"""
Verify that returning a None from codegen function is handled
automatically for void functions, otherwise raise exception.
"""
@intrinsic
def void_func(typingctx, a):
sig = types.void(types.int32)
def codegen(context, builder, signature, args):
pass # do nothing, return None, should be turned into
# dummy value
return sig, codegen
@intrinsic
def non_void_func(typingctx, a):
sig = types.int32(types.int32)
def codegen(context, builder, signature, args):
pass # oops, should be returning a value here, raise exception
return sig, codegen
@jit(nopython=True)
def call_void_func():
void_func(1)
return 0
@jit(nopython=True)
def call_non_void_func():
non_void_func(1)
return 0
# void func should work
self.assertEqual(call_void_func(), 0)
# not void function should raise exception
with self.assertRaises(LoweringError) as e:
call_non_void_func()
self.assertIn("non-void function returns None", e.exception.msg)
def test_ll_pointer_cast(self):
"""
Usecase test: custom reinterpret cast to turn int values to pointers
"""
from ctypes import CFUNCTYPE, POINTER, c_float, c_int
# Use intrinsic to make a reinterpret_cast operation
def unsafe_caster(result_type):
assert isinstance(result_type, types.CPointer)
@intrinsic
def unsafe_cast(typingctx, src):
self.assertIsInstance(typingctx, typing.Context)
if isinstance(src, types.Integer):
sig = result_type(types.uintp)
# defines the custom code generation
def codegen(context, builder, signature, args):
[src] = args
rtype = signature.return_type
llrtype = context.get_value_type(rtype)
return builder.inttoptr(src, llrtype)
return sig, codegen
return unsafe_cast
# make a nopython function to use our cast op.
# this is not usable from cpython due to the returning of a pointer.
def unsafe_get_ctypes_pointer(src):
raise NotImplementedError("not callable from python")
@overload(unsafe_get_ctypes_pointer, strict=False)
def array_impl_unsafe_get_ctypes_pointer(arrtype):
if isinstance(arrtype, types.Array):
unsafe_cast = unsafe_caster(types.CPointer(arrtype.dtype))
def array_impl(arr):
return unsafe_cast(src=arr.ctypes.data)
return array_impl
# the ctype wrapped function for use in nopython mode
def my_c_fun_raw(ptr, n):
for i in range(n):
print(ptr[i])
prototype = CFUNCTYPE(None, POINTER(c_float), c_int)
my_c_fun = prototype(my_c_fun_raw)
# Call our pointer-cast in a @jit compiled function and use
# the pointer in a ctypes function
@jit(nopython=True)
def foo(arr):
ptr = unsafe_get_ctypes_pointer(arr)
my_c_fun(ptr, arr.size)
# Test
arr = np.arange(10, dtype=np.float32)
with captured_stdout() as buf:
foo(arr)
got = buf.getvalue().splitlines()
buf.close()
expect = list(map(str, arr))
self.assertEqual(expect, got)
def test_serialization(self):
"""
Test serialization of intrinsic objects
"""
# define a intrinsic
@intrinsic
def identity(context, x):
def codegen(context, builder, signature, args):
return args[0]
sig = x(x)
return sig, codegen
# use in a jit function
@jit(nopython=True)
def foo(x):
return identity(x)
self.assertEqual(foo(1), 1)
# get serialization memo
memo = _Intrinsic._memo
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
memo_size += 1
self.assertEqual(memo_size, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size, len(memo))
# check rebuilt foo
self.assertEqual(foo(1), foo_rebuilt(1))
# pickle identity directly
serialized_identity = pickle.dumps(identity)
# memo size unchanged
self.assertEqual(memo_size, len(memo))
# unpickle
identity_rebuilt = pickle.loads(serialized_identity)
# must be the same object
self.assertIs(identity, identity_rebuilt)
# memo size unchanged
self.assertEqual(memo_size, len(memo))
def test_deserialization(self):
"""
Test deserialization of intrinsic
"""
def defn(context, x):
def codegen(context, builder, signature, args):
return args[0]
return x(x), codegen
memo = _Intrinsic._memo
memo_size = len(memo)
# invoke _Intrinsic indirectly to avoid registration which keeps an
# internal reference inside the compiler
original = _Intrinsic("foo", defn)
self.assertIs(original._defn, defn)
pickled = pickle.dumps(original)
# by pickling, a new memo entry is created
memo_size += 1
self.assertEqual(memo_size, len(memo))
del original # remove original before unpickling
# by deleting, the memo entry is NOT removed due to recent
# function queue
self.assertEqual(memo_size, len(memo))
# Manually force clear of _recent queue
_Intrinsic._recent.clear()
memo_size -= 1
self.assertEqual(memo_size, len(memo))
rebuilt = pickle.loads(pickled)
# verify that the rebuilt object is different
self.assertIsNot(rebuilt._defn, defn)
# the second rebuilt object is the same as the first
second = pickle.loads(pickled)
self.assertIs(rebuilt._defn, second._defn)
class TestRegisterJitable(unittest.TestCase):
def test_no_flags(self):
@register_jitable
def foo(x, y):
return x + y
def bar(x, y):
return foo(x, y)
cbar = jit(nopython=True)(bar)
expect = bar(1, 2)
got = cbar(1, 2)
self.assertEqual(expect, got)
def test_flags_no_nrt(self):
@register_jitable(_nrt=False)
def foo(n):
return np.arange(n)
def bar(n):
return foo(n)
self.assertEqual(bar(3).tolist(), [0, 1, 2])
cbar = jit(nopython=True)(bar)
with self.assertRaises(errors.TypingError) as raises:
cbar(2)
msg = (
"Only accept returning of array passed into the function as "
"argument"
)
self.assertIn(msg, str(raises.exception))
class TestImportCythonFunction(unittest.TestCase):
@unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed")
def test_getting_function(self):
addr = get_cython_function_address(
"scipy.special.cython_special", "j0"
)
functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
_j0 = functype(addr)
j0 = jit(nopython=True)(lambda x: _j0(x))
self.assertEqual(j0(0), 1)
def test_missing_module(self):
with self.assertRaises(ImportError) as raises:
get_cython_function_address("fakemodule", "fakefunction")
# The quotes are not there in Python 2
msg = "No module named '?fakemodule'?"
match = re.match(msg, str(raises.exception))
self.assertIsNotNone(match)
@unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed")
def test_missing_function(self):
with self.assertRaises(ValueError) as raises:
get_cython_function_address(
"scipy.special.cython_special", "foo"
)
msg = (
"No function 'foo' found in __pyx_capi__ of "
"'scipy.special.cython_special'"
)
self.assertEqual(msg, str(raises.exception))
@overload_method(
MyDummyType, "method_jit_option_check_nrt", jit_options={"_nrt": True}
)
def ov_method_jit_option_check_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
@overload_method(
MyDummyType, "method_jit_option_check_no_nrt", jit_options={"_nrt": False}
)
def ov_method_jit_option_check_no_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
@overload_attribute(
MyDummyType, "attr_jit_option_check_nrt", jit_options={"_nrt": True}
)
def ov_attr_jit_option_check_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
@overload_attribute(
MyDummyType, "attr_jit_option_check_no_nrt", jit_options={"_nrt": False}
)
def ov_attr_jit_option_check_no_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
class TestJitOptionsNoNRT(TestCase):
# Test overload*(jit_options={...}) by turning off _nrt
def check_error_no_nrt(self, func, *args, **kwargs):
# Check that the compilation fails with a complaint about dynamic array
msg = (
"Only accept returning of array passed into "
"the function as argument"
)
with self.assertRaises(errors.TypingError) as raises:
func(*args, **kwargs)
self.assertIn(msg, str(raises.exception))
def no_nrt_overload_check(self, flag):
def dummy():
return np.arange(10)
@overload(dummy, jit_options={"_nrt": flag})
def ov_dummy():
def dummy():
return np.arange(10)
return dummy
@njit
def foo():
return dummy()
if flag:
self.assertPreciseEqual(foo(), np.arange(10))
else:
self.check_error_no_nrt(foo)
def test_overload_no_nrt(self):
self.no_nrt_overload_check(True)
self.no_nrt_overload_check(False)
def test_overload_method_no_nrt(self):
@njit
def udt(x):
return x.method_jit_option_check_nrt()
self.assertPreciseEqual(udt(mydummy), np.arange(10))
@njit
def udt(x):
return x.method_jit_option_check_no_nrt()
self.check_error_no_nrt(udt, mydummy)
def test_overload_attribute_no_nrt(self):
@njit
def udt(x):
return x.attr_jit_option_check_nrt
self.assertPreciseEqual(udt(mydummy), np.arange(10))
@njit
def udt(x):
return x.attr_jit_option_check_no_nrt
self.check_error_no_nrt(udt, mydummy)
class TestBoxingCallingJIT(TestCase):
def setUp(self):
super().setUp()
many = base_dummy_type_factory("mydummy2")
self.DynTypeType, self.DynType, self.dyn_type_type = many
self.dyn_type = self.DynType()
def test_unboxer_basic(self):
# Implements an unboxer on DynType that calls an intrinsic into the
# unboxer code.
magic_token = 0xCAFE
magic_offset = 123
@intrinsic
def my_intrinsic(typingctx, val):
# An intrinsic that returns `val + magic_offset`
def impl(context, builder, sig, args):
[val] = args
return builder.add(val, val.type(magic_offset))
sig = signature(val, val)
return sig, impl
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
# The unboxer that calls some jitcode
def bridge(x):
# proof that this is a jit'ed context by calling jit only
# intrinsic
return my_intrinsic(x)
args = [c.context.get_constant(types.intp, magic_token)]
sig = signature(types.voidptr, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return NativeValue(res, is_error=is_error)
@box(self.DynTypeType)
def boxer(typ, val, c):
# The boxer that returns an integer representation
res = c.builder.ptrtoint(val, cgutils.intp_t)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
out = passthru(self.dyn_type)
self.assertEqual(out, magic_token + magic_offset)
def test_unboxer_raise(self):
# Testing exception raising in jitcode called from unboxing.
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
# The unboxer that calls some jitcode
def bridge(x):
if x > 0:
raise ValueError("cannot be x > 0")
return x
args = [c.context.get_constant(types.intp, 1)]
sig = signature(types.voidptr, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return NativeValue(res, is_error=is_error)
@box(self.DynTypeType)
def boxer(typ, val, c):
# The boxer that returns an integer representation
res = c.builder.ptrtoint(val, cgutils.intp_t)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
with self.assertRaises(ValueError) as raises:
passthru(self.dyn_type)
self.assertIn(
"cannot be x > 0", str(raises.exception),
)
def test_boxer(self):
# Call jitcode inside the boxer
magic_token = 0xCAFE
magic_offset = 312
@intrinsic
def my_intrinsic(typingctx, val):
# An intrinsic that returns `val + magic_offset`
def impl(context, builder, sig, args):
[val] = args
return builder.add(val, val.type(magic_offset))
sig = signature(val, val)
return sig, impl
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@box(self.DynTypeType)
def boxer(typ, val, c):
# Note: this doesn't do proper error handling
def bridge(x):
return my_intrinsic(x)
args = [c.context.get_constant(types.intp, magic_token)]
sig = signature(types.intp, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
r = passthru(self.dyn_type)
self.assertEqual(r, magic_token + magic_offset)
def test_boxer_raise(self):
# Call jitcode inside the boxer
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@box(self.DynTypeType)
def boxer(typ, val, c):
def bridge(x):
if x > 0:
raise ValueError("cannot do x > 0")
return x
args = [c.context.get_constant(types.intp, 1)]
sig = signature(types.intp, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
# The error handling
retval = cgutils.alloca_once(c.builder, c.pyapi.pyobj, zfill=True)
with c.builder.if_then(c.builder.not_(is_error)):
obj = c.pyapi.long_from_ssize_t(res)
c.builder.store(obj, retval)
return c.builder.load(retval)
@njit
def passthru(x):
return x
with self.assertRaises(ValueError) as raises:
passthru(self.dyn_type)
self.assertIn(
"cannot do x > 0", str(raises.exception),
)
def with_objmode_cache_ov_example(x):
# This is the function stub for overloading inside
# TestCachingOverloadObjmode.test_caching_overload_objmode
pass
class TestCachingOverloadObjmode(TestCase):
"""Test caching of the use of overload implementations that use
`with objmode`
"""
_numba_parallel_test_ = False
def setUp(self):
warnings.simplefilter("error", errors.NumbaWarning)
def tearDown(self):
warnings.resetwarnings()
def test_caching_overload_objmode(self):
cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", cache_dir):
def realwork(x):
# uses numpy code
arr = np.arange(x) / x
return np.linalg.norm(arr)
def python_code(x):
# create indirections
return realwork(x)
@overload(with_objmode_cache_ov_example)
def _ov_with_objmode_cache_ov_example(x):
def impl(x):
with objmode(y="float64"):
y = python_code(x)
return y
return impl
@njit(cache=True)
def testcase(x):
return with_objmode_cache_ov_example(x)
expect = realwork(123)
got = testcase(123)
self.assertEqual(got, expect)
testcase_cached = njit(cache=True)(testcase.py_func)
got = testcase_cached(123)
self.assertEqual(got, expect)
@classmethod
def check_objmode_cache_ndarray(cls):
def do_this(a, b):
return np.sum(a + b)
def do_something(a, b):
return np.sum(a + b)
@overload(do_something)
def overload_do_something(a, b):
def _do_something_impl(a, b):
with objmode(y='float64'):
y = do_this(a, b)
return y
return _do_something_impl
@njit(cache=True)
def test_caching():
a = np.arange(20)
b = np.arange(20)
return do_something(a, b)
got = test_caching()
expect = test_caching.py_func()
# Check result
if got != expect:
raise AssertionError("incorrect result")
return test_caching
@classmethod
def check_objmode_cache_ndarray_check_cache(cls):
disp = cls.check_objmode_cache_ndarray()
if len(disp.stats.cache_misses) != 0:
raise AssertionError('unexpected cache miss')
if len(disp.stats.cache_hits) <= 0:
raise AssertionError("unexpected missing cache hit")
def test_check_objmode_cache_ndarray(self):
# See issue #6130.
# Env is missing after cache load.
cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", cache_dir):
# Test in local process to populate the cache.
self.check_objmode_cache_ndarray()
# Run in new process to use the cache in a fresh process.
res = run_in_new_process_in_cache_dir(
self.check_objmode_cache_ndarray_check_cache, cache_dir
)
self.assertEqual(res['exitcode'], 0)
class TestMisc(TestCase):
def test_is_jitted(self):
def foo(x):
pass
self.assertFalse(is_jitted(foo))
self.assertTrue(is_jitted(njit(foo)))
self.assertFalse(is_jitted(vectorize(foo)))
self.assertFalse(is_jitted(vectorize(parallel=True)(foo)))
self.assertFalse(
is_jitted(guvectorize("void(float64[:])", "(m)")(foo))
)
class TestOverloadPreferLiteral(TestCase):
def test_overload(self):
def prefer_lit(x):
pass
def non_lit(x):
pass
def ov(x):
if isinstance(x, types.IntegerLiteral):
# With prefer_literal=False, this branch will not be reached.
if x.literal_value == 1:
def impl(x):
return 0xcafe
return impl
else:
raise errors.TypingError('literal value')
else:
def impl(x):
return x * 100
return impl
overload(prefer_lit, prefer_literal=True)(ov)
overload(non_lit)(ov)
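        # With prefer_literal=True, typing sees IntegerLiteral(1) first and the
        # literal branch returns 0xcafe; with the default prefer_literal=False,
        # the literal branch is never reached, so non_lit(1) computes 1 * 100.
        # The assertions below check exactly this difference.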
@njit
def check_prefer_lit(x):
return prefer_lit(1), prefer_lit(2), prefer_lit(x)
a, b, c = check_prefer_lit(3)
self.assertEqual(a, 0xcafe)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
@njit
def check_non_lit(x):
return non_lit(1), non_lit(2), non_lit(x)
a, b, c = check_non_lit(3)
self.assertEqual(a, 100)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
def test_overload_method(self):
def ov(self, x):
if isinstance(x, types.IntegerLiteral):
# With prefer_literal=False, this branch will not be reached.
if x.literal_value == 1:
def impl(self, x):
return 0xcafe
return impl
else:
raise errors.TypingError('literal value')
else:
def impl(self, x):
return x * 100
return impl
overload_method(
MyDummyType, "method_prefer_literal",
prefer_literal=True,
)(ov)
overload_method(
MyDummyType, "method_non_literal",
prefer_literal=False,
)(ov)
@njit
def check_prefer_lit(dummy, x):
return (
dummy.method_prefer_literal(1),
dummy.method_prefer_literal(2),
dummy.method_prefer_literal(x),
)
a, b, c = check_prefer_lit(MyDummy(), 3)
self.assertEqual(a, 0xcafe)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
@njit
def check_non_lit(dummy, x):
return (
dummy.method_non_literal(1),
dummy.method_non_literal(2),
dummy.method_non_literal(x),
)
a, b, c = check_non_lit(MyDummy(), 3)
self.assertEqual(a, 100)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_8884 | from __future__ import print_function
import numpy as np
import argparse
import torch
import torch.utils.data as data_utils
import torch.optim as optim
from torch.autograd import Variable
from dataloader import MnistBags
from grape.grape_dataloader import VineBags
from model_old import Attention, GatedAttention
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST bags Example')
parser.add_argument('--epochs', type=int, default=20, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
help='learning rate (default: 0.0005)')
parser.add_argument('--reg', type=float, default=10e-5, metavar='R',
help='weight decay')
parser.add_argument('--target_number', type=int, default=9, metavar='T',
                    help='bags have a positive label if they contain at least one 9')
parser.add_argument('--mean_bag_length', type=int, default=10, metavar='ML',
help='average bag length')
parser.add_argument('--var_bag_length', type=int, default=2, metavar='VL',
help='variance of bag length')
parser.add_argument('--num_bags_train', type=int, default=200, metavar='NTrain',
help='number of bags in training set')
parser.add_argument('--num_bags_test', type=int, default=50, metavar='NTest',
help='number of bags in test set')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--model', type=str, default='attention', help='Choose between attention and gated_attention')
parser.add_argument("--port", default=52720)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
print('\nGPU is ON!')
print('Load Train and Test Set')
loader_kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = data_utils.DataLoader(MnistBags(target_number=args.target_number,
mean_bag_length=args.mean_bag_length,
var_bag_length=args.var_bag_length,
num_bag=args.num_bags_train,
seed=args.seed,
train=True),
batch_size=1,
shuffle=True,
**loader_kwargs)
test_loader = data_utils.DataLoader(MnistBags(target_number=args.target_number,
mean_bag_length=args.mean_bag_length,
var_bag_length=args.var_bag_length,
num_bag=args.num_bags_test,
seed=args.seed,
train=False),
batch_size=1,
shuffle=False,
**loader_kwargs)
print('Init Model')
if args.model == 'attention':
model = Attention()
elif args.model == 'gated_attention':
model = GatedAttention()
if args.cuda:
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.reg)
def train(epoch):
model.train()
train_loss = 0.
train_error = 0.
for batch_idx, (data, label) in enumerate(train_loader):
bag_label = label[0]
if args.cuda:
data, bag_label = data.cuda(), bag_label.cuda()
data, bag_label = Variable(data), Variable(bag_label)
# reset gradients
optimizer.zero_grad()
# calculate loss and metrics
loss, _ = model.calculate_objective(data, bag_label)
train_loss += loss.data[0]
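        # Note: reading a scalar via `.data[0]` follows the pre-0.4 PyTorch
        # API (consistent with the Variable usage above); on PyTorch >= 0.4
        # the equivalent is `loss.item()`.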
error, _ = model.calculate_classification_error(data, bag_label)
train_error += error
# backward pass
loss.backward()
# step
optimizer.step()
# calculate loss and error for epoch
train_loss /= len(train_loader)
train_error /= len(train_loader)
print('Epoch: {}, Loss: {:.4f}, Train error: {:.4f}'.format(epoch, train_loss.cpu().numpy()[0], train_error))
def test():
model.eval()
test_loss = 0.
test_error = 0.
for batch_idx, (data, label) in enumerate(test_loader):
bag_label = label[0]
instance_labels = label[1]
if args.cuda:
data, bag_label = data.cuda(), bag_label.cuda()
data, bag_label = Variable(data), Variable(bag_label)
loss, attention_weights = model.calculate_objective(data, bag_label)
test_loss += loss.data[0]
error, predicted_label = model.calculate_classification_error(data, bag_label)
test_error += error
        if batch_idx < 5:  # print bag labels and instance labels for first 5 bags
bag_level = (bag_label.cpu().data.numpy()[0], int(predicted_label.cpu().data.numpy()[0][0]))
instance_level = list(zip(instance_labels.numpy()[0].tolist(),
np.round(attention_weights.cpu().data.numpy()[0], decimals=3).tolist()))
print('\nTrue Bag Label, Predicted Bag Label: {}\n'
'True Instance Labels, Attention Weights: {}'.format(bag_level, instance_level))
test_error /= len(test_loader)
test_loss /= len(test_loader)
print('\nTest Set, Loss: {:.4f}, Test error: {:.4f}'.format(test_loss.cpu().numpy()[0], test_error))
if __name__ == "__main__":
print('Start Training')
for epoch in range(1, args.epochs + 1):
train(epoch)
print('Start Testing')
test()
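# Example invocation (the script filename below is a placeholder, not taken
# from this file; the flags mirror the argparse defaults above):
#   python main.py --model attention --epochs 20 --lr 0.0005 --target_number 9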
|
the-stack_0_8885 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from pants.backend.python.subsystems.python_tool_base import PythonToolBase
from pants.backend.python.tasks.python_tool_prep_base import PythonToolInstance, PythonToolPrepBase
from pants.task.task import Task
from pants.util.contextutil import temporary_dir
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class Tool(PythonToolBase):
options_scope = 'test-tool'
# TODO: make a fake pex tool instead of depending on a real python requirement!
default_requirements = [
'pex==1.5.3',
]
default_entry_point = 'pex.bin.pex:main'
@classmethod
def register_options(cls, register):
super().register_options(register)
register('--needs-to-be-invoked-for-some-reason', type=bool, default=True)
class ToolInstance(PythonToolInstance):
pass
class ToolPrep(PythonToolPrepBase):
options_scope = 'tool-prep-task'
tool_subsystem_cls = Tool
tool_instance_cls = ToolInstance
def will_be_invoked(self):
return Tool.scoped_instance(self).get_options().needs_to_be_invoked_for_some_reason
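  # When will_be_invoked() returns False, the prep task is expected to skip
  # resolving the tool pex entirely; test_tool_noop below relies on this by
  # asserting that no ToolInstance product gets registered.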
class ToolTask(Task):
options_scope = 'tool-task'
@classmethod
def prepare(cls, options, round_manager):
super().prepare(options, round_manager)
round_manager.require_data(ToolPrep.tool_instance_cls)
def execute(self):
tool_for_pex = self.context.products.get_data(ToolPrep.tool_instance_cls)
stdout, _, exit_code, _ = tool_for_pex.output(['--version'])
assert re.match(r'.*\.pex 1.5.3', stdout)
assert 0 == exit_code
class PythonToolPrepTest(PythonTaskTestBase):
@classmethod
def task_type(cls):
return ToolTask
def _assert_tool_execution_for_python_version(self, use_py3=True):
scope_string = '3' if use_py3 else '2'
constraint_string = 'CPython>=3' if use_py3 else 'CPython<3'
tool_prep_type = self.synthesize_task_subtype(ToolPrep, 'tp_scope_py{}'.format(scope_string))
with temporary_dir() as tmp_dir:
context = self.context(for_task_types=[tool_prep_type], for_subsystems=[Tool], options={
'': {
'pants_bootstrapdir': tmp_dir,
},
'test-tool': {
'interpreter_constraints': [constraint_string],
},
})
tool_prep_task = tool_prep_type(context, os.path.join(
self.pants_workdir, 'tp_py{}'.format(scope_string)))
tool_prep_task.execute()
pex_tool = context.products.get_data(ToolPrep.tool_instance_cls)
self.assertIsNotNone(pex_tool)
# Check that the tool can be created and executed successfully.
self.create_task(context).execute()
# Check that our pex tool wrapper was constructed with the expected interpreter.
self.assertTrue(pex_tool.interpreter.identity.matches(constraint_string))
return pex_tool
def test_tool_execution(self):
"""Test that python tools are fingerprinted by python interpreter."""
py3_pex_tool = self._assert_tool_execution_for_python_version(use_py3=True)
py3_pex_tool_path = py3_pex_tool.pex.path()
self.assertTrue(os.path.isdir(py3_pex_tool_path))
py2_pex_tool = self._assert_tool_execution_for_python_version(use_py3=False)
py2_pex_tool_path = py2_pex_tool.pex.path()
self.assertTrue(os.path.isdir(py2_pex_tool_path))
self.assertNotEqual(py3_pex_tool_path, py2_pex_tool_path)
def test_tool_noop(self):
tool_prep_type = self.synthesize_task_subtype(ToolPrep, 'tool_prep')
context = self.context(for_task_types=[tool_prep_type], for_subsystems=[Tool], options={
'test-tool': {
'needs_to_be_invoked_for_some_reason': False,
},
})
tool_prep_task = tool_prep_type(context, os.path.join(self.pants_workdir, 'tool_prep_dir'))
tool_prep_task.execute()
self.assertIsNone(context.products.get_data(ToolPrep.tool_instance_cls))
|
the-stack_0_8888 | # orm/mapper.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :class:`~sqlalchemy.orm.`.
"""
from __future__ import absolute_import
import types
import weakref
from itertools import chain
from collections import deque
from .. import sql, util, log, exc as sa_exc, event, schema, inspection
from ..sql import expression, visitors, operators, util as sql_util
from . import instrumentation, attributes, exc as orm_exc, loading
from . import properties
from . import util as orm_util
from .interfaces import MapperProperty, InspectionAttr, _MappedAttribute
from .base import _class_to_mapper, _state_mapper, class_mapper, \
state_str, _INSTRUMENTOR
from .path_registry import PathRegistry
import sys
_mapper_registry = weakref.WeakKeyDictionary()
_already_compiling = False
_memoized_configured_property = util.group_expirable_memoized_property()
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE')
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(InspectionAttr):
"""Define the correlation of class attributes to database table
columns.
The :class:`.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
makes usage of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`.Mapper` which maintains it can be acquired
using the :func:`.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_new_mappers = False
def __init__(self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
extension=None,
order_by=False,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
"""Return a new :class:`~.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
.. seealso::
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`.Table` produced as a result of the ``__tablename__``
and :class:`.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
          of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
          inserted or updated, which can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension` instances which will be applied
to all operations by this :class:`.Mapper`. **Deprecated.**
Please see :class:`.MapperEvents`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding :class:`.Mapper`
of one indicating a superclass to which this :class:`.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphanable object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
.. versionadded:: 0.8 - the consideration of a pending object as
an "orphan" has been modified to more closely match the
behavior as that of persistent objects, which is that the object
is expunged from the :class:`.Session` as soon as it is
de-associated from any of its orphan-enabled parents. Previously,
the pending object would be expunged only if de-associated
from all of its orphan-enabled parents. The new flag
``legacy_is_orphan`` is added to :func:`.orm.mapper` which
re-establishes the legacy behavior.
:param non_primary: Specify that this :class:`.Mapper` is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
:paramref:`.Mapper.non_primary` is not an often used option, but
is useful in some specific :func:`.relationship` cases.
.. seealso::
:ref:`relationship_non_primary_mapper`
:param order_by: A single :class:`.Column` or list of :class:`.Column`
objects for which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`.relationship`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`.Column` object that's
present in the mapped :class:`.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
.. versionchanged:: 0.7.4
``polymorphic_on`` may be specified as a SQL expression,
or refer to any attribute configured with
:func:`.column_property`, or to the string name of one.
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that :class:`.Column`
objects present in
the mapped :class:`.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`.Column` objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
          detect whether an UPDATE statement matches the last known
          version id; if it does not, a
          :class:`~sqlalchemy.orm.exc.StaleDataError` exception is
          thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
"""
self.class_ = util.assert_arg_type(class_, type, 'class_')
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
if order_by is not False:
self.order_by = util.to_list(order_by)
else:
self.order_by = order_by
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
self.local_table = local_table
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = expression._clause_element_as_expr(
polymorphic_on)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self._deprecated_extensions = util.to_list(extension or [])
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
self.configured = False
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
_CONFIGURE_MUTEX.acquire()
try:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_legacy_instrument_class()
self._configure_class_instrumentation()
self._configure_listeners()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
Mapper._new_mappers = True
self._log("constructed")
self._expire_memoizations()
finally:
_CONFIGURE_MUTEX.release()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`.Selectable` which this :class:`.Mapper` manages.
Typically is an instance of :class:`.Table` or :class:`.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`~.Mapper.mapped_table`.
"""
mapped_table = None
"""The :class:`.Selectable` to which this :class:`.Mapper` is mapped.
Typically an instance of :class:`.Table`, :class:`.Join`, or
:class:`.Alias`.
The "mapped" table is the selectable that
the mapper selects from during queries. For non-inheriting
mappers, the mapped table is the same as the "local" table.
For joined-table inheritance mappers, mapped_table references the
full :class:`.Join` representing full rows for this particular
subclass. For single-table inheritance mappers, mapped_table
references the base table.
.. seealso::
:attr:`~.Mapper.local_table`.
"""
inherits = None
"""References the :class:`.Mapper` which this :class:`.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = None
"""Represent ``True`` if this :class:`.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`.Table` objects
which this :class:`.Mapper` is aware of.
If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias`
representing a :class:`.Select`, the individual :class:`.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`.Column` objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`.Mapper`.
This list is against the selectable in :attr:`~.Mapper.mapped_table`. In
the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a :class:`.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`.Mapper`
features a ``primary_key`` argument that can override what the
:class:`.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`.Mapper` is a single table
inheritance mapper.
:attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`.Mapper` is a "non-primary"
    mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`~.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`~.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`~.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`.Mapper`. In an inheritance scenario, it references
the :class:`.Mapper` which is parent to all other :class:`.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`.Column` or other scalar expression
objects maintained by this :class:`.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`.Table` object, except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`~.orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
c = None
"""A synonym for :attr:`~.Mapper.columns`."""
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inherting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'" %
(self.class_.__name__, self.inherits.class_.__name__))
if self.non_primary != self.inherits.non_primary:
np = not self.non_primary and "primary" or "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper" %
(np, self.class_.__name__, np))
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.mapped_table = self.inherits.mapped_table
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.mapped_table = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table,
self.local_table)
self.mapped_table = sql.join(
self.inherits.mapped_table,
self.local_table,
self.inherit_condition)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = \
sql_util.criterion_as_pairs(
self.mapped_table.onclause,
consider_as_foreign_keys=fks)
else:
self.mapped_table = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif self.inherits.version_id_col is not None and \
self.version_id_col is not self.inherits.version_id_col:
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning." %
(self.version_id_col.description,
self.inherits.version_id_col.description)
)
if self.order_by is False and \
not self.concrete and \
self.inherits.order_by is not False:
self.order_by = self.inherits.order_by
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity." %
(self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self, self.polymorphic_identity)
)
self.polymorphic_map[self.polymorphic_identity] = self
else:
self._all_tables = set()
self.base_mapper = self
self.mapped_table = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.mapped_table is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a mapped_table specified."
% self)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == '*':
self.with_polymorphic = ('*', None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(
with_polymorphic[0], util.string_types + (tuple, list)):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
if self.configured:
self._expire_memoizations()
def _set_concrete_base(self, mapper):
"""Set the given :class:`.Mapper` as the 'inherits' for this
:class:`.Mapper`, assuming this :class:`.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_legacy_instrument_class(self):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_instrument_class(self, ext)
def _configure_listeners(self):
if self.inherits:
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_listener(self, ext)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class_ and entity name.
Subsequent calls to ``class_mapper()`` for the class_/entity
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_)
self.class_manager = manager
self._identity_class = manager.mapper._identity_class
_mapper_registry[self] = True
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
"Use non_primary=True to "
"create a non primary Mapper. clear_mappers() will "
"remove *all* current mappers from all classes." %
self.class_)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
# note: this *must be called before instrumentation.register_class*
# to maintain the documented behavior of instrument_class
self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_)
self.class_manager = manager
manager.mapper = self
manager.deferred_scalar_loader = util.partial(
loading.load_scalar_attributes, self)
# The remaining members can be added by any mapper,
# e_name None or not.
if manager.info.get(_INSTRUMENTOR, False):
return
event.listen(manager, 'first_init', _event_on_first_init, raw=True)
event.listen(manager, 'init', _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
if hasattr(method, '__sa_reconstructor__'):
self._reconstructor = method
event.listen(manager, 'load', _event_on_load, raw=True)
elif hasattr(method, '__sa_validators__'):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
manager.info[_INSTRUMENTOR] = self
@classmethod
def _configure_all(cls):
"""Class-level path to the :func:`.configure_mappers` call.
"""
configure_mappers()
def dispose(self):
# Disable any attribute-based compilation.
self.configured = True
if hasattr(self, '_configure_failed'):
del self._configure_failed
if not self.non_primary and \
self.class_manager is not None and \
self.class_manager.is_mapped and \
self.class_manager.mapper is self:
instrumentation.unregister_class(self.class_)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.mapped_table)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(chain(*[
col.proxy_set for col in
self._columntoproperty]))
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.mapped_table])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = \
util.ordered_column_set(t.primary_key).\
intersection(pk_cols)
self._cols_by_table[t] = \
util.ordered_column_set(t.c).\
intersection(all_cols)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif self.mapped_table not in self._pks_by_table or \
len(self._pks_by_table[self.mapped_table]) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
elif self.local_table not in self._pks_by_table and \
isinstance(self.local_table, schema.Table):
util.warn("Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description)
if self.inherits and \
not self.concrete and \
not self._primary_key_argument:
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or mapped_table pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[self.mapped_table.corresponding_column(c) for c in
self._primary_key_argument],
ignore_nonexistent_tables=True)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.mapped_table],
ignore_nonexistent_tables=True)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props and
(not hasattr(col, 'table') or
col.table not in self._cols_by_table))
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
self.columns = self.c = util.OrderedProperties()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.mapped_table.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or '') + column.key
if self._should_exclude(
column.key, column_key,
local=self.local_table.c.contains_column(column),
column=column
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(column_key,
column,
init=False,
setparent=True)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError:
raise sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
polymorphic_key = prop.key
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(self.polymorphic_on,
properties.ColumnProperty):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on")
prop = self.polymorphic_on
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
elif not expression._is_column(self.polymorphic_on):
# polymorphic_on is not a Column and not a ColumnProperty;
# not supported right now.
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's mapped_table
col = self.mapped_table.corresponding_column(
self.polymorphic_on)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression isn't present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either mapped_table or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None or
self.with_polymorphic[1].
corresponding_column(col) is None):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as when use
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, 'key', None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" %
col.key)
else:
self.polymorphic_on = col = \
col.label("_sa_polymorphic_on")
key = col.key
self._configure_property(
key,
properties.ColumnProperty(col,
_instrument=instrument),
init=init, setparent=True)
polymorphic_key = key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.mapped_table is mapper.mapped_table:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = \
self.mapped_table.corresponding_column(
mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used so is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = \
mapper._set_polymorphic_identity
self._validate_polymorphic_identity = \
mapper._validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state, dict_,
state.manager.mapper.polymorphic_identity,
None)
def _validate_polymorphic_identity(mapper, state, dict_):
if polymorphic_key in dict_ and \
dict_[polymorphic_key] not in \
mapper._acceptable_polymorphic_identities:
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key])
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = \
_validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
            self._validate_polymorphic_identity = None
@_memoized_configured_property
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@_memoized_configured_property
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.mapped_table is self.mapped_table:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@_memoized_configured_property
def _prop_set(self):
return frozenset(self._props.values())
def _adapt_inherited_property(self, key, prop, init):
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
init=init, setparent=True)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.mapped_table.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.mapped_table._reset_exported()
col = self.mapped_table.corresponding_column(
prop.columns[0])
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, '_readonly_props') and \
(not hasattr(col, 'table') or
col.table not in self._cols_by_table):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if hasattr(self, '_cols_by_table') and \
col.table in self._cols_by_table and \
col not in self._cols_by_table[col.table]:
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, '_is_polymorphic_discriminator'):
prop._is_polymorphic_discriminator = \
(col is self.polymorphic_on or
prop.columns[0] is self.polymorphic_on)
self.columns[key] = col
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and \
getattr(self._props[key], '_mapped_by_synonym', False):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if key in self._props and \
not isinstance(prop, properties.ColumnProperty) and \
not isinstance(self._props[key], properties.ColumnProperty):
util.warn("Property %s on %s being replaced with new "
"property %s; the old property will be discarded" % (
self._props[key],
self,
prop,
))
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProprerty` given a
:class:`.Column` object. """
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
if not expression._is_column(column):
raise sa_exc.ArgumentError(
"%s=%r is not an instance of MapperProperty or Column"
% (key, prop))
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
not self._inherits_equated_pairs or
(prop.columns[0], column) not in self._inherits_equated_pairs
) and \
not prop.columns[0].shares_lineage(column) and \
prop.columns[0] is not self.version_id_col and \
column is not self.version_id_col:
warn_only = prop.parent is not self
msg = ("Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key))
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log("inserting column to existing list "
"in properties.ColumnProperty %s" % (key))
return prop
elif prop is None or isinstance(prop,
properties.ConcreteInheritedProperty):
mapped_column = []
for c in columns:
mc = self.mapped_table.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.mapped_table._reset_exported()
mc = self.mapped_table.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c))
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." %
(key, self, column.key, prop))
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.items()]
for key, prop in l:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
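    # Illustrative sketch (not part of the original source): adding a mapped
    # attribute to an already-configured mapper. ``User`` and ``user_table``
    # are assumed names for a mapped class and its Table.
    #
    #     from sqlalchemy.orm import class_mapper, deferred
    #
    #     class_mapper(User).add_property(
    #         "bio", deferred(user_table.c.bio))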
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
_memoized_configured_property.expire_instance(mapper)
@property
def _log_desc(self):
return "(" + self.class_.__name__ + \
"|" + \
(self.local_table is not None and
self.local_table.description or
str(self.local_table)) +\
(self.non_primary and
"|non-primary" or "") + ")"
def _log(self, msg, *args):
self.logger.info(
"%s " + msg, *((self._log_desc,) + args)
)
def _log_debug(self, msg, *args):
self.logger.debug(
"%s " + msg, *((self._log_desc,) + args)
)
def __repr__(self):
return '<Mapper at 0x%x; %s>' % (
id(self), self.class_.__name__)
def __str__(self):
return "Mapper|%s|%s%s" % (
self.class_.__name__,
self.local_table is not None and
self.local_table.description or None,
self.non_primary and "|non-primary" or ""
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key.
"""
if _configure_mappers and Mapper._new_mappers:
configure_mappers()
try:
return self._props[key]
except KeyError:
raise sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key))
def get_property_by_column(self, column):
"""Given a :class:`.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
if Mapper._new_mappers:
configure_mappers()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == '*':
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" %
(m, self))
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(sql_util.find_tables(selectable,
include_aliases=True))
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mapper's
mapped tables.
"""
from_obj = self.mapped_table
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used.")
elif not m.single:
if innerjoin:
from_obj = from_obj.join(m.local_table,
m.inherit_condition)
else:
from_obj = from_obj.outerjoin(m.local_table,
m.inherit_condition)
return from_obj
@_memoized_configured_property
def _single_table_criterion(self):
if self.single and \
self.inherits and \
self.polymorphic_on is not None:
return self.polymorphic_on.in_(
m.polymorphic_identity
for m in self.self_and_descendants)
else:
return None
@_memoized_configured_property
def _with_polymorphic_mappers(self):
if Mapper._new_mappers:
configure_mappers()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@_memoized_configured_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.mapped_table
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable),
False)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`.Mapper` objects included in the
default "polymorphic" query.
"""
@_memoized_configured_property
def _insert_cols_evaluating_none(self):
return dict(
(
table,
frozenset(
col.key for col in columns
if col.type.evaluates_none
)
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _insert_cols_as_none(self):
return dict(
(
table,
frozenset(
col.key for col in columns
if not col.primary_key and
not col.server_default and not col.default
and not col.type.evaluates_none)
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _propkey_to_col(self):
return dict(
(
table,
dict(
(self._columntoproperty[col].key, col)
for col in columns
)
)
for table, columns in self._cols_by_table.items()
)
@_memoized_configured_property
def _pk_keys_by_table(self):
return dict(
(
table,
frozenset([col.key for col in pks])
)
for table, pks in self._pks_by_table.items()
)
@_memoized_configured_property
def _server_default_cols(self):
return dict(
(
table,
frozenset([
col for col in columns
if col.server_default is not None])
)
for table, columns in self._cols_by_table.items()
)
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
by default.
Normally, this is equivalent to :attr:`.mapped_table`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(self, spec=None, selectable=False,
innerjoin=False):
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers,
innerjoin)
@_memoized_configured_property
def _polymorphic_properties(self):
return list(self._iterate_polymorphic_properties(
self._with_polymorphic_mappers))
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(*[
list(mapper.iterate_properties) for mapper in
[self] + mappers
])
):
if getattr(c, '_is_polymorphic_discriminator', False) and \
(self.polymorphic_on is None or
c.columns[0] is not self.polymorphic_on):
continue
yield c
@util.memoized_property
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
        associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
:class:`.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
        returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
the :attr:`.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(self._props)
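    # Illustrative sketch (not part of the original source): reading the
    # ``attrs`` namespace via the inspection API. ``User`` is an assumed
    # mapped class with a ``name`` column.
    #
    #     from sqlalchemy import inspect
    #
    #     insp = inspect(User)
    #     name_prop = insp.attrs['name']            # ColumnProperty for "name"
    #     keys = [prop.key for prop in insp.attrs]  # iterating yields MapperProperty objects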
@util.memoized_property
def all_orm_descriptors(self):
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`.Mapper.attrs`.
.. warning::
the :attr:`.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. versionadded:: 0.8.0
.. seealso::
:attr:`.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes()))
@_memoized_configured_property
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.SynonymProperty)
@_memoized_configured_property
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@_memoized_configured_property
def relationships(self):
"""Return a namespace of all :class:`.RelationshipProperty`
properties maintained by this :class:`.Mapper`.
.. warning::
the :attr:`.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.RelationshipProperty)
@_memoized_configured_property
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.CompositeProperty)
def _filter_properties(self, type_):
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(util.OrderedDict(
(k, v) for k, v in self._props.items()
if isinstance(v, type_)
))
@_memoized_configured_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [(primary_key, sql.bindparam(None, type_=primary_key.type))
for primary_key in self.primary_key]
return sql.and_(*[k == v for (k, v) in params]), \
util.column_dict(params)
@_memoized_configured_property
def _equivalent_columns(self):
"""Create a map of all *equivalent* columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, i.e.
{
tablea.col1:
set([tableb.col1, tablec.col1]),
tablea.col2:
set([tabled.col2])
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {},
{'binary': visit_binary})
return result
def _is_userland_descriptor(self, obj):
if isinstance(obj, (_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement)):
return False
else:
return True
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
if local:
if self.class_.__dict__.get(assigned_name, None) is not None \
and self._is_userland_descriptor(
self.class_.__dict__[assigned_name]):
return True
else:
if getattr(self.class_, assigned_name, None) is not None \
and self._is_userland_descriptor(
getattr(self.class_, assigned_name)):
return True
if self.include_properties is not None and \
name not in self.include_properties and \
(column is None or column not in self.include_properties):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and \
(
name in self.exclude_properties or
(column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@_memoized_configured_property
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
for col in pk_cols:
if not result._has_key(col):
return False
else:
return True
def identity_key_from_row(self, row, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.RowProxy` instance. The columns which are
mapped by this :class:`.Mapper` should be locatable in the row,
preferably via the :class:`.Column` object directly (as is the case
when a :func:`.select` construct is executed), or via string names of
the form ``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return self._identity_class, \
tuple(row[column] for column in pk_cols)
def identity_key_from_primary_key(self, primary_key):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
"""
return self._identity_class, tuple(primary_key)
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
"""
return self.identity_key_from_primary_key(
self.primary_key_from_instance(instance))
def _identity_key_from_state(self, state):
dict_ = state.dict
manager = state.manager
return self._identity_class, tuple([
manager[self._columntoproperty[col].key].
impl.get(state, dict_, attributes.PASSIVE_RETURN_NEVER_SET)
for col in self.primary_key
])
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
return self._primary_key_from_state(state, attributes.PASSIVE_OFF)
def _primary_key_from_state(
self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET):
dict_ = state.dict
manager = state.manager
return [
manager[prop.key].
impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
@_memoized_configured_property
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@_memoized_configured_property
def _all_pk_props(self):
collection = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@_memoized_configured_property
def _should_undefer_in_wildcard(self):
cols = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@_memoized_configured_property
def _primary_key_propkeys(self):
return set([prop.key for prop in self._all_pk_props])
def _get_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=attributes.PASSIVE_OFF)
def _get_committed_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.\
get_committed_value(state, dict_, passive=passive)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(chain(
*[sql_util.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns]
))
if self.base_mapper.local_table in tables:
return None
class ColumnsNotAvailable(Exception):
pass
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state, state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if leftval in orm_util._none_set:
raise ColumnsNotAvailable()
binary.left = sql.bindparam(None, leftval,
type_=binary.right.type)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state, state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if rightval in orm_util._none_set:
raise ColumnsNotAvailable()
binary.right = sql.bindparam(None, rightval,
type_=binary.right.type)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table,
expression.TableClause):
return None
if start and not mapper.single:
allconds.append(visitors.cloned_traverse(
mapper.inherit_condition,
{},
{'binary': visit_binary}
)
)
except ColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in attribute_names:
cols.extend(props[key].columns)
return sql.select(cols, cond, use_labels=True)
def cascade_iterator(self, type_, state, halt_on=None):
"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type_:
The name of the cascade rule (i.e. save-update, delete,
etc.)
:param state:
The lead InstanceState. child items will be processed per
the relationships defined for this object's mapper.
        The return values are object instances; this provides a strong
reference so that they don't fall out of scope immediately.
"""
visited_states = set()
prp, mpp = object(), object()
visitables = deque([(deque(self._props.values()), prp,
state, state.dict)])
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(prop.cascade_iterator(
type_, parent_state, parent_dict,
visited_states, halt_on))
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
instance, instance_mapper, corresponding_state, \
corresponding_dict = iterator.popleft()
yield instance, instance_mapper, \
corresponding_state, corresponding_dict
visitables.append((deque(instance_mapper._props.values()),
prp, corresponding_state,
corresponding_dict))
@_memoized_configured_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend([
(super_table, table)
for super_table in super_.tables
])
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if parent is not None and \
dep is not None and \
dep is not parent and \
dep.inherit_condition is not None:
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(sql_util._find_columns(
parent.inherit_condition))
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and \
cols.intersection(
util.reduce(set.union,
[l.proxy_set for l, r in
m._inherits_equated_pairs])
):
result[table].append((m, m._inherits_equated_pairs))
return result
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
most cases is invoked automatically, the first time mappings are used,
as well as whenever mappings are used and additional not-yet-configured
mappers have been constructed.
    Points at which this occurs include when a mapped class is instantiated
into an instance, as well as when the :meth:`.Session.query` method
is used.
The :func:`.configure_mappers` function provides several event hooks
that can be used to augment its functionality. These methods include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` does any work; this can be used to establish
additional options, properties, or related mappings before the operation
proceeds.
    * :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` is complete; at this stage, all
:class:`.Mapper` objects that are known to SQLAlchemy will be fully
configured. Note that the calling application may still have other
mappings that haven't been produced yet, such as if they are in modules
as yet unimported.
"""
if not Mapper._new_mappers:
return
_CONFIGURE_MUTEX.acquire()
try:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
if not Mapper._new_mappers:
return
Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
if getattr(mapper, '_configure_failed', False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Original exception was: %s"
% mapper._configure_failed)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(
mapper, mapper.class_)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, '_configure_failed'):
mapper._configure_failed = exc
raise
Mapper._new_mappers = False
finally:
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
Mapper.dispatch._for_class(Mapper).after_configured()
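# Illustrative sketch (not part of the original source): the event hooks
# described in the docstring above can be wired up roughly like this, then
# configuration forced explicitly; exact event targets may vary by version.
#
#     from sqlalchemy import event
#     from sqlalchemy.orm import Mapper, configure_mappers
#
#     @event.listens_for(Mapper, "before_configured")
#     def receive_before_configured():
#         print("mapper configuration starting")
#
#     @event.listens_for(Mapper, "mapper_configured")
#     def receive_mapper_configured(mapper, class_):
#         print("configured:", class_.__name__)
#
#     configure_mappers()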
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
"""
fn.__sa_reconstructor__ = True
return fn
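# Illustrative sketch (not part of the original source): typical use of the
# reconstructor hook. ``Base``, ``Column`` and ``Integer`` are assumed
# declarative/SQLAlchemy imports.
#
#     class User(Base):
#         __tablename__ = 'user'
#         id = Column(Integer, primary_key=True)
#
#         @reconstructor
#         def init_on_load(self):
#             # runs when the instance is loaded, instead of __init__
#             self._cache = {}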
def validates(*names, **kw):
"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
.. versionadded:: 0.7.7
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop('include_removes', False)
include_backrefs = kw.pop('include_backrefs', True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs
}
return fn
return wrap
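# Illustrative sketch (not part of the original source): a validator applied
# to one column of an assumed declarative class.
#
#     class EmailAddress(Base):
#         __tablename__ = 'address'
#         id = Column(Integer, primary_key=True)
#         email = Column(String)
#
#         @validates('email')
#         def validate_email(self, key, value):
#             if '@' not in value:
#                 raise ValueError("invalid email address: %r" % value)
#             return value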
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.info[_INSTRUMENTOR]
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
instrumentation calls this one when InstanceState
is first generated, and is needed for legacy mutable
attributes to work.
"""
instrumenting_mapper = manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
__slots__ = 'mapper',
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r" % (
column.table.name, column.name, column.key, prop))
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..." %
(column, self.mapper))
|
the-stack_0_8889 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankPaymentTradeNormalpayOperateQueryModel(object):
def __init__(self):
self._order_no = None
self._request_no = None
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
def to_alipay_dict(self):
params = dict()
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.request_no:
if hasattr(self.request_no, 'to_alipay_dict'):
params['request_no'] = self.request_no.to_alipay_dict()
else:
params['request_no'] = self.request_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankPaymentTradeNormalpayOperateQueryModel()
if 'order_no' in d:
o.order_no = d['order_no']
if 'request_no' in d:
o.request_no = d['request_no']
return o
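# Illustrative sketch (not part of the original source): round-tripping the
# model through its dict form; the values below are hypothetical.
#
#     model = MybankPaymentTradeNormalpayOperateQueryModel()
#     model.order_no = "20200101000001"
#     model.request_no = "REQ0001"
#     params = model.to_alipay_dict()
#     # -> {'order_no': '20200101000001', 'request_no': 'REQ0001'}
#     restored = MybankPaymentTradeNormalpayOperateQueryModel.from_alipay_dict(params)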
|
the-stack_0_8890 | #!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~ Copyright (C) 2002-2004 TechGame Networks, LLC.
##~
##~ This library is free software; you can redistribute it and/or
##~ modify it under the terms of the BSD style License as found in the
##~ LICENSE file included with this distribution.
##
## Modified by Dirk Holtwick <[email protected]>, 2007-2008
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import absolute_import
# Added by benjaoming to fix python3 tests
from __future__ import unicode_literals
try:
from future_builtins import filter
except ImportError:
pass
"""CSS-2.1 parser.
The CSS 2.1 Specification this parser was derived from can be found at http://www.w3.org/TR/CSS21/
Primary Classes:
* CSSParser
Parses CSS source forms into results using a Builder Pattern. Must
        provide a concrete implementation of CSSBuilderAbstract.
* CSSBuilderAbstract
        Outlines the interface between CSSParser and its rule-builder.
Compose CSSParser with a concrete implementation of the builder to get
usable results from the CSS parser.
Dependencies:
python 2.3 (or greater)
re
"""
import re
import six
from . import cssSpecial
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def isAtRuleIdent(src, ident):
return re.match(r'^@' + ident + r'\s*', src)
def stripAtRuleIdent(src):
return re.sub(r'^@[a-z\-]+\s*', '', src)
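# Illustrative sketch (not part of the original source): behaviour of the two
# helpers above on a sample at-rule.
#
#     >>> bool(isAtRuleIdent('@media print { body { color: black } }', 'media'))
#     True
#     >>> stripAtRuleIdent('@media print { body { color: black } }')
#     'print { body { color: black } }'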
class CSSSelectorAbstract(object):
"""Outlines the interface between CSSParser and it's rule-builder for selectors.
CSSBuilderAbstract.selector and CSSBuilderAbstract.combineSelectors must
return concrete implementations of this abstract.
See css.CSSMutableSelector for an example implementation.
"""
def addHashId(self, hashId):
raise NotImplementedError('Subclass responsibility')
def addClass(self, class_):
raise NotImplementedError('Subclass responsibility')
def addAttribute(self, attrName):
raise NotImplementedError('Subclass responsibility')
def addAttributeOperation(self, attrName, op, attrValue):
raise NotImplementedError('Subclass responsibility')
def addPseudo(self, name):
raise NotImplementedError('Subclass responsibility')
def addPseudoFunction(self, name, value):
raise NotImplementedError('Subclass responsibility')
class CSSBuilderAbstract(object):
"""Outlines the interface between CSSParser and it's rule-builder. Compose
CSSParser with a concrete implementation of the builder to get usable
results from the CSS parser.
See css.CSSBuilder for an example implementation
"""
def setCharset(self, charset):
raise NotImplementedError('Subclass responsibility')
#~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def beginStylesheet(self):
raise NotImplementedError('Subclass responsibility')
def stylesheet(self, elements):
raise NotImplementedError('Subclass responsibility')
def endStylesheet(self):
raise NotImplementedError('Subclass responsibility')
def beginInline(self):
raise NotImplementedError('Subclass responsibility')
def inline(self, declarations):
raise NotImplementedError('Subclass responsibility')
def endInline(self):
raise NotImplementedError('Subclass responsibility')
def ruleset(self, selectors, declarations):
raise NotImplementedError('Subclass responsibility')
#~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def resolveNamespacePrefix(self, nsPrefix, name):
raise NotImplementedError('Subclass responsibility')
#~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def atCharset(self, charset):
raise NotImplementedError('Subclass responsibility')
def atImport(self, import_, mediums, cssParser):
raise NotImplementedError('Subclass responsibility')
def atNamespace(self, nsPrefix, uri):
raise NotImplementedError('Subclass responsibility')
def atMedia(self, mediums, ruleset):
raise NotImplementedError('Subclass responsibility')
def atPage(self, page, pseudopage, declarations):
raise NotImplementedError('Subclass responsibility')
def atFontFace(self, declarations):
raise NotImplementedError('Subclass responsibility')
def atIdent(self, atIdent, cssParser, src):
return src, NotImplemented
#~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def combineSelectors(self, selectorA, combiner, selectorB):
"""Return value must implement CSSSelectorAbstract"""
raise NotImplementedError('Subclass responsibility')
def selector(self, name):
"""Return value must implement CSSSelectorAbstract"""
raise NotImplementedError('Subclass responsibility')
#~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def property(self, name, value, important=False):
raise NotImplementedError('Subclass responsibility')
def combineTerms(self, termA, combiner, termB):
raise NotImplementedError('Subclass responsibility')
def termIdent(self, value):
raise NotImplementedError('Subclass responsibility')
def termNumber(self, value, units=None):
raise NotImplementedError('Subclass responsibility')
def termRGB(self, value):
raise NotImplementedError('Subclass responsibility')
def termURI(self, value):
raise NotImplementedError('Subclass responsibility')
def termString(self, value):
raise NotImplementedError('Subclass responsibility')
def termUnicodeRange(self, value):
raise NotImplementedError('Subclass responsibility')
def termFunction(self, name, value):
raise NotImplementedError('Subclass responsibility')
def termUnknown(self, src):
raise NotImplementedError('Subclass responsibility')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Parser
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParseError(Exception):
src = None
ctxsrc = None
fullsrc = None
inline = False
srcCtxIdx = None
srcFullIdx = None
ctxsrcFullIdx = None
def __init__(self, msg, src, ctxsrc=None):
Exception.__init__(self, msg)
self.src = src
self.ctxsrc = ctxsrc or src
if self.ctxsrc:
self.srcCtxIdx = self.ctxsrc.find(self.src)
if self.srcCtxIdx < 0:
del self.srcCtxIdx
def __str__(self):
if self.ctxsrc:
return Exception.__str__(self) + ':: (' + repr(self.ctxsrc[:self.srcCtxIdx]) + ', ' + repr(
self.ctxsrc[self.srcCtxIdx:self.srcCtxIdx + 20]) + ')'
else:
return Exception.__str__(self) + ':: ' + repr(self.src[:40])
def setFullCSSSource(self, fullsrc, inline=False):
self.fullsrc = fullsrc
if type(self.fullsrc) == six.binary_type:
self.fullsrc = six.text_type(self.fullsrc, 'utf-8')
if inline:
self.inline = inline
if self.fullsrc:
self.srcFullIdx = self.fullsrc.find(self.src)
if self.srcFullIdx < 0:
del self.srcFullIdx
self.ctxsrcFullIdx = self.fullsrc.find(self.ctxsrc)
if self.ctxsrcFullIdx < 0:
del self.ctxsrcFullIdx
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParser(object):
"""CSS-2.1 parser dependent only upon the re module.
Implemented directly from http://www.w3.org/TR/CSS21/grammar.html
Tested with some existing CSS stylesheets for portability.
CSS Parsing API:
* setCSSBuilder()
To set your concrete implementation of CSSBuilderAbstract
* parseFile()
Use to parse external stylesheets using a file-like object
>>> cssFile = open('test.css', 'r')
>>> stylesheets = myCSSParser.parseFile(cssFile)
* parse()
Use to parse embedded stylesheets using source string
>>> cssSrc = '''
body,body.body {
font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif;
background: White;
color: Black;
}
a {text-decoration: underline;}
'''
>>> stylesheets = myCSSParser.parse(cssSrc)
* parseInline()
Use to parse inline stylesheets using attribute source string
>>> style = 'font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif; background: White; color: Black'
>>> stylesheets = myCSSParser.parseInline(style)
* parseAttributes()
Use to parse attribute string values into inline stylesheets
>>> stylesheets = myCSSParser.parseAttributes(
font='110%, "Times New Roman", Arial, Verdana, Helvetica, serif',
background='White',
color='Black')
* parseSingleAttr()
Use to parse a single string value into a CSS expression
>>> fontValue = myCSSParser.parseSingleAttr('110%, "Times New Roman", Arial, Verdana, Helvetica, serif')
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Constants / Variables / Etc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ParseError = CSSParseError
AttributeOperators = ['=', '~=', '|=', '&=', '^=', '!=', '<>']
SelectorQualifiers = ('#', '.', '[', ':')
SelectorCombiners = ['+', '>']
ExpressionOperators = ('/', '+', ',')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Regular expressions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if True: # makes the following code foldable
_orRule = lambda *args: '|'.join(args)
_reflags = re.I | re.M | re.U
i_hex = '[0-9a-fA-F]'
i_nonascii = '[\200-\377]'
i_unicode = '\\\\(?:%s){1,6}\s?' % i_hex
i_escape = _orRule(i_unicode, '\\\\[ -~\200-\377]')
# i_nmstart = _orRule('[A-Za-z_]', i_nonascii, i_escape)
i_nmstart = _orRule('\-[^0-9]|[A-Za-z_]', i_nonascii,
i_escape) # XXX Added hyphen, http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
i_nmchar = _orRule('[-0-9A-Za-z_]', i_nonascii, i_escape)
i_ident = '((?:%s)(?:%s)*)' % (i_nmstart, i_nmchar)
re_ident = re.compile(i_ident, _reflags)
# Caution: treats all characters above 0x7f as legal for an identifier.
i_unicodeid = r'([^\u0000-\u007f]+)'
re_unicodeid = re.compile(i_unicodeid, _reflags)
i_unicodestr1 = r'(\'[^\u0000-\u007f]+\')'
i_unicodestr2 = r'(\"[^\u0000-\u007f]+\")'
i_unicodestr = _orRule(i_unicodestr1, i_unicodestr2)
re_unicodestr = re.compile(i_unicodestr, _reflags)
i_element_name = '((?:%s)|\*)' % (i_ident[1:-1],)
re_element_name = re.compile(i_element_name, _reflags)
i_namespace_selector = '((?:%s)|\*|)\|(?!=)' % (i_ident[1:-1],)
re_namespace_selector = re.compile(i_namespace_selector, _reflags)
i_class = '\\.' + i_ident
re_class = re.compile(i_class, _reflags)
i_hash = '#((?:%s)+)' % i_nmchar
re_hash = re.compile(i_hash, _reflags)
i_rgbcolor = '(#%s{8}|#%s{6}|#%s{3})' % (i_hex, i_hex, i_hex)
re_rgbcolor = re.compile(i_rgbcolor, _reflags)
i_nl = '\n|\r\n|\r|\f'
i_escape_nl = '\\\\(?:%s)' % i_nl
i_string_content = _orRule('[\t !#$%&(-~]', i_escape_nl, i_nonascii, i_escape)
i_string1 = '\"((?:%s|\')*)\"' % i_string_content
i_string2 = '\'((?:%s|\")*)\'' % i_string_content
i_string = _orRule(i_string1, i_string2)
re_string = re.compile(i_string, _reflags)
i_uri = ('url\\(\s*(?:(?:%s)|((?:%s)+))\s*\\)'
% (i_string, _orRule('[!#$%&*-~]', i_nonascii, i_escape)))
# XXX For now
# i_uri = '(url\\(.*?\\))'
re_uri = re.compile(i_uri, _reflags)
i_num = '(([-+]?[0-9]+(?:\\.[0-9]+)?)|([-+]?\\.[0-9]+))' # XXX Added outer parentheses, because e.g. .5em was not parsed correctly
re_num = re.compile(i_num, _reflags)
i_unit = '(%%|%s)?' % i_ident
re_unit = re.compile(i_unit, _reflags)
i_function = i_ident + '\\('
re_function = re.compile(i_function, _reflags)
i_functionterm = '[-+]?' + i_function
re_functionterm = re.compile(i_functionterm, _reflags)
i_unicoderange1 = "(?:U\\+%s{1,6}-%s{1,6})" % (i_hex, i_hex)
i_unicoderange2 = "(?:U\\+\?{1,6}|{h}(\?{0,5}|{h}(\?{0,4}|{h}(\?{0,3}|{h}(\?{0,2}|{h}(\??|{h}))))))"
i_unicoderange = i_unicoderange1 # '(%s|%s)' % (i_unicoderange1, i_unicoderange2)
re_unicoderange = re.compile(i_unicoderange, _reflags)
# i_comment = '(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)|(?://.*)'
# gabriel: only C convention for comments is allowed in CSS
i_comment = '(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)'
re_comment = re.compile(i_comment, _reflags)
i_important = '!\s*(important)'
re_important = re.compile(i_important, _reflags)
del _orRule
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Public
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, cssBuilder=None):
self.setCSSBuilder(cssBuilder)
#~ CSS Builder to delegate to ~~~~~~~~~~~~~~~~~~~~~~~~
def getCSSBuilder(self):
"""A concrete instance implementing CSSBuilderAbstract"""
return self._cssBuilder
def setCSSBuilder(self, cssBuilder):
"""A concrete instance implementing CSSBuilderAbstract"""
self._cssBuilder = cssBuilder
cssBuilder = property(getCSSBuilder, setCSSBuilder)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Public CSS Parsing API
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def parseFile(self, srcFile, closeFile=False):
"""Parses CSS file-like objects using the current cssBuilder.
Use for external stylesheets."""
try:
result = self.parse(srcFile.read())
finally:
if closeFile:
srcFile.close()
return result
def parse(self, src):
"""Parses CSS string source using the current cssBuilder.
Use for embedded stylesheets."""
self.cssBuilder.beginStylesheet()
try:
# XXX Some simple preprocessing
src = cssSpecial.cleanupCSS(src)
try:
src, stylesheet = self._parseStylesheet(src)
except self.ParseError as err:
err.setFullCSSSource(src)
raise
finally:
self.cssBuilder.endStylesheet()
return stylesheet
def parseInline(self, src):
"""Parses CSS inline source string using the current cssBuilder.
Use to parse a tag's 'style'-like attribute."""
self.cssBuilder.beginInline()
try:
try:
src, properties = self._parseDeclarationGroup(src.strip(), braces=False)
except self.ParseError as err:
err.setFullCSSSource(src, inline=True)
raise
result = self.cssBuilder.inline(properties)
finally:
self.cssBuilder.endInline()
return result
def parseAttributes(self, attributes=None, **kwAttributes):
"""Parses CSS attribute source strings, and return as an inline stylesheet.
Use to parse a tag's highly CSS-based attributes like 'font'.
See also: parseSingleAttr
"""
attributes = attributes if attributes is not None else {}
if attributes:
kwAttributes.update(attributes)
self.cssBuilder.beginInline()
try:
properties = []
try:
for propertyName, src in six.iteritems(kwAttributes):
src, property = self._parseDeclarationProperty(src.strip(), propertyName)
properties.append(property)
except self.ParseError as err:
err.setFullCSSSource(src, inline=True)
raise
result = self.cssBuilder.inline(properties)
finally:
self.cssBuilder.endInline()
return result
def parseSingleAttr(self, attrValue):
"""Parse a single CSS attribute source string, and returns the built CSS expression.
Use to parse a tag's highly CSS-based attributes like 'font'.
See also: parseAttributes
"""
results = self.parseAttributes(temp=attrValue)
if 'temp' in results[1]:
return results[1]['temp']
else:
return results[0]['temp']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Internal _parse methods
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseStylesheet(self, src):
"""stylesheet
: [ CHARSET_SYM S* STRING S* ';' ]?
[S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
[ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
;
"""
# FIXME: BYTES to STR
if type(src) == six.binary_type:
src = six.text_type(src, 'utf-8')
# Get rid of the comments
src = self.re_comment.sub('', src)
# [ CHARSET_SYM S* STRING S* ';' ]?
src = self._parseAtCharset(src)
# [S|CDO|CDC]*
src = self._parseSCDOCDC(src)
# [ import [S|CDO|CDC]* ]*
src, stylesheetImports = self._parseAtImports(src)
# [ namespace [S|CDO|CDC]* ]*
src = self._parseAtNamespace(src)
stylesheetElements = []
# [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*
while src: # due to ending with ]*
if src.startswith('@'):
# @media, @page, @font-face
src, atResults = self._parseAtKeyword(src)
if atResults is not None and atResults != NotImplemented:
stylesheetElements.extend(atResults)
else:
# ruleset
src, ruleset = self._parseRuleset(src)
stylesheetElements.append(ruleset)
# [S|CDO|CDC]*
src = self._parseSCDOCDC(src)
stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
return src, stylesheet
def _parseSCDOCDC(self, src):
"""[S|CDO|CDC]*"""
while 1:
src = src.lstrip()
if src.startswith('<!--'):
src = src[4:]
elif src.startswith('-->'):
src = src[3:]
else:
break
return src
#~ CSS @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseAtCharset(self, src):
"""[ CHARSET_SYM S* STRING S* ';' ]?"""
if isAtRuleIdent(src, 'charset'):
src = stripAtRuleIdent(src)
charset, src = self._getString(src)
src = src.lstrip()
if src[:1] != ';':
raise self.ParseError('@charset expected a terminating \';\'', src, self.ctxsrc)
src = src[1:].lstrip()
self.cssBuilder.atCharset(charset)
return src
def _parseAtImports(self, src):
"""[ import [S|CDO|CDC]* ]*"""
result = []
while isAtRuleIdent(src, 'import'):
ctxsrc = src
src = stripAtRuleIdent(src)
import_, src = self._getStringOrURI(src)
if import_ is None:
raise self.ParseError('Import expecting string or url', src, ctxsrc)
mediums = []
medium, src = self._getIdent(src.lstrip())
while medium is not None:
mediums.append(medium)
if src[:1] == ',':
src = src[1:].lstrip()
medium, src = self._getIdent(src)
else:
break
# XXX Media do not inherit; if none is specified, "all" is appropriate
if not mediums:
mediums = ["all"]
if src[:1] != ';':
raise self.ParseError('@import expected a terminating \';\'', src, ctxsrc)
src = src[1:].lstrip()
stylesheet = self.cssBuilder.atImport(import_, mediums, self)
if stylesheet is not None:
result.append(stylesheet)
src = self._parseSCDOCDC(src)
return src, result
def _parseAtNamespace(self, src):
"""namespace :
@namespace S* [IDENT S*]? [STRING|URI] S* ';' S*
"""
src = self._parseSCDOCDC(src)
while isAtRuleIdent(src, 'namespace'):
ctxsrc = src
src = stripAtRuleIdent(src)
namespace, src = self._getStringOrURI(src)
if namespace is None:
nsPrefix, src = self._getIdent(src)
if nsPrefix is None:
raise self.ParseError('@namespace expected an identifier or a URI', src, ctxsrc)
namespace, src = self._getStringOrURI(src.lstrip())
if namespace is None:
raise self.ParseError('@namespace expected a URI', src, ctxsrc)
else:
nsPrefix = None
src = src.lstrip()
if src[:1] != ';':
raise self.ParseError('@namespace expected a terminating \';\'', src, ctxsrc)
src = src[1:].lstrip()
self.cssBuilder.atNamespace(nsPrefix, namespace)
src = self._parseSCDOCDC(src)
return src
def _parseAtKeyword(self, src):
"""[media | page | font_face | unknown_keyword]"""
ctxsrc = src
if isAtRuleIdent(src, 'media'):
src, result = self._parseAtMedia(src)
elif isAtRuleIdent(src, 'page'):
src, result = self._parseAtPage(src)
elif isAtRuleIdent(src, 'font-face'):
src, result = self._parseAtFontFace(src)
# XXX added @import, was missing!
elif isAtRuleIdent(src, 'import'):
src, result = self._parseAtImports(src)
elif isAtRuleIdent(src, 'frame'):
src, result = self._parseAtFrame(src)
elif src.startswith('@'):
src, result = self._parseAtIdent(src)
else:
raise self.ParseError('Unknown state in atKeyword', src, ctxsrc)
return src, result
def _parseAtMedia(self, src):
"""media
: MEDIA_SYM S* medium [ ',' S* medium ]* '{' S* ruleset* '}' S*
;
"""
ctxsrc = src
src = src[len('@media '):].lstrip()
mediums = []
while src and src[0] != '{':
medium, src = self._getIdent(src)
if medium is None:
raise self.ParseError('@media rule expected media identifier', src, ctxsrc)
# make "and ... {" work
if medium == 'and':
# strip up to curly bracket
pattern = re.compile('.*({.*)')
match = re.match(pattern, src)
src = src[match.end()-1:]
break
mediums.append(medium)
if src[0] == ',':
src = src[1:].lstrip()
else:
src = src.lstrip()
if not src.startswith('{'):
raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
src = src[1:].lstrip()
stylesheetElements = []
#while src and not src.startswith('}'):
# src, ruleset = self._parseRuleset(src)
# stylesheetElements.append(ruleset)
# src = src.lstrip()
# Also handle nested @-rules, which previously went undetected and unparsed
while src and not src.startswith('}'):
if src.startswith('@'):
# @media, @page, @font-face
src, atResults = self._parseAtKeyword(src)
if atResults is not None:
stylesheetElements.extend(atResults)
else:
# ruleset
src, ruleset = self._parseRuleset(src)
stylesheetElements.append(ruleset)
src = src.lstrip()
if not src.startswith('}'):
raise self.ParseError('Ruleset closing \'}\' not found', src, ctxsrc)
else:
src = src[1:].lstrip()
result = self.cssBuilder.atMedia(mediums, stylesheetElements)
return src, result
def _parseAtPage(self, src):
"""page
: PAGE_SYM S* IDENT? pseudo_page? S*
'{' S* declaration [ ';' S* declaration ]* '}' S*
;
"""
ctxsrc = src
src = src[len('@page'):].lstrip()
page, src = self._getIdent(src)
if src[:1] == ':':
pseudopage, src = self._getIdent(src[1:])
page = page + '_' + pseudopage
else:
pseudopage = None
#src, properties = self._parseDeclarationGroup(src.lstrip())
# Also handle nested @-rules, which previously went undetected and unparsed
stylesheetElements = []
src = src.lstrip()
properties = []
# XXX Extended for PDF use
if not src.startswith('{'):
raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
else:
src = src[1:].lstrip()
while src and not src.startswith('}'):
if src.startswith('@'):
# @media, @page, @font-face
src, atResults = self._parseAtKeyword(src)
if atResults is not None:
stylesheetElements.extend(atResults)
else:
src, nproperties = self._parseDeclarationGroup(src.lstrip(), braces=False)
properties += nproperties
src = src.lstrip()
result = [self.cssBuilder.atPage(page, pseudopage, properties)]
return src[1:].lstrip(), result
def _parseAtFrame(self, src):
"""
XXX Proprietary for PDF
"""
src = src[len('@frame '):].lstrip()
box, src = self._getIdent(src)
src, properties = self._parseDeclarationGroup(src.lstrip())
result = [self.cssBuilder.atFrame(box, properties)]
return src.lstrip(), result
def _parseAtFontFace(self, src):
src = src[len('@font-face '):].lstrip()
src, properties = self._parseDeclarationGroup(src)
result = [self.cssBuilder.atFontFace(properties)]
return src, result
def _parseAtIdent(self, src):
ctxsrc = src
atIdent, src = self._getIdent(src[1:])
if atIdent is None:
raise self.ParseError('At-rule expected an identifier for the rule', src, ctxsrc)
src, result = self.cssBuilder.atIdent(atIdent, self, src)
if result is NotImplemented:
# An at-rule consists of everything up to and including the next semicolon (;) or the next block, whichever comes first
semiIdx = src.find(';')
if semiIdx < 0:
semiIdx = None
blockIdx = src[:semiIdx].find('{')
if blockIdx < 0:
blockIdx = None
if semiIdx is not None and (blockIdx is None or semiIdx < blockIdx):
src = src[semiIdx + 1:].lstrip()
elif blockIdx is None:
# consume the rest of the content since we didn't find a block or a semicolon
src = src[-1:-1]
elif blockIdx is not None:
# expecting a block...
src = src[blockIdx:]
try:
# try to parse it as a declarations block
src, declarations = self._parseDeclarationGroup(src)
except self.ParseError:
# try to parse it as a stylesheet block
src, stylesheet = self._parseStylesheet(src)
else:
raise self.ParseError('Unable to ignore @-rule block', src, ctxsrc)
return src.lstrip(), result
#~ ruleset - see selector and declaration groups ~~~~
def _parseRuleset(self, src):
"""ruleset
: selector [ ',' S* selector ]*
'{' S* declaration [ ';' S* declaration ]* '}' S*
;
"""
src, selectors = self._parseSelectorGroup(src)
src, properties = self._parseDeclarationGroup(src.lstrip())
result = self.cssBuilder.ruleset(selectors, properties)
return src, result
#~ selector parsing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseSelectorGroup(self, src):
selectors = []
while src[:1] not in ('{', '}', ']', '(', ')', ';', ''):
src, selector = self._parseSelector(src)
if selector is None:
break
selectors.append(selector)
if src.startswith(','):
src = src[1:].lstrip()
return src, selectors
def _parseSelector(self, src):
"""selector
: simple_selector [ combinator simple_selector ]*
;
"""
src, selector = self._parseSimpleSelector(src)
srcLen = len(src) # XXX
while src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')'):
for combiner in self.SelectorCombiners:
if src.startswith(combiner):
src = src[len(combiner):].lstrip()
break
else:
combiner = ' '
src, selectorB = self._parseSimpleSelector(src)
# XXX Fix a bug that occurred here e.g. : .1 {...}
if len(src) >= srcLen:
src = src[1:]
while src and (src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')')):
src = src[1:]
return src.lstrip(), None
selector = self.cssBuilder.combineSelectors(selector, combiner, selectorB)
return src.lstrip(), selector
def _parseSimpleSelector(self, src):
"""simple_selector
: [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
;
"""
ctxsrc = src.lstrip()
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
name, src = self._getMatchResult(self.re_element_name, src)
if name:
pass # already *successfully* assigned
elif src[:1] in self.SelectorQualifiers:
name = '*'
else:
raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)
name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
selector = self.cssBuilder.selector(name)
while src and src[:1] in self.SelectorQualifiers:
hash_, src = self._getMatchResult(self.re_hash, src)
if hash_ is not None:
selector.addHashId(hash_)
continue
class_, src = self._getMatchResult(self.re_class, src)
if class_ is not None:
selector.addClass(class_)
continue
if src.startswith('['):
src, selector = self._parseSelectorAttribute(src, selector)
elif src.startswith(':'):
src, selector = self._parseSelectorPseudo(src, selector)
else:
break
return src.lstrip(), selector
def _parseSelectorAttribute(self, src, selector):
"""attrib
: '[' S* [ namespace_selector ]? IDENT S* [ [ '=' | INCLUDES | DASHMATCH ] S*
[ IDENT | STRING ] S* ]? ']'
;
"""
ctxsrc = src
if not src.startswith('['):
raise self.ParseError('Selector Attribute opening \'[\' not found', src, ctxsrc)
src = src[1:].lstrip()
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
attrName, src = self._getIdent(src)
src = src.lstrip()
if attrName is None:
raise self.ParseError('Expected a selector attribute name', src, ctxsrc)
if nsPrefix is not None:
attrName = self.cssBuilder.resolveNamespacePrefix(nsPrefix, attrName)
for op in self.AttributeOperators:
if src.startswith(op):
break
else:
op = ''
src = src[len(op):].lstrip()
if op:
attrValue, src = self._getIdent(src)
if attrValue is None:
attrValue, src = self._getString(src)
if attrValue is None:
raise self.ParseError('Expected a selector attribute value', src, ctxsrc)
else:
attrValue = None
if not src.startswith(']'):
raise self.ParseError('Selector Attribute closing \']\' not found', src, ctxsrc)
else:
src = src[1:]
if op:
selector.addAttributeOperation(attrName, op, attrValue)
else:
selector.addAttribute(attrName)
return src, selector
def _parseSelectorPseudo(self, src, selector):
"""pseudo
: ':' [ IDENT | function ]
;
"""
ctxsrc = src
if not src.startswith(':'):
raise self.ParseError('Selector Pseudo \':\' not found', src, ctxsrc)
src = re.search('^:{1,2}(.*)', src, re.M | re.S).group(1)
name, src = self._getIdent(src)
if not name:
raise self.ParseError('Selector Pseudo identifier not found', src, ctxsrc)
if src.startswith('('):
# function
src = src[1:].lstrip()
src, term = self._parseExpression(src, True)
if not src.startswith(')'):
raise self.ParseError('Selector Pseudo Function closing \')\' not found', src, ctxsrc)
src = src[1:]
selector.addPseudoFunction(name, term)
else:
selector.addPseudo(name)
return src, selector
#~ declaration and expression parsing ~~~~~~~~~~~~~~~
def _parseDeclarationGroup(self, src, braces=True):
ctxsrc = src
if src.startswith('{'):
src, braces = src[1:], True
elif braces:
raise self.ParseError('Declaration group opening \'{\' not found', src, ctxsrc)
properties = []
src = src.lstrip()
while src[:1] not in ('', ',', '{', '}', '[', ']', '(', ')', '@'): # XXX @?
src, property = self._parseDeclaration(src)
# XXX Workaround for styles like "*font: smaller"
if src.startswith("*"):
src = "-nothing-" + src[1:]
continue
if property is None:
src = src[1:].lstrip()
break
properties.append(property)
if src.startswith(';'):
src = src[1:].lstrip()
else:
break
if braces:
if not src.startswith('}'):
raise self.ParseError('Declaration group closing \'}\' not found', src, ctxsrc)
src = src[1:]
return src.lstrip(), properties
def _parseDeclaration(self, src):
"""declaration
: ident S* ':' S* expr prio?
| /* empty */
;
"""
# property
propertyName, src = self._getIdent(src)
if propertyName is not None:
src = src.lstrip()
# S* : S*
if src[:1] in (':', '='):
# Note: we are being fairly flexible here... technically, the
# ":" is *required*, but in the name of flexibility we
# support a null transition, as well as an "=" transition
src = src[1:].lstrip()
src, property = self._parseDeclarationProperty(src, propertyName)
else:
property = None
return src, property
def _parseDeclarationProperty(self, src, propertyName):
# expr
src, expr = self._parseExpression(src)
# prio?
important, src = self._getMatchResult(self.re_important, src)
src = src.lstrip()
property = self.cssBuilder.property(propertyName, expr, important)
return src, property
def _parseExpression(self, src, returnList=False):
"""
expr
: term [ operator term ]*
;
"""
src, term = self._parseExpressionTerm(src)
operator = None
while src[:1] not in ('', ';', '{', '}', '[', ']', ')'):
for operator in self.ExpressionOperators:
if src.startswith(operator):
src = src[len(operator):]
break
else:
operator = ' '
src, term2 = self._parseExpressionTerm(src.lstrip())
if term2 is NotImplemented:
break
else:
term = self.cssBuilder.combineTerms(term, operator, term2)
if operator is None and returnList:
term = self.cssBuilder.combineTerms(term, None, None)
return src, term
else:
return src, term
def _parseExpressionTerm(self, src):
"""term
: unary_operator?
[ NUMBER S* | PERCENTAGE S* | LENGTH S* | EMS S* | EXS S* | ANGLE S* |
TIME S* | FREQ S* | function ]
| STRING S* | IDENT S* | URI S* | RGB S* | UNICODERANGE S* | hexcolor
;
"""
ctxsrc = src
result, src = self._getMatchResult(self.re_num, src)
if result is not None:
units, src = self._getMatchResult(self.re_unit, src)
term = self.cssBuilder.termNumber(result, units)
return src.lstrip(), term
result, src = self._getString(src, self.re_uri)
if result is not None:
# XXX URL!!!!
term = self.cssBuilder.termURI(result)
return src.lstrip(), term
result, src = self._getString(src)
if result is not None:
term = self.cssBuilder.termString(result)
return src.lstrip(), term
result, src = self._getMatchResult(self.re_functionterm, src)
if result is not None:
src, params = self._parseExpression(src, True)
if src[:1] != ')':
raise self.ParseError('Terminal function expression expected closing \')\'', src, ctxsrc)
src = src[1:].lstrip()
term = self.cssBuilder.termFunction(result, params)
return src, term
result, src = self._getMatchResult(self.re_rgbcolor, src)
if result is not None:
term = self.cssBuilder.termRGB(result)
return src.lstrip(), term
result, src = self._getMatchResult(self.re_unicoderange, src)
if result is not None:
term = self.cssBuilder.termUnicodeRange(result)
return src.lstrip(), term
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
result, src = self._getIdent(src)
if result is not None:
if nsPrefix is not None:
result = self.cssBuilder.resolveNamespacePrefix(nsPrefix, result)
term = self.cssBuilder.termIdent(result)
return src.lstrip(), term
result, src = self._getMatchResult(self.re_unicodeid, src)
if result is not None:
term = self.cssBuilder.termIdent(result)
return src.lstrip(), term
result, src = self._getMatchResult(self.re_unicodestr, src)
if result is not None:
term = self.cssBuilder.termString(result)
return src.lstrip(), term
return self.cssBuilder.termUnknown(src)
#~ utility methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _getIdent(self, src, default=None):
return self._getMatchResult(self.re_ident, src, default)
def _getString(self, src, rexpression=None, default=None):
if rexpression is None:
rexpression = self.re_string
result = rexpression.match(src)
if result:
strres = tuple(filter(None, result.groups()))
if strres:
try:
strres = strres[0]
except Exception:
strres = result.groups()[0]
else:
strres = ''
return strres, src[result.end():]
else:
return default, src
def _getStringOrURI(self, src):
result, src = self._getString(src, self.re_uri)
if result is None:
result, src = self._getString(src)
return result, src
def _getMatchResult(self, rexpression, src, default=None, group=1):
result = rexpression.match(src)
if result:
return result.group(group), src[result.end():]
else:
return default, src
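# A minimal illustrative sketch of driving CSSParser.parseInline() with a
# stub builder. _DemoCSSBuilder is a hypothetical stand-in, not a real
# CSSBuilderAbstract implementation: it defines only the callbacks that
# parseInline() exercises (beginInline, property, termIdent, termString,
# termNumber, termRGB, combineTerms, inline, endInline) and simply echoes
# the parsed values back as plain Python tuples.
class _DemoCSSBuilder(object):
    def beginInline(self):
        pass
    def endInline(self):
        pass
    def inline(self, declarations):
        # Return the collected (name, value, important) triples unchanged.
        return declarations
    def property(self, name, value, important=None):
        return (name, value, bool(important))
    def termIdent(self, value):
        return value
    def termString(self, value):
        return value
    def termNumber(self, value, units=None):
        return value + (units or '')
    def termRGB(self, value):
        return value
    def combineTerms(self, term, operator, term2):
        return (term, operator, term2)
    def termUnknown(self, src):
        return src, NotImplemented
if __name__ == "__main__":
    # Parses a style attribute into (name, value, important) triples, e.g.
    # [('color', 'Black', False), ('font-size', '12px', False)].
    _demo_parser = CSSParser(_DemoCSSBuilder())
    print(_demo_parser.parseInline('color: Black; font-size: 12px'))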
|
the-stack_0_8892 | # -*- coding: utf-8 -*-
#
# pylast -
# A Python interface to Last.fm and Libre.fm
#
# Copyright 2008-2010 Amr Hassan
# Copyright 2013-2017 hugovk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# https://github.com/pylast/pylast
import hashlib
from xml.dom import minidom, Node
import xml.dom
import time
import shelve
import tempfile
import sys
import collections
import warnings
import re
import six
__version__ = '1.7.0'
__author__ = 'Amr Hassan, hugovk'
__copyright__ = "Copyright (C) 2008-2010 Amr Hassan, 2013-2017 hugovk"
__license__ = "apache2"
__email__ = '[email protected]'
def _deprecation_warning(message):
warnings.warn(message, DeprecationWarning)
def _can_use_ssl_securely():
# Python 3.3 doesn't support create_default_context() but can be made to
# work sanely.
# <2.7.9 and <3.2 never did any SSL verification so don't do SSL there.
# >3.4 and >2.7.9 has sane defaults so use SSL there.
v = sys.version_info
return v > (3, 3) or ((2, 7, 9) < v < (3, 0))
if _can_use_ssl_securely():
import ssl
if sys.version_info[0] == 3:
if _can_use_ssl_securely():
from http.client import HTTPSConnection
else:
from http.client import HTTPConnection
import html.entities as htmlentitydefs
from urllib.parse import splithost as url_split_host
from urllib.parse import quote_plus as url_quote_plus
unichr = chr
elif sys.version_info[0] == 2:
if _can_use_ssl_securely():
from httplib import HTTPSConnection
else:
from httplib import HTTPConnection
import htmlentitydefs
from urllib import splithost as url_split_host
from urllib import quote_plus as url_quote_plus
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_INVALID_SIGNATURE = 13
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
EVENT_ATTENDING = '0'
EVENT_MAYBE_ATTENDING = '1'
EVENT_NOT_ATTENDING = '2'
PERIOD_OVERALL = 'overall'
PERIOD_7DAYS = '7day'
PERIOD_1MONTH = '1month'
PERIOD_3MONTHS = '3month'
PERIOD_6MONTHS = '6month'
PERIOD_12MONTHS = '12month'
DOMAIN_ENGLISH = 0
DOMAIN_GERMAN = 1
DOMAIN_SPANISH = 2
DOMAIN_FRENCH = 3
DOMAIN_ITALIAN = 4
DOMAIN_POLISH = 5
DOMAIN_PORTUGUESE = 6
DOMAIN_SWEDISH = 7
DOMAIN_TURKISH = 8
DOMAIN_RUSSIAN = 9
DOMAIN_JAPANESE = 10
DOMAIN_CHINESE = 11
COVER_SMALL = 0
COVER_MEDIUM = 1
COVER_LARGE = 2
COVER_EXTRA_LARGE = 3
COVER_MEGA = 4
IMAGES_ORDER_POPULARITY = "popularity"
IMAGES_ORDER_DATE = "dateadded"
USER_MALE = 'Male'
USER_FEMALE = 'Female'
SCROBBLE_SOURCE_USER = "P"
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST = "R"
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST = "E"
SCROBBLE_SOURCE_LASTFM = "L"
SCROBBLE_SOURCE_UNKNOWN = "U"
SCROBBLE_MODE_PLAYED = ""
SCROBBLE_MODE_LOVED = "L"
SCROBBLE_MODE_BANNED = "B"
SCROBBLE_MODE_SKIPPED = "S"
# From http://boodebr.org/main/python/all-about-python-and-unicode#UNI_XML
RE_XML_ILLEGAL = (u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' +
u'|' +
u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])'
%
(unichr(0xd800), unichr(0xdbff), unichr(0xdc00),
unichr(0xdfff), unichr(0xd800), unichr(0xdbff),
unichr(0xdc00), unichr(0xdfff), unichr(0xd800),
unichr(0xdbff), unichr(0xdc00), unichr(0xdfff)))
XML_ILLEGAL = re.compile(RE_XML_ILLEGAL)
# Python <=3.3 doesn't support create_default_context()
# <2.7.9 and <3.2 never did any SSL verification
# FIXME This can be removed after 2017-09 when 3.3 is no longer supported and
# pypy3 uses 3.4 or later, see
# https://en.wikipedia.org/wiki/CPython#Version_history
if sys.version_info[0] == 3 and sys.version_info[1] == 3:
import certifi
SSL_CONTEXT = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
SSL_CONTEXT.verify_mode = ssl.CERT_REQUIRED
SSL_CONTEXT.options |= ssl.OP_NO_COMPRESSION
# Intermediate from https://wiki.mozilla.org/Security/Server_Side_TLS
# Create the cipher string
cipher_string = """
ECDHE-ECDSA-CHACHA20-POLY1305
ECDHE-RSA-CHACHA20-POLY1305
ECDHE-ECDSA-AES128-GCM-SHA256
ECDHE-RSA-AES128-GCM-SHA256
ECDHE-ECDSA-AES256-GCM-SHA384
ECDHE-RSA-AES256-GCM-SHA384
DHE-RSA-AES128-GCM-SHA256
DHE-RSA-AES256-GCM-SHA384
ECDHE-ECDSA-AES128-SHA256
ECDHE-RSA-AES128-SHA256
ECDHE-ECDSA-AES128-SHA
ECDHE-RSA-AES256-SHA384
ECDHE-RSA-AES128-SHA
ECDHE-ECDSA-AES256-SHA384
ECDHE-ECDSA-AES256-SHA
ECDHE-RSA-AES256-SHA
DHE-RSA-AES128-SHA256
DHE-RSA-AES128-SHA
DHE-RSA-AES256-SHA256
DHE-RSA-AES256-SHA
ECDHE-ECDSA-DES-CBC3-SHA
ECDHE-RSA-DES-CBC3-SHA
EDH-RSA-DES-CBC3-SHA
AES128-GCM-SHA256
AES256-GCM-SHA384
AES128-SHA256
AES256-SHA256
AES128-SHA
AES256-SHA
DES-CBC3-SHA
!DSS
"""
cipher_string = ' '.join(cipher_string.split())
SSL_CONTEXT.set_ciphers(cipher_string)
SSL_CONTEXT.load_verify_locations(certifi.where())
# Python >3.4 and >2.7.9 has sane defaults
elif sys.version_info > (3, 4) or ((2, 7, 9) < sys.version_info < (3, 0)):
SSL_CONTEXT = ssl.create_default_context()
class _Network(object):
"""
A music social network website such as Last.fm or
one with a Last.fm-compatible API.
"""
def __init__(
self, name, homepage, ws_server, api_key, api_secret, session_key,
submission_server, username, password_hash, domain_names, urls):
"""
name: the name of the network
homepage: the homepage URL
ws_server: the URL of the webservices server
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
submission_server: the URL of the server to which tracks are
submitted (scrobbled)
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is
the user's password
domain_names: a dict mapping each DOMAIN_* value to a string domain
name
urls: a dict mapping types to URLs
if username and password_hash were provided and not session_key,
session_key will be generated automatically when needed.
Either a valid session_key or a combination of username and
password_hash must be present for scrobbling.
You should use a preconfigured network object through a
get_*_network(...) method instead of creating an object
of this class, unless you know what you're doing.
"""
self.name = name
self.homepage = homepage
self.ws_server = ws_server
self.api_key = api_key
self.api_secret = api_secret
self.session_key = session_key
self.submission_server = submission_server
self.username = username
self.password_hash = password_hash
self.domain_names = domain_names
self.urls = urls
self.cache_backend = None
self.proxy_enabled = False
self.proxy = None
self.last_call_time = 0
self.limit_rate = False
# Generate a session_key if necessary
if ((self.api_key and self.api_secret) and not self.session_key and
(self.username and self.password_hash)):
sk_gen = SessionKeyGenerator(self)
self.session_key = sk_gen.get_session_key(
self.username, self.password_hash)
def __str__(self):
return "%s Network" % self.name
def get_artist(self, artist_name):
"""
Return an Artist object
"""
return Artist(artist_name, self)
def get_track(self, artist, title):
"""
Return a Track object
"""
return Track(artist, title, self)
def get_album(self, artist, title):
"""
Return an Album object
"""
return Album(artist, title, self)
def get_authenticated_user(self):
"""
Returns the authenticated user
"""
return AuthenticatedUser(self)
def get_country(self, country_name):
"""
Returns a country object
"""
return Country(country_name, self)
def get_metro(self, metro_name, country_name):
"""
Returns a metro object
"""
return Metro(metro_name, country_name, self)
def get_group(self, name):
"""
Returns a Group object
"""
return Group(name, self)
def get_user(self, username):
"""
Returns a user object
"""
return User(username, self)
def get_tag(self, name):
"""
Returns a tag object
"""
return Tag(name, self)
def get_scrobbler(self, client_id, client_version):
"""
Returns a Scrobbler object used for submitting tracks to the server
Quote from http://www.last.fm/api/submissions:
========
Client identifiers are used to provide a centrally managed database
of the client versions, allowing clients to be banned if they are
found to be behaving undesirably. The client ID is associated with
a version number on the server, however these are only incremented
if a client is banned and do not have to reflect the version of the
actual client application.
During development, clients which have not been allocated an
identifier should use the identifier tst, with a version number of
1.0. Do not distribute code or client implementations which use
this test identifier. Do not use the identifiers used by other
clients.
=========
To obtain a new client identifier please contact:
* Last.fm: [email protected]
* # TODO: list others
...and provide us with the name of your client and its homepage
address.
"""
_deprecation_warning(
"Use _Network.scrobble(...), _Network.scrobble_many(...),"
" and Network.update_now_playing(...) instead")
return Scrobbler(self, client_id, client_version)
def _get_language_domain(self, domain_language):
"""
Returns the mapped domain name of the network to a DOMAIN_* value
"""
if domain_language in self.domain_names:
return self.domain_names[domain_language]
def _get_url(self, domain, url_type):
return "http://%s/%s" % (
self._get_language_domain(domain), self.urls[url_type])
def _get_ws_auth(self):
"""
Returns an (API_KEY, API_SECRET, SESSION_KEY) tuple.
"""
return (self.api_key, self.api_secret, self.session_key)
def _delay_call(self):
"""
Makes sure that web service calls are at least 0.2 seconds apart.
"""
# Delay time in seconds from section 4.4 of http://www.last.fm/api/tos
DELAY_TIME = 0.2
now = time.time()
time_since_last = now - self.last_call_time
if time_since_last < DELAY_TIME:
time.sleep(DELAY_TIME - time_since_last)
self.last_call_time = now
def create_new_playlist(self, title, description):
"""
Creates a playlist for the authenticated user and returns it
title: The title of the new playlist.
description: The description of the new playlist.
"""
params = {}
params['title'] = title
params['description'] = description
doc = _Request(self, 'playlist.create', params).execute(False)
e_id = doc.getElementsByTagName("id")[0].firstChild.data
user = doc.getElementsByTagName('playlists')[0].getAttribute('user')
return Playlist(user, e_id, self)
def get_top_artists(self, limit=None, cacheable=True):
"""Returns the most played artists as a sequence of TopItem objects."""
params = {}
if limit:
params["limit"] = limit
doc = _Request(self, "chart.getTopArtists", params).execute(cacheable)
return _extract_top_artists(doc, self)
def get_top_tracks(self, limit=None, cacheable=True):
"""Returns the most played tracks as a sequence of TopItem objects."""
params = {}
if limit:
params["limit"] = limit
doc = _Request(self, "chart.getTopTracks", params).execute(cacheable)
seq = []
for node in doc.getElementsByTagName("track"):
title = _extract(node, "name")
artist = _extract(node, "name", 1)
track = Track(artist, title, self)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(track, weight))
return seq
def get_top_tags(self, limit=None, cacheable=True):
"""Returns the most used tags as a sequence of TopItem objects."""
# Last.fm has no "limit" parameter for tag.getTopTags
# so we need to get all (250) and then limit locally
doc = _Request(self, "tag.getTopTags").execute(cacheable)
seq = []
for node in doc.getElementsByTagName("tag"):
if limit and len(seq) >= limit:
break
tag = Tag(_extract(node, "name"), self)
weight = _number(_extract(node, "count"))
seq.append(TopItem(tag, weight))
return seq
def get_geo_events(
self, longitude=None, latitude=None, location=None, distance=None,
tag=None, festivalsonly=None, limit=None, cacheable=True):
"""
Returns all events in a specific location by country or city name.
Parameters:
longitude (Optional) : Specifies a longitude value to retrieve events
for (service returns nearby events by default)
latitude (Optional) : Specifies a latitude value to retrieve events for
(service returns nearby events by default)
location (Optional) : Specifies a location to retrieve events for
(service returns nearby events by default)
distance (Optional) : Find events within a specified radius
(in kilometres)
tag (Optional) : Specifies a tag to filter by.
festivalsonly[0|1] (Optional) : Whether only festivals should be
returned, or all events.
limit (Optional) : The number of results to fetch per page.
Defaults to 10.
"""
params = {}
if longitude:
params["long"] = longitude
if latitude:
params["lat"] = latitude
if location:
params["location"] = location
if limit:
params["limit"] = limit
if distance:
params["distance"] = distance
if tag:
params["tag"] = tag
if festivalsonly:
params["festivalsonly"] = 1
elif not festivalsonly:
params["festivalsonly"] = 0
doc = _Request(self, "geo.getEvents", params).execute(cacheable)
return _extract_events_from_doc(doc, self)
def get_metro_weekly_chart_dates(self, cacheable=True):
"""
Returns a list of From and To tuples for the available metro charts.
"""
doc = _Request(self, "geo.getMetroWeeklyChartlist").execute(cacheable)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append((node.getAttribute("from"), node.getAttribute("to")))
return seq
def get_metros(self, country=None, cacheable=True):
"""
Get a list of valid countries and metros for use in the other
webservices.
Parameters:
country (Optional) : Optionally restrict the results to those Metros
from a particular country, as defined by the ISO 3166-1 country
names standard.
"""
params = {}
if country:
params["country"] = country
doc = _Request(self, "geo.getMetros", params).execute(cacheable)
metros = doc.getElementsByTagName("metro")
seq = []
for metro in metros:
name = _extract(metro, "name")
country = _extract(metro, "country")
seq.append(Metro(name, country, self))
return seq
def get_geo_top_artists(self, country, limit=None, cacheable=True):
"""Get the most popular artists on Last.fm by country.
Parameters:
country (Required) : A country name, as defined by the ISO 3166-1
country names standard.
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
params = {"country": country}
if limit:
params["limit"] = limit
doc = _Request(self, "geo.getTopArtists", params).execute(cacheable)
return _extract_top_artists(doc, self)
def get_geo_top_tracks(
self, country, location=None, limit=None, cacheable=True):
"""Get the most popular tracks on Last.fm last week by country.
Parameters:
country (Required) : A country name, as defined by the ISO 3166-1
country names standard
location (Optional) : A metro name, to fetch the charts for
(must be within the country specified)
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
params = {"country": country}
if location:
params["location"] = location
if limit:
params["limit"] = limit
doc = _Request(self, "geo.getTopTracks", params).execute(cacheable)
tracks = doc.getElementsByTagName("track")
seq = []
for track in tracks:
title = _extract(track, "name")
artist = _extract(track, "name", 1)
listeners = _extract(track, "listeners")
seq.append(TopItem(Track(artist, title, self), listeners))
return seq
def enable_proxy(self, host, port):
"""Enable a default web proxy"""
self.proxy = [host, _number(port)]
self.proxy_enabled = True
def disable_proxy(self):
"""Disable using the web proxy"""
self.proxy_enabled = False
def is_proxy_enabled(self):
"""Returns True if a web proxy is enabled."""
return self.proxy_enabled
def _get_proxy(self):
"""Returns proxy details."""
return self.proxy
def enable_rate_limit(self):
"""Enables rate limiting for this network"""
self.limit_rate = True
def disable_rate_limit(self):
"""Disables rate limiting for this network"""
self.limit_rate = False
def is_rate_limited(self):
"""Return True if web service calls are rate limited"""
return self.limit_rate
def enable_caching(self, file_path=None):
"""Enables caching request-wide for all cacheable calls.
* file_path: A file path for the backend storage file. If set to
None, a temporary file is created by the backend.
"""
if not file_path:
file_path = tempfile.mktemp(prefix="pylast_tmp_")
self.cache_backend = _ShelfCacheBackend(file_path)
def disable_caching(self):
"""Disables all caching features."""
self.cache_backend = None
def is_caching_enabled(self):
"""Returns True if caching is enabled."""
return not (self.cache_backend is None)
def _get_cache_backend(self):
return self.cache_backend
def search_for_album(self, album_name):
"""Searches for an album by its name. Returns a AlbumSearch object.
Use get_next_page() to retrieve sequences of results."""
return AlbumSearch(album_name, self)
def search_for_artist(self, artist_name):
"""Searches of an artist by its name. Returns a ArtistSearch object.
Use get_next_page() to retrieve sequences of results."""
return ArtistSearch(artist_name, self)
def search_for_tag(self, tag_name):
"""Searches of a tag by its name. Returns a TagSearch object.
Use get_next_page() to retrieve sequences of results."""
return TagSearch(tag_name, self)
def search_for_track(self, artist_name, track_name):
"""Searches of a track by its name and its artist. Set artist to an
empty string if not available.
Returns a TrackSearch object.
Use get_next_page() to retrieve sequences of results."""
return TrackSearch(artist_name, track_name, self)
def search_for_venue(self, venue_name, country_name):
"""Searches of a venue by its name and its country. Set country_name to
an empty string if not available.
Returns a VenueSearch object.
Use get_next_page() to retrieve sequences of results."""
return VenueSearch(venue_name, country_name, self)
def get_track_by_mbid(self, mbid):
"""Looks up a track by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "track.getInfo", params).execute(True)
return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
def get_artist_by_mbid(self, mbid):
"""Loooks up an artist by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "artist.getInfo", params).execute(True)
return Artist(_extract(doc, "name"), self)
def get_album_by_mbid(self, mbid):
"""Looks up an album by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "album.getInfo", params).execute(True)
return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
def update_now_playing(
self, artist, title, album=None, album_artist=None,
duration=None, track_number=None, mbid=None, context=None):
"""
Used to notify Last.fm that a user has started listening to a track.
Parameters:
artist (Required) : The artist name
title (Required) : The track title
album (Optional) : The album name.
album_artist (Optional) : The album artist - if this differs
from the track artist.
duration (Optional) : The length of the track in seconds.
track_number (Optional) : The track number of the track on the
album.
mbid (Optional) : The MusicBrainz Track ID.
context (Optional) : Sub-client version
(not public, only enabled for certain API keys)
"""
params = {"track": title, "artist": artist}
if album:
params["album"] = album
if album_artist:
params["albumArtist"] = album_artist
if context:
params["context"] = context
if track_number:
params["trackNumber"] = track_number
if mbid:
params["mbid"] = mbid
if duration:
params["duration"] = duration
_Request(self, "track.updateNowPlaying", params).execute()
def scrobble(
self, artist, title, timestamp, album=None, album_artist=None,
track_number=None, duration=None, stream_id=None, context=None,
mbid=None):
"""Used to add a track-play to a user's profile.
Parameters:
artist (Required) : The artist name.
title (Required) : The track name.
timestamp (Required) : The time the track started playing, in UNIX
timestamp format (integer number of seconds since 00:00:00,
January 1st 1970 UTC). This must be in the UTC time zone.
album (Optional) : The album name.
album_artist (Optional) : The album artist - if this differs from
the track artist.
context (Optional) : Sub-client version (not public, only enabled
for certain API keys)
stream_id (Optional) : The stream id for this track received from
the radio.getPlaylist service.
track_number (Optional) : The track number of the track on the
album.
mbid (Optional) : The MusicBrainz Track ID.
duration (Optional) : The length of the track in seconds.
"""
return self.scrobble_many(({
"artist": artist, "title": title, "timestamp": timestamp,
"album": album, "album_artist": album_artist,
"track_number": track_number, "duration": duration,
"stream_id": stream_id, "context": context, "mbid": mbid},))
def scrobble_many(self, tracks):
"""
Used to scrobble a batch of tracks at once. The parameter tracks is a
sequence of dicts per track containing the keyword arguments as if
passed to the scrobble() method.
"""
tracks_to_scrobble = tracks[:50]
if len(tracks) > 50:
remaining_tracks = tracks[50:]
else:
remaining_tracks = None
params = {}
for i in range(len(tracks_to_scrobble)):
params["artist[%d]" % i] = tracks_to_scrobble[i]["artist"]
params["track[%d]" % i] = tracks_to_scrobble[i]["title"]
additional_args = (
"timestamp", "album", "album_artist", "context",
"stream_id", "track_number", "mbid", "duration")
args_map_to = { # so friggin lazy
"album_artist": "albumArtist",
"track_number": "trackNumber",
"stream_id": "streamID"}
for arg in additional_args:
if arg in tracks_to_scrobble[i] and tracks_to_scrobble[i][arg]:
if arg in args_map_to:
maps_to = args_map_to[arg]
else:
maps_to = arg
params[
"%s[%d]" % (maps_to, i)] = tracks_to_scrobble[i][arg]
_Request(self, "track.scrobble", params).execute()
if remaining_tracks:
self.scrobble_many(remaining_tracks)
def get_play_links(self, link_type, things, cacheable=True):
method = link_type + ".getPlaylinks"
params = {}
for i, thing in enumerate(things):
if link_type == "artist":
params['artist[' + str(i) + ']'] = thing
elif link_type == "album":
params['artist[' + str(i) + ']'] = thing.artist
params['album[' + str(i) + ']'] = thing.title
elif link_type == "track":
params['artist[' + str(i) + ']'] = thing.artist
params['track[' + str(i) + ']'] = thing.title
doc = _Request(self, method, params).execute(cacheable)
seq = []
for node in doc.getElementsByTagName("externalids"):
spotify = _extract(node, "spotify")
seq.append(spotify)
return seq
def get_artist_play_links(self, artists, cacheable=True):
return self.get_play_links("artist", artists, cacheable)
def get_album_play_links(self, albums, cacheable=True):
return self.get_play_links("album", albums, cacheable)
def get_track_play_links(self, tracks, cacheable=True):
return self.get_play_links("track", tracks, cacheable)
class LastFMNetwork(_Network):
"""A Last.fm network object
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the
user's password
if username and password_hash were provided and not session_key,
session_key will be generated automatically when needed.
Either a valid session_key or a combination of username and password_hash
must be present for scrobbling.
Most read-only webservices only require an api_key and an api_secret, see
about obtaining them from:
http://www.last.fm/api/account
"""
def __init__(
self, api_key="", api_secret="", session_key="", username="",
password_hash=""):
_Network.__init__(
self,
name="Last.fm",
homepage="http://last.fm",
ws_server=("ws.audioscrobbler.com", "/2.0/"),
api_key=api_key,
api_secret=api_secret,
session_key=session_key,
submission_server="http://post.audioscrobbler.com:80/",
username=username,
password_hash=password_hash,
domain_names={
DOMAIN_ENGLISH: 'www.last.fm',
DOMAIN_GERMAN: 'www.lastfm.de',
DOMAIN_SPANISH: 'www.lastfm.es',
DOMAIN_FRENCH: 'www.lastfm.fr',
DOMAIN_ITALIAN: 'www.lastfm.it',
DOMAIN_POLISH: 'www.lastfm.pl',
DOMAIN_PORTUGUESE: 'www.lastfm.com.br',
DOMAIN_SWEDISH: 'www.lastfm.se',
DOMAIN_TURKISH: 'www.lastfm.com.tr',
DOMAIN_RUSSIAN: 'www.lastfm.ru',
DOMAIN_JAPANESE: 'www.lastfm.jp',
DOMAIN_CHINESE: 'cn.last.fm',
},
urls={
"album": "music/%(artist)s/%(album)s",
"artist": "music/%(artist)s",
"event": "event/%(id)s",
"country": "place/%(country_name)s",
"playlist": "user/%(user)s/library/playlists/%(appendix)s",
"tag": "tag/%(name)s",
"track": "music/%(artist)s/_/%(title)s",
"group": "group/%(name)s",
"user": "user/%(name)s",
}
)
def __repr__(self):
return "pylast.LastFMNetwork(%s)" % (", ".join(
("'%s'" % self.api_key,
"'%s'" % self.api_secret,
"'%s'" % self.session_key,
"'%s'" % self.username,
"'%s'" % self.password_hash)))
def get_lastfm_network(
api_key="", api_secret="", session_key="", username="",
password_hash=""):
"""
Returns a preconfigured _Network object for Last.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the
user's password
if username and password_hash were provided and not session_key,
session_key will be generated automatically when needed.
Either a valid session_key or a combination of username and password_hash
must be present for scrobbling.
Most read-only webservices only require an api_key and an api_secret, see
about obtaining them from:
http://www.last.fm/api/account
"""
_deprecation_warning("Create a LastFMNetwork object instead")
return LastFMNetwork(
api_key, api_secret, session_key, username, password_hash)
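# An illustrative sketch: a read-only LastFMNetwork only needs an API key and
# secret (real credentials come from http://www.last.fm/api/account). The
# function name and the limit default are assumptions for the example.
def _example_chart_top_artists(api_key, api_secret, limit=5):
    network = LastFMNetwork(api_key=api_key, api_secret=api_secret)
    # Returns a sequence of TopItem(item, weight) named tuples.
    return network.get_top_artists(limit=limit)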
class LibreFMNetwork(_Network):
"""
A preconfigured _Network object for Libre.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the
user's password
if username and password_hash were provided and not session_key,
session_key will be generated automatically when needed.
"""
def __init__(
self, api_key="", api_secret="", session_key="", username="",
password_hash=""):
_Network.__init__(
self,
name="Libre.fm",
homepage="http://libre.fm",
ws_server=("libre.fm", "/2.0/"),
api_key=api_key,
api_secret=api_secret,
session_key=session_key,
submission_server="http://turtle.libre.fm:80/",
username=username,
password_hash=password_hash,
domain_names={
DOMAIN_ENGLISH: "libre.fm",
DOMAIN_GERMAN: "libre.fm",
DOMAIN_SPANISH: "libre.fm",
DOMAIN_FRENCH: "libre.fm",
DOMAIN_ITALIAN: "libre.fm",
DOMAIN_POLISH: "libre.fm",
DOMAIN_PORTUGUESE: "libre.fm",
DOMAIN_SWEDISH: "libre.fm",
DOMAIN_TURKISH: "libre.fm",
DOMAIN_RUSSIAN: "libre.fm",
DOMAIN_JAPANESE: "libre.fm",
DOMAIN_CHINESE: "libre.fm",
},
urls={
"album": "artist/%(artist)s/album/%(album)s",
"artist": "artist/%(artist)s",
"event": "event/%(id)s",
"country": "place/%(country_name)s",
"playlist": "user/%(user)s/library/playlists/%(appendix)s",
"tag": "tag/%(name)s",
"track": "music/%(artist)s/_/%(title)s",
"group": "group/%(name)s",
"user": "user/%(name)s",
}
)
def __repr__(self):
return "pylast.LibreFMNetwork(%s)" % (", ".join(
("'%s'" % self.api_key,
"'%s'" % self.api_secret,
"'%s'" % self.session_key,
"'%s'" % self.username,
"'%s'" % self.password_hash)))
def get_librefm_network(
api_key="", api_secret="", session_key="", username="",
password_hash=""):
"""
Returns a preconfigured _Network object for Libre.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the
user's password
if username and password_hash were provided and not session_key,
session_key will be generated automatically when needed.
"""
_deprecation_warning(
"DeprecationWarning: Create a LibreFMNetwork object instead")
return LibreFMNetwork(
api_key, api_secret, session_key, username, password_hash)
class _ShelfCacheBackend(object):
"""Used as a backend for caching cacheable requests."""
def __init__(self, file_path=None):
self.shelf = shelve.open(file_path)
def __iter__(self):
return iter(self.shelf.keys())
def get_xml(self, key):
return self.shelf[key]
def set_xml(self, key, xml_string):
self.shelf[key] = xml_string
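# An illustrative sketch of the shelve-backed request cache wired up by
# _Network.enable_caching(). The helper name and the cache path are
# placeholders; omitting the path lets pylast pick a temporary file instead.
def _example_cached_top_tags(network, cache_path="/tmp/pylast_cache.shelf"):
    network.enable_caching(cache_path)
    # The second identical call is served from the _ShelfCacheBackend file
    # rather than the web service, because cacheable=True.
    first = network.get_top_tags(limit=10, cacheable=True)
    second = network.get_top_tags(limit=10, cacheable=True)
    return first, second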
class _Request(object):
"""Representing an abstract web service operation."""
def __init__(self, network, method_name, params={}):
self.network = network
self.params = {}
for key in params:
self.params[key] = _unicode(params[key])
(self.api_key, self.api_secret, self.session_key) = \
network._get_ws_auth()
self.params["api_key"] = self.api_key
self.params["method"] = method_name
if network.is_caching_enabled():
self.cache = network._get_cache_backend()
if self.session_key:
self.params["sk"] = self.session_key
self.sign_it()
def sign_it(self):
"""Sign this request."""
if "api_sig" not in self.params.keys():
self.params['api_sig'] = self._get_signature()
def _get_signature(self):
"""
Returns a 32-character hexadecimal md5 hash of the signature string.
"""
keys = list(self.params.keys())
keys.sort()
string = ""
for name in keys:
string += name
string += self.params[name]
string += self.api_secret
return md5(string)
def _get_cache_key(self):
"""
The cache key is a string of concatenated sorted names and values.
"""
keys = list(self.params.keys())
keys.sort()
cache_key = str()
for key in keys:
if key != "api_sig" and key != "api_key" and key != "sk":
cache_key += key + self.params[key]
return hashlib.sha1(cache_key.encode("utf-8")).hexdigest()
def _get_cached_response(self):
"""Returns a file object of the cached response."""
if not self._is_cached():
response = self._download_response()
self.cache.set_xml(self._get_cache_key(), response)
return self.cache.get_xml(self._get_cache_key())
def _is_cached(self):
"""Returns True if the request is already in cache."""
return self._get_cache_key() in self.cache
def _download_response(self):
"""Returns a response body string from the server."""
if self.network.limit_rate:
self.network._delay_call()
data = []
for name in self.params.keys():
data.append('='.join((
name, url_quote_plus(_string(self.params[name])))))
data = '&'.join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
'Accept-Charset': 'utf-8',
'User-Agent': "pylast" + '/' + __version__
}
(HOST_NAME, HOST_SUBDIR) = self.network.ws_server
if self.network.is_proxy_enabled():
if _can_use_ssl_securely():
conn = HTTPSConnection(
context=SSL_CONTEXT,
host=self.network._get_proxy()[0],
port=self.network._get_proxy()[1])
else:
conn = HTTPConnection(
host=self.network._get_proxy()[0],
port=self.network._get_proxy()[1])
try:
conn.request(
method='POST', url="http://" + HOST_NAME + HOST_SUBDIR,
body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
else:
if _can_use_ssl_securely():
conn = HTTPSConnection(
context=SSL_CONTEXT,
host=HOST_NAME
)
else:
conn = HTTPConnection(
host=HOST_NAME
)
try:
conn.request(
method='POST', url=HOST_SUBDIR, body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
try:
response_text = _unicode(conn.getresponse().read())
except Exception as e:
raise MalformedResponseError(self.network, e)
response_text = XML_ILLEGAL.sub("?", response_text)
self._check_response_for_errors(response_text)
return response_text
def execute(self, cacheable=False):
"""Returns the XML DOM response of the POST Request from the server"""
if self.network.is_caching_enabled() and cacheable:
response = self._get_cached_response()
else:
response = self._download_response()
return minidom.parseString(_string(response).replace(
"opensearch:", ""))
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
try:
doc = minidom.parseString(_string(response).replace(
"opensearch:", ""))
except Exception as e:
raise MalformedResponseError(self.network, e)
e = doc.getElementsByTagName('lfm')[0]
if e.getAttribute('status') != "ok":
e = doc.getElementsByTagName('error')[0]
status = e.getAttribute('code')
details = e.firstChild.data.strip()
raise WSError(self.network, status, details)
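# An illustrative sketch of how _Request._get_signature() derives the
# "api_sig" parameter: the sorted parameter names and values are
# concatenated, the API secret is appended, and the result is hashed with
# the module-level md5() helper. The standalone function name is an
# assumption for the example.
def _example_api_signature(params, api_secret):
    string = ""
    for name in sorted(params.keys()):
        string += name + params[name]
    string += api_secret
    return md5(string)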
class SessionKeyGenerator(object):
"""Methods of generating a session key:
1) Web Authentication:
a. network = get_*_network(API_KEY, API_SECRET)
b. sg = SessionKeyGenerator(network)
c. url = sg.get_web_auth_url()
d. Ask the user to open the url and authorize you, and wait for it.
e. session_key = sg.get_web_auth_session_key(url)
2) Username and Password Authentication:
a. network = get_*_network(API_KEY, API_SECRET)
b. username = raw_input("Please enter your username: ")
c. password_hash = pylast.md5(raw_input("Please enter your password: "))
d. session_key = SessionKeyGenerator(network).get_session_key(username,
password_hash)
A session key's lifetime is infinite, unless the user revokes the rights
of the given API Key.
If you create a Network object with just a API_KEY and API_SECRET and a
username and a password_hash, a SESSION_KEY will be automatically generated
for that network and stored in it so you don't have to do this manually,
unless you want to.
"""
def __init__(self, network):
self.network = network
self.web_auth_tokens = {}
def _get_web_auth_token(self):
"""
Retrieves a token from the network for web authentication.
The token then has to be authorized from getAuthURL before creating
session.
"""
request = _Request(self.network, 'auth.getToken')
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
e = doc.getElementsByTagName('token')[0]
return e.firstChild.data
def get_web_auth_url(self):
"""
The user must open this page and authorize you first; only then
call get_web_auth_session_key(url).
"""
token = self._get_web_auth_token()
url = '%(homepage)s/api/auth/?api_key=%(api)s&token=%(token)s' % \
{"homepage": self.network.homepage,
"api": self.network.api_key, "token": token}
self.web_auth_tokens[url] = token
return url
def get_web_auth_session_key(self, url):
"""
Retrieves the session key of a web authorization process by its url.
"""
if url in self.web_auth_tokens.keys():
token = self.web_auth_tokens[url]
else:
# That's going to raise a WSError of an unauthorized token when the
# request is executed.
token = ""
request = _Request(self.network, 'auth.getSession', {'token': token})
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
return doc.getElementsByTagName('key')[0].firstChild.data
def get_session_key(self, username, password_hash):
"""
Retrieve a session key with a username and a md5 hash of the user's
password.
"""
params = {
"username": username, "authToken": md5(username + password_hash)}
request = _Request(self.network, "auth.getMobileSession", params)
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
return _extract(doc, "key")
TopItem = collections.namedtuple("TopItem", ["item", "weight"])
SimilarItem = collections.namedtuple("SimilarItem", ["item", "match"])
LibraryItem = collections.namedtuple(
"LibraryItem", ["item", "playcount", "tagcount"])
PlayedTrack = collections.namedtuple(
"PlayedTrack", ["track", "album", "playback_date", "timestamp"])
LovedTrack = collections.namedtuple(
"LovedTrack", ["track", "date", "timestamp"])
ImageSizes = collections.namedtuple(
"ImageSizes", [
"original", "large", "largesquare", "medium", "small", "extralarge"])
Image = collections.namedtuple(
"Image", [
"title", "url", "dateadded", "format", "owner", "sizes", "votes"])
Shout = collections.namedtuple(
"Shout", ["body", "author", "date"])
def _string_output(funct):
def r(*args):
return _string(funct(*args))
return r
def _pad_list(given_list, desired_length, padding=None):
"""
Pads a list to be of the desired_length.
"""
while len(given_list) < desired_length:
given_list.append(padding)
return given_list
class _BaseObject(object):
"""An abstract webservices object."""
network = None
def __init__(self, network, ws_prefix):
self.network = network
self.ws_prefix = ws_prefix
def _request(self, method_name, cacheable=False, params=None):
if not params:
params = self._get_params()
return _Request(self.network, method_name, params).execute(cacheable)
def _get_params(self):
"""Returns the most common set of parameters between all objects."""
return {}
def __hash__(self):
# Convert any ints (or whatever) into strings
values = map(six.text_type, self._get_params().values())
return hash(self.network) + hash(six.text_type(type(self)) + "".join(
list(self._get_params().keys()) + list(values)
).lower())
def _extract_cdata_from_request(self, method_name, tag_name, params):
doc = self._request(method_name, True, params)
return doc.getElementsByTagName(
tag_name)[0].firstChild.wholeText.strip()
def _get_things(
self, method, thing, thing_type, params=None, cacheable=True):
"""Returns a list of the most played thing_types by this thing."""
doc = self._request(
self.ws_prefix + "." + method, cacheable, params)
seq = []
for node in doc.getElementsByTagName(thing):
title = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
seq.append(TopItem(
thing_type(artist, title, self.network), playcount))
return seq
def get_top_fans(self, limit=None, cacheable=True):
"""Returns a list of the Users who played this the most.
# Parameters:
* limit int: Max elements.
# For Artist/Track
"""
doc = self._request(self.ws_prefix + '.getTopFans', cacheable)
seq = []
elements = doc.getElementsByTagName('user')
for element in elements:
if limit and len(seq) >= limit:
break
name = _extract(element, 'name')
weight = _number(_extract(element, 'weight'))
seq.append(TopItem(User(name, self.network), weight))
return seq
def share(self, users, message=None):
"""
Shares this (sends out recommendations).
Parameters:
* users [User|str,]: A list that can contain usernames, emails,
User objects, or all of them.
* message str: A message to include in the recommendation message.
Only for Artist/Event/Track.
"""
# Last.fm currently accepts a max of 10 recipients at a time
while(len(users) > 10):
section = users[0:10]
users = users[10:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request(self.ws_prefix + '.share', False, params)
def get_wiki_published_date(self):
"""
Returns the published date of the wiki.
Only for Album/Track.
"""
return self.get_wiki("published")
def get_wiki_summary(self):
"""
Returns the summary of the wiki.
Only for Album/Track.
"""
return self.get_wiki("summary")
def get_wiki_content(self):
"""
Returns the content of the wiki.
Only for Album/Track.
"""
return self.get_wiki("content")
def get_wiki(self, section):
"""
Returns a section of the wiki.
Only for Album/Track.
section can be "content", "summary" or
"published" (for published date)
"""
doc = self._request(self.ws_prefix + ".getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, section)
def get_shouts(self, limit=50, cacheable=False):
"""
Returns a sequence of Shout objects
"""
shouts = []
for node in _collect_nodes(
limit,
self,
self.ws_prefix + ".getShouts",
cacheable):
shouts.append(
Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
class _Chartable(object):
"""Common functions for classes with charts."""
def __init__(self, ws_prefix):
self.ws_prefix = ws_prefix # TODO move to _BaseObject?
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request(self.ws_prefix + ".getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append((node.getAttribute("from"), node.getAttribute("to")))
return seq
def get_weekly_album_charts(self, from_date=None, to_date=None):
"""
Returns the weekly album charts for the week starting from the
from_date value to the to_date value.
Only for Group or User.
"""
return self.get_weekly_charts("album", from_date, to_date)
def get_weekly_artist_charts(self, from_date=None, to_date=None):
"""
Returns the weekly artist charts for the week starting from the
from_date value to the to_date value.
Only for Group, Tag or User.
"""
return self.get_weekly_charts("artist", from_date, to_date)
def get_weekly_track_charts(self, from_date=None, to_date=None):
"""
Returns the weekly track charts for the week starting from the
from_date value to the to_date value.
Only for Group or User.
"""
return self.get_weekly_charts("track", from_date, to_date)
def get_weekly_charts(self, chart_kind, from_date=None, to_date=None):
"""
Returns the weekly charts for the week starting from the
from_date value to the to_date value.
chart_kind should be one of "album", "artist" or "track"
"""
method = ".getWeekly" + chart_kind.title() + "Chart"
chart_type = eval(chart_kind.title()) # string to type
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request(
self.ws_prefix + method, True, params)
seq = []
for node in doc.getElementsByTagName(chart_kind.lower()):
item = chart_type(
_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
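# Usage sketch (illustrative, not part of the library): fetching a weekly
# chart from a chartable object. Assumes `user` is a User instance (User
# mixes in _Chartable further below).
#
#     start, end = user.get_weekly_chart_dates()[-1]   # most recent week
#     for top in user.get_weekly_artist_charts(start, end):
#         print(top.item, top.weight)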
class _Taggable(object):
"""Common functions for classes with tags."""
def __init__(self, ws_prefix):
self.ws_prefix = ws_prefix # TODO move to _BaseObject
def add_tags(self, tags):
"""Adds one or several tags.
* tags: A sequence of tag names or Tag objects.
"""
for tag in tags:
self.add_tag(tag)
def add_tag(self, tag):
"""Adds one tag.
* tag: a tag name or a Tag object.
"""
if isinstance(tag, Tag):
tag = tag.get_name()
params = self._get_params()
params['tags'] = tag
self._request(self.ws_prefix + '.addTags', False, params)
def remove_tag(self, tag):
"""Remove a user's tag from this object."""
if isinstance(tag, Tag):
tag = tag.get_name()
params = self._get_params()
params['tag'] = tag
self._request(self.ws_prefix + '.removeTag', False, params)
def get_tags(self):
"""Returns a list of the tags set by the user to this object."""
# Uncacheable because it can be dynamically changed by the user.
params = self._get_params()
doc = self._request(self.ws_prefix + '.getTags', False, params)
tag_names = _extract_all(doc, 'name')
tags = []
for tag in tag_names:
tags.append(Tag(tag, self.network))
return tags
def remove_tags(self, tags):
"""Removes one or several tags from this object.
* tags: a sequence of tag names or Tag objects.
"""
for tag in tags:
self.remove_tag(tag)
def clear_tags(self):
"""Clears all the user-set tags. """
self.remove_tags(self.get_tags())
def set_tags(self, tags):
"""Sets this object's tags to only those tags.
* tags: a sequence of tag names or Tag objects.
"""
c_old_tags = []
old_tags = []
c_new_tags = []
new_tags = []
to_remove = []
to_add = []
tags_on_server = self.get_tags()
for tag in tags_on_server:
c_old_tags.append(tag.get_name().lower())
old_tags.append(tag.get_name())
for tag in tags:
c_new_tags.append(tag.lower())
new_tags.append(tag)
for i in range(0, len(old_tags)):
if not c_old_tags[i] in c_new_tags:
to_remove.append(old_tags[i])
for i in range(0, len(new_tags)):
if not c_new_tags[i] in c_old_tags:
to_add.append(new_tags[i])
self.remove_tags(to_remove)
self.add_tags(to_add)
def get_top_tags(self, limit=None):
"""Returns a list of the most frequently used Tags on this object."""
doc = self._request(self.ws_prefix + '.getTopTags', True)
elements = doc.getElementsByTagName('tag')
seq = []
for element in elements:
tag_name = _extract(element, 'name')
tagcount = _extract(element, 'count')
seq.append(TopItem(Tag(tag_name, self.network), tagcount))
if limit:
seq = seq[:limit]
return seq
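# Usage sketch (illustrative, not part of the library): working with tags on
# a taggable object. Assumes `track` is a Track instance on a network that
# has a session key, since adding and removing tags are write operations.
#
#     track.add_tags(["electronic", "favourite"])
#     for top in track.get_top_tags(limit=5):
#         print(top.item.get_name(), top.weight)
#     track.set_tags(["electronic"])   # sync server-side tags to this list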
class WSError(Exception):
"""Exception related to the Network web service"""
def __init__(self, network, status, details):
self.status = status
self.details = details
self.network = network
@_string_output
def __str__(self):
return self.details
def get_id(self):
"""Returns the exception ID, from one of the following:
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
"""
return self.status
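# Usage sketch (illustrative, not part of the library): handling a web
# service error raised by any request. Assumes `track` is a Track instance.
#
#     try:
#         track.love()
#     except WSError as e:
#         print("Last.fm error %s: %s" % (e.get_id(), e))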
class MalformedResponseError(Exception):
"""Exception conveying a malformed response from the music network."""
def __init__(self, network, underlying_error):
self.network = network
self.underlying_error = underlying_error
def __str__(self):
return "Malformed response from {}. Underlying error: {}".format(
self.network.name, str(self.underlying_error))
class NetworkError(Exception):
"""Exception conveying a problem in sending a request to Last.fm"""
def __init__(self, network, underlying_error):
self.network = network
self.underlying_error = underlying_error
def __str__(self):
return "NetworkError: %s" % str(self.underlying_error)
class _Opus(_BaseObject, _Taggable):
"""An album or track."""
artist = None
title = None
username = None
__hash__ = _BaseObject.__hash__
def __init__(self, artist, title, network, ws_prefix, username=None):
"""
Create an opus instance.
# Parameters:
* artist: An artist name or an Artist object.
* title: The album or track title.
* ws_prefix: 'album' or 'track'
"""
_BaseObject.__init__(self, network, ws_prefix)
_Taggable.__init__(self, ws_prefix)
if isinstance(artist, Artist):
self.artist = artist
else:
self.artist = Artist(artist, self.network)
self.title = title
self.username = username
def __repr__(self):
return "pylast.%s(%s, %s, %s)" % (
self.ws_prefix.title(), repr(self.artist.name),
repr(self.title), repr(self.network))
@_string_output
def __str__(self):
return _unicode("%s - %s") % (
self.get_artist().get_name(), self.get_title())
def __eq__(self, other):
if type(self) != type(other):
return False
a = self.get_title().lower()
b = other.get_title().lower()
c = self.get_artist().get_name().lower()
d = other.get_artist().get_name().lower()
return (a == b) and (c == d)
def __ne__(self, other):
return not self.__eq__(other)
def _get_params(self):
return {
'artist': self.get_artist().get_name(),
self.ws_prefix: self.get_title()}
def get_artist(self):
"""Returns the associated Artist object."""
return self.artist
def get_title(self, properly_capitalized=False):
"""Returns the artist or track title."""
if properly_capitalized:
self.title = _extract(
self._request(self.ws_prefix + ".getInfo", True), "name")
return self.title
def get_name(self, properly_capitalized=False):
"""Returns the album or track title (alias to get_title())."""
return self.get_title(properly_capitalized)
def get_id(self):
"""Returns the ID on the network."""
return _extract(
self._request(self.ws_prefix + ".getInfo", cacheable=True), "id")
def get_playcount(self):
"""Returns the number of plays on the network"""
return _number(_extract(
self._request(
self.ws_prefix + ".getInfo", cacheable=True), "playcount"))
def get_userplaycount(self):
"""Returns the number of plays by a given username"""
if not self.username:
return
params = self._get_params()
params['username'] = self.username
doc = self._request(self.ws_prefix + ".getInfo", True, params)
return _number(_extract(doc, "userplaycount"))
def get_listener_count(self):
"""Returns the number of listeners on the network"""
return _number(_extract(
self._request(
self.ws_prefix + ".getInfo", cacheable=True), "listeners"))
def get_mbid(self):
"""Returns the MusicBrainz ID of the album or track."""
doc = self._request(self.ws_prefix + ".getInfo", cacheable=True)
try:
lfm = doc.getElementsByTagName('lfm')[0]
opus = next(self._get_children_by_tag_name(lfm, self.ws_prefix))
mbid = next(self._get_children_by_tag_name(opus, "mbid"))
return mbid.firstChild.nodeValue
except StopIteration:
return None
def _get_children_by_tag_name(self, node, tag_name):
for child in node.childNodes:
if (child.nodeType == child.ELEMENT_NODE and
(tag_name == '*' or child.tagName == tag_name)):
yield child
class Album(_Opus):
"""An album."""
__hash__ = _Opus.__hash__
def __init__(self, artist, title, network, username=None):
super(Album, self).__init__(artist, title, network, "album", username)
def get_release_date(self):
"""Returns the release date of the album."""
return _extract(self._request(
self.ws_prefix + ".getInfo", cacheable=True), "releasedate")
def get_cover_image(self, size=COVER_EXTRA_LARGE):
"""
Returns a uri to the cover image
size can be one of:
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract_all(
self._request(
self.ws_prefix + ".getInfo", cacheable=True), 'image')[size]
def get_tracks(self):
"""Returns the list of Tracks on this album."""
return _extract_tracks(
self._request(
self.ws_prefix + ".getInfo", cacheable=True), "tracks")
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the URL of the album or track page on the network.
# Parameters:
* domain_name str: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_artist().get_name())
title = _url_safe(self.get_title())
return self.network._get_url(
domain_name, self.ws_prefix) % {
'artist': artist, 'album': title}
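# Usage sketch (illustrative, not part of the library): basic Album usage.
# Assumes `network` is an already-configured network object.
#
#     album = Album("Portishead", "Dummy", network)
#     print(album.get_url())
#     for track in album.get_tracks():
#         print(track.get_name())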
class Artist(_BaseObject, _Taggable):
"""An artist."""
name = None
username = None
__hash__ = _BaseObject.__hash__
def __init__(self, name, network, username=None):
"""Create an artist object.
# Parameters:
* name str: The artist's name.
"""
_BaseObject.__init__(self, network, 'artist')
_Taggable.__init__(self, 'artist')
self.name = name
self.username = username
def __repr__(self):
return "pylast.Artist(%s, %s)" % (
repr(self.get_name()), repr(self.network))
def __unicode__(self):
return six.text_type(self.get_name())
@_string_output
def __str__(self):
return self.__unicode__()
def __eq__(self, other):
if type(self) is type(other):
return self.get_name().lower() == other.get_name().lower()
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_params(self):
return {self.ws_prefix: self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the name of the artist.
If properly_capitalized is True, the name is fetched from the network,
overwriting the one supplied."""
if properly_capitalized:
self.name = _extract(
self._request(self.ws_prefix + ".getInfo", True), "name")
return self.name
def get_correction(self):
"""Returns the corrected artist name."""
return _extract(
self._request(self.ws_prefix + ".getCorrection"), "name")
def get_cover_image(self, size=COVER_MEGA):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract_all(
self._request(self.ws_prefix + ".getInfo", True), "image")[size]
def get_playcount(self):
"""Returns the number of plays on the network."""
return _number(_extract(
self._request(self.ws_prefix + ".getInfo", True), "playcount"))
def get_userplaycount(self):
"""Returns the number of plays by a given username"""
if not self.username:
return
params = self._get_params()
params['username'] = self.username
doc = self._request(self.ws_prefix + ".getInfo", True, params)
return _number(_extract(doc, "userplaycount"))
def get_mbid(self):
"""Returns the MusicBrainz ID of this artist."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "mbid")
def get_listener_count(self):
"""Returns the number of listeners on the network."""
if hasattr(self, "listener_count"):
return self.listener_count
else:
self.listener_count = _number(_extract(
self._request(self.ws_prefix + ".getInfo", True), "listeners"))
return self.listener_count
def is_streamable(self):
"""Returns True if the artist is streamable."""
return bool(_number(_extract(
self._request(self.ws_prefix + ".getInfo", True), "streamable")))
def get_bio(self, section, language=None):
"""
Returns a section of the bio.
section can be "content", "summary" or
"published" (for published date)
"""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return self._extract_cdata_from_request(
self.ws_prefix + ".getInfo", section, params)
def get_bio_published_date(self):
"""Returns the date on which the artist's biography was published."""
return self.get_bio("published")
def get_bio_summary(self, language=None):
"""Returns the summary of the artist's biography."""
return self.get_bio("summary", language)
def get_bio_content(self, language=None):
"""Returns the content of the artist's biography."""
return self.get_bio("content", language)
def get_upcoming_events(self):
"""Returns a list of the upcoming Events for this artist."""
doc = self._request(self.ws_prefix + '.getEvents', True)
return _extract_events_from_doc(doc, self.network)
def get_similar(self, limit=None):
"""Returns the similar artists on the network."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request(self.ws_prefix + '.getSimilar', True, params)
names = _extract_all(doc, "name")
matches = _extract_all(doc, "match")
artists = []
for i in range(0, len(names)):
artists.append(SimilarItem(
Artist(names[i], self.network), _number(matches[i])))
return artists
def get_top_albums(self, limit=None, cacheable=True):
"""Returns a list of the top albums."""
params = self._get_params()
if limit:
params['limit'] = limit
return self._get_things(
"getTopAlbums", "album", Album, params, cacheable)
def get_top_tracks(self, limit=None, cacheable=True):
"""Returns a list of the most played Tracks by this artist."""
params = self._get_params()
if limit:
params['limit'] = limit
return self._get_things(
"getTopTracks", "track", Track, params, cacheable)
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the artist page on the network.
# Parameters:
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_name())
return self.network._get_url(
domain_name, "artist") % {'artist': artist}
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("artist.Shout", False, params)
def get_band_members(self):
"""Returns a list of band members or None if unknown."""
names = None
doc = self._request(self.ws_prefix + ".getInfo", True)
for node in doc.getElementsByTagName("bandmembers"):
names = _extract_all(node, "name")
return names
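# Usage sketch (illustrative, not part of the library): basic Artist usage.
# Assumes `network` is an already-configured network object.
#
#     artist = Artist("Nina Simone", network)
#     print(artist.get_bio_summary())
#     for top in artist.get_top_tracks(limit=5):
#         print(top.item.get_title(), top.weight)
#     for similar in artist.get_similar(limit=3):
#         print(similar.item.get_name(), similar.match)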
class Event(_BaseObject):
"""An event."""
id = None
__hash__ = _BaseObject.__hash__
def __init__(self, event_id, network):
_BaseObject.__init__(self, network, 'event')
self.id = event_id
def __repr__(self):
return "pylast.Event(%s, %s)" % (repr(self.id), repr(self.network))
@_string_output
def __str__(self):
return "Event #" + str(self.get_id())
def __eq__(self, other):
if type(self) is type(other):
return self.get_id() == other.get_id()
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_params(self):
return {'event': self.get_id()}
def attend(self, attending_status):
"""Sets the attending status.
* attending_status: The attending status. Possible values:
o EVENT_ATTENDING
o EVENT_MAYBE_ATTENDING
o EVENT_NOT_ATTENDING
"""
params = self._get_params()
params['status'] = attending_status
self._request('event.attend', False, params)
def get_attendees(self):
"""
Get a list of attendees for an event
"""
doc = self._request("event.getAttendees", False)
users = []
for name in _extract_all(doc, "name"):
users.append(User(name, self.network))
return users
def get_id(self):
"""Returns the id of the event on the network. """
return self.id
def get_title(self):
"""Returns the title of the event. """
doc = self._request("event.getInfo", True)
return _extract(doc, "title")
def get_headliner(self):
"""Returns the headliner of the event. """
doc = self._request("event.getInfo", True)
return Artist(_extract(doc, "headliner"), self.network)
def get_artists(self):
"""Returns a list of the participating Artists. """
doc = self._request("event.getInfo", True)
names = _extract_all(doc, "artist")
artists = []
for name in names:
artists.append(Artist(name, self.network))
return artists
def get_venue(self):
"""Returns the venue where the event is held."""
doc = self._request("event.getInfo", True)
v = doc.getElementsByTagName("venue")[0]
venue_id = _number(_extract(v, "id"))
return Venue(venue_id, self.network, venue_element=v)
def get_start_date(self):
"""Returns the date when the event starts."""
doc = self._request("event.getInfo", True)
return _extract(doc, "startDate")
def get_description(self):
"""Returns the description of the event. """
doc = self._request("event.getInfo", True)
return _extract(doc, "description")
def get_cover_image(self, size=COVER_MEGA):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
doc = self._request("event.getInfo", True)
return _extract_all(doc, "image")[size]
def get_attendance_count(self):
"""Returns the number of attending people. """
doc = self._request("event.getInfo", True)
return _number(_extract(doc, "attendance"))
def get_review_count(self):
"""Returns the number of available reviews for this event. """
doc = self._request("event.getInfo", True)
return _number(_extract(doc, "reviews"))
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the event page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
return self.network._get_url(
domain_name, "event") % {'id': self.get_id()}
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("event.Shout", False, params)
class Country(_BaseObject):
"""A country at Last.fm."""
name = None
__hash__ = _BaseObject.__hash__
def __init__(self, name, network):
_BaseObject.__init__(self, network, "geo")
self.name = name
def __repr__(self):
return "pylast.Country(%s, %s)" % (repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self): # TODO can move to _BaseObject
return {'country': self.get_name()}
def _get_name_from_code(self, alpha2code):
# TODO: Have this function lookup the alpha-2 code and return the
# country name.
return alpha2code
def get_name(self):
"""Returns the country name. """
return self.name
def get_top_artists(self, limit=None, cacheable=True):
"""Returns a sequence of the most played artists."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request('geo.getTopArtists', cacheable, params)
return _extract_top_artists(doc, self)
def get_top_tracks(self, limit=None, cacheable=True):
"""Returns a sequence of the most played tracks"""
params = self._get_params()
if limit:
params['limit'] = limit
return self._get_things(
"getTopTracks", "track", Track, params, cacheable)
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the event page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
country_name = _url_safe(self.get_name())
return self.network._get_url(
domain_name, "country") % {'country_name': country_name}
class Metro(_BaseObject):
"""A metro at Last.fm."""
name = None
country = None
__hash__ = _BaseObject.__hash__
def __init__(self, name, country, network):
_BaseObject.__init__(self, network, None)
self.name = name
self.country = country
def __repr__(self):
return "pylast.Metro(%s, %s, %s)" % (
repr(self.name), repr(self.country), repr(self.network))
@_string_output
def __str__(self):
return self.get_name() + ", " + self.get_country()
def __eq__(self, other):
return (self.get_name().lower() == other.get_name().lower() and
self.get_country().lower() == other.get_country().lower())
def __ne__(self, other):
return (self.get_name().lower() != other.get_name().lower() or
self.get_country().lower() != other.get_country().lower())
def _get_params(self):
return {'metro': self.get_name(), 'country': self.get_country()}
def get_name(self):
"""Returns the metro name."""
return self.name
def get_country(self):
"""Returns the metro country."""
return self.country
def _get_chart(
self, method, tag="artist", limit=None, from_date=None,
to_date=None, cacheable=True):
"""Internal helper for getting geo charts."""
params = self._get_params()
if limit:
params["limit"] = limit
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request(method, cacheable, params)
seq = []
for node in doc.getElementsByTagName(tag):
if tag == "artist":
item = Artist(_extract(node, "name"), self.network)
elif tag == "track":
title = _extract(node, "name")
artist = _extract_element_tree(node).get('artist')['name']
item = Track(artist, title, self.network)
else:
return None
weight = _number(_extract(node, "listeners"))
seq.append(TopItem(item, weight))
return seq
def get_artist_chart(
self, tag="artist", limit=None, from_date=None, to_date=None,
cacheable=True):
"""Get a chart of artists for a metro.
Parameters:
from_date (Optional) : Beginning timestamp of the weekly range
requested
to_date (Optional) : Ending timestamp of the weekly range requested
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
return self._get_chart(
"geo.getMetroArtistChart", tag=tag, limit=limit,
from_date=from_date, to_date=to_date, cacheable=cacheable)
def get_hype_artist_chart(
self, tag="artist", limit=None, from_date=None, to_date=None,
cacheable=True):
"""Get a chart of hyped (up and coming) artists for a metro.
Parameters:
from_date (Optional) : Beginning timestamp of the weekly range
requested
to_date (Optional) : Ending timestamp of the weekly range requested
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
return self._get_chart(
"geo.getMetroHypeArtistChart", tag=tag, limit=limit,
from_date=from_date, to_date=to_date, cacheable=cacheable)
def get_unique_artist_chart(
self, tag="artist", limit=None, from_date=None, to_date=None,
cacheable=True):
"""Get a chart of the artists which make that metro unique.
Parameters:
from_date (Optional) : Beginning timestamp of the weekly range
requested
to_date (Optional) : Ending timestamp of the weekly range requested
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
return self._get_chart(
"geo.getMetroUniqueArtistChart", tag=tag, limit=limit,
from_date=from_date, to_date=to_date, cacheable=cacheable)
def get_track_chart(
self, tag="track", limit=None, from_date=None, to_date=None,
cacheable=True):
"""Get a chart of tracks for a metro.
Parameters:
from_date (Optional) : Beginning timestamp of the weekly range
requested
to_date (Optional) : Ending timestamp of the weekly range requested
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
return self._get_chart(
"geo.getMetroTrackChart", tag=tag, limit=limit,
from_date=from_date, to_date=to_date, cacheable=cacheable)
def get_hype_track_chart(
self, tag="track", limit=None, from_date=None, to_date=None,
cacheable=True):
"""Get a chart of tracks for a metro.
Parameters:
from_date (Optional) : Beginning timestamp of the weekly range
requested
to_date (Optional) : Ending timestamp of the weekly range requested
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
return self._get_chart(
"geo.getMetroHypeTrackChart", tag=tag,
limit=limit, from_date=from_date, to_date=to_date,
cacheable=cacheable)
def get_unique_track_chart(
self, tag="track", limit=None, from_date=None, to_date=None,
cacheable=True):
"""Get a chart of tracks for a metro.
Parameters:
from_date (Optional) : Beginning timestamp of the weekly range
requested
to_date (Optional) : Ending timestamp of the weekly range requested
limit (Optional) : The number of results to fetch per page.
Defaults to 50.
"""
return self._get_chart(
"geo.getMetroUniqueTrackChart", tag=tag, limit=limit,
from_date=from_date, to_date=to_date, cacheable=cacheable)
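# Usage sketch (illustrative, not part of the library): metro charts.
# Assumes `network` is an already-configured network object.
#
#     metro = Metro("Madrid", "Spain", network)
#     for top in metro.get_artist_chart(limit=10):
#         print(top.item.get_name(), top.weight)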
class Library(_BaseObject):
"""A user's Last.fm library."""
user = None
__hash__ = _BaseObject.__hash__
def __init__(self, user, network):
_BaseObject.__init__(self, network, 'library')
if isinstance(user, User):
self.user = user
else:
self.user = User(user, self.network)
self._albums_index = 0
self._artists_index = 0
self._tracks_index = 0
def __repr__(self):
return "pylast.Library(%s, %s)" % (repr(self.user), repr(self.network))
@_string_output
def __str__(self):
return repr(self.get_user()) + "'s Library"
def _get_params(self):
return {'user': self.user.get_name()}
def get_user(self):
"""Returns the user who owns this library."""
return self.user
def add_album(self, album):
"""Add an album to this library."""
params = self._get_params()
params["artist"] = album.get_artist().get_name()
params["album"] = album.get_name()
self._request("library.addAlbum", False, params)
def remove_album(self, album):
"""Remove an album from this library."""
params = self._get_params()
params["artist"] = album.get_artist().get_name()
params["album"] = album.get_name()
self._request(self.ws_prefix + ".removeAlbum", False, params)
def add_artist(self, artist):
"""Add an artist to this library."""
params = self._get_params()
if type(artist) == str:
params["artist"] = artist
else:
params["artist"] = artist.get_name()
self._request(self.ws_prefix + ".addArtist", False, params)
def remove_artist(self, artist):
"""Remove an artist from this library."""
params = self._get_params()
if type(artist) == str:
params["artist"] = artist
else:
params["artist"] = artist.get_name()
self._request(self.ws_prefix + ".removeArtist", False, params)
def add_track(self, track):
"""Add a track to this library."""
params = self._get_params()
params["track"] = track.get_title()
self._request(self.ws_prefix + ".addTrack", False, params)
def get_albums(self, artist=None, limit=50, cacheable=True):
"""
Returns a sequence of Album objects
If no artist is specified, it will return all, sorted by decreasing
play count.
If limit==None it will return all (may take a while)
"""
params = self._get_params()
if artist:
params["artist"] = artist
seq = []
for node in _collect_nodes(
limit,
self,
self.ws_prefix + ".getAlbums",
cacheable,
params):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(
Album(artist, name, self.network), playcount, tagcount))
return seq
def get_artists(self, limit=50, cacheable=True):
"""
Returns a sequence of Artist objects
if limit==None it will return all (may take a while)
"""
seq = []
for node in _collect_nodes(
limit,
self,
self.ws_prefix + ".getArtists",
cacheable):
name = _extract(node, "name")
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(
Artist(name, self.network), playcount, tagcount))
return seq
def get_tracks(self, artist=None, album=None, limit=50, cacheable=True):
"""
Returns a sequence of Track objects
If limit==None it will return all (may take a while)
"""
params = self._get_params()
if artist:
params["artist"] = artist
if album:
params["album"] = album
seq = []
for node in _collect_nodes(
limit,
self,
self.ws_prefix + ".getTracks",
cacheable,
params):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(
Track(artist, name, self.network), playcount, tagcount))
return seq
def remove_scrobble(self, artist, title, timestamp):
"""Remove a scrobble from a user's Last.fm library. Parameters:
artist (Required) : The artist that composed the track
title (Required) : The name of the track
timestamp (Required) : The unix timestamp of the scrobble
that you wish to remove
"""
params = self._get_params()
params["artist"] = artist
params["track"] = title
params["timestamp"] = timestamp
self._request(self.ws_prefix + ".removeScrobble", False, params)
class Playlist(_BaseObject):
"""A Last.fm user playlist."""
id = None
user = None
__hash__ = _BaseObject.__hash__
def __init__(self, user, playlist_id, network):
_BaseObject.__init__(self, network, "playlist")
if isinstance(user, User):
self.user = user
else:
self.user = User(user, self.network)
self.id = playlist_id
@_string_output
def __str__(self):
return repr(self.user) + "'s playlist # " + repr(self.id)
def _get_info_node(self):
"""
Returns the node from user.getPlaylists where this playlist's info is.
"""
doc = self._request("user.getPlaylists", True)
for node in doc.getElementsByTagName("playlist"):
if _extract(node, "id") == str(self.get_id()):
return node
def _get_params(self):
return {'user': self.user.get_name(), 'playlistID': self.get_id()}
def get_id(self):
"""Returns the playlist ID."""
return self.id
def get_user(self):
"""Returns the owner user of this playlist."""
return self.user
def get_tracks(self):
"""Returns a list of the tracks on this user playlist."""
uri = _unicode('lastfm://playlist/%s') % self.get_id()
return XSPF(uri, self.network).get_tracks()
def add_track(self, track):
"""Adds a Track to this Playlist."""
params = self._get_params()
params['artist'] = track.get_artist().get_name()
params['track'] = track.get_title()
self._request('playlist.addTrack', False, params)
def get_title(self):
"""Returns the title of this playlist."""
return _extract(self._get_info_node(), "title")
def get_creation_date(self):
"""Returns the creation date of this playlist."""
return _extract(self._get_info_node(), "date")
def get_size(self):
"""Returns the number of tracks in this playlist."""
return _number(_extract(self._get_info_node(), "size"))
def get_description(self):
"""Returns the description of this playlist."""
return _extract(self._get_info_node(), "description")
def get_duration(self):
"""Returns the duration of this playlist in milliseconds."""
return _number(_extract(self._get_info_node(), "duration"))
def is_streamable(self):
"""
Returns True if the playlist is streamable.
For a playlist to be streamable, it needs at least 45 tracks by 15
different artists."""
if _extract(self._get_info_node(), "streamable") == '1':
return True
else:
return False
def has_track(self, track):
"""Checks to see if track is already in the playlist.
* track: Any Track object.
"""
return track in self.get_tracks()
def get_cover_image(self, size=COVER_EXTRA_LARGE):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract(self._get_info_node(), "image")[size]
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the playlist on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
english_url = _extract(self._get_info_node(), "url")
appendix = english_url[english_url.rfind("/") + 1:]
return self.network._get_url(domain_name, "playlist") % {
'appendix': appendix, "user": self.get_user().get_name()}
class Tag(_BaseObject, _Chartable):
"""A Last.fm object tag."""
name = None
__hash__ = _BaseObject.__hash__
def __init__(self, name, network):
_BaseObject.__init__(self, network, 'tag')
_Chartable.__init__(self, 'tag')
self.name = name
def __repr__(self):
return "pylast.Tag(%s, %s)" % (repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {self.ws_prefix: self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the name of the tag. """
if properly_capitalized:
self.name = _extract(
self._request(self.ws_prefix + ".getInfo", True), "name")
return self.name
def get_similar(self):
"""Returns the tags similar to this one, ordered by similarity. """
doc = self._request(self.ws_prefix + '.getSimilar', True)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(Tag(name, self.network))
return seq
def get_top_albums(self, limit=None, cacheable=True):
"""Retuns a list of the top albums."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request(
self.ws_prefix + '.getTopAlbums', cacheable, params)
return _extract_top_albums(doc, self.network)
def get_top_tracks(self, limit=None, cacheable=True):
"""Returns a list of the most played Tracks for this tag."""
params = self._get_params()
if limit:
params['limit'] = limit
return self._get_things(
"getTopTracks", "track", Track, params, cacheable)
def get_top_artists(self, limit=None, cacheable=True):
"""Returns a sequence of the most played artists."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request(
self.ws_prefix + '.getTopArtists', cacheable, params)
return _extract_top_artists(doc, self.network)
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the tag page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "tag") % {'name': name}
class Track(_Opus):
"""A Last.fm track."""
__hash__ = _Opus.__hash__
def __init__(self, artist, title, network, username=None):
super(Track, self).__init__(artist, title, network, "track", username)
def get_correction(self):
"""Returns the corrected track name."""
return _extract(
self._request(self.ws_prefix + ".getCorrection"), "name")
def get_duration(self):
"""Returns the track duration."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _number(_extract(doc, "duration"))
def get_userloved(self):
"""Whether the user loved this track"""
if not self.username:
return
params = self._get_params()
params['username'] = self.username
doc = self._request(self.ws_prefix + ".getInfo", True, params)
loved = _number(_extract(doc, "userloved"))
return bool(loved)
def is_streamable(self):
"""Returns True if the track is available at Last.fm."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "streamable") == "1"
def is_fulltrack_available(self):
"""Returns True if the fulltrack is available for streaming."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return doc.getElementsByTagName(
"streamable")[0].getAttribute("fulltrack") == "1"
def get_album(self):
"""Returns the album object of this track."""
doc = self._request(self.ws_prefix + ".getInfo", True)
albums = doc.getElementsByTagName("album")
if len(albums) == 0:
return
node = doc.getElementsByTagName("album")[0]
return Album(
_extract(node, "artist"), _extract(node, "title"), self.network)
def love(self):
"""Adds the track to the user's loved tracks. """
self._request(self.ws_prefix + '.love')
def unlove(self):
"""Remove the track to the user's loved tracks. """
self._request(self.ws_prefix + '.unlove')
def ban(self):
"""Ban this track from ever playing on the radio. """
self._request(self.ws_prefix + '.ban')
def get_similar(self):
"""
Returns similar tracks for this track on the network,
based on listening data.
"""
doc = self._request(self.ws_prefix + '.getSimilar', True)
seq = []
for node in doc.getElementsByTagName(self.ws_prefix):
title = _extract(node, 'name')
artist = _extract(node, 'name', 1)
match = _number(_extract(node, "match"))
seq.append(SimilarItem(Track(artist, title, self.network), match))
return seq
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the URL of the album or track page on the network.
# Parameters:
* domain_name str: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_artist().get_name())
title = _url_safe(self.get_title())
return self.network._get_url(
domain_name, self.ws_prefix) % {
'artist': artist, 'title': title}
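# Usage sketch (illustrative, not part of the library): basic Track usage.
# Assumes `network` is an authenticated network object, since love() writes
# to the user's profile.
#
#     track = Track("Massive Attack", "Teardrop", network)
#     print(track.get_duration())   # duration in milliseconds
#     track.love()
#     for similar in track.get_similar():
#         print(similar.item.get_title(), similar.match)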
class Group(_BaseObject, _Chartable):
"""A Last.fm group."""
name = None
__hash__ = _BaseObject.__hash__
def __init__(self, name, network):
_BaseObject.__init__(self, network, 'group')
_Chartable.__init__(self, 'group')
self.name = name
def __repr__(self):
return "pylast.Group(%s, %s)" % (repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {self.ws_prefix: self.get_name()}
def get_name(self):
"""Returns the group name. """
return self.name
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the group page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "group") % {'name': name}
def get_members(self, limit=50, cacheable=False):
"""
Returns a sequence of User objects
if limit==None it will return all
"""
nodes = _collect_nodes(
limit, self, self.ws_prefix + ".getMembers", cacheable)
users = []
for node in nodes:
users.append(User(_extract(node, "name"), self.network))
return users
class XSPF(_BaseObject):
"A Last.fm XSPF playlist."""
uri = None
__hash__ = _BaseObject.__hash__
def __init__(self, uri, network):
_BaseObject.__init__(self, network, None)
self.uri = uri
def _get_params(self):
return {'playlistURL': self.get_uri()}
@_string_output
def __str__(self):
return self.get_uri()
def __eq__(self, other):
return self.get_uri() == other.get_uri()
def __ne__(self, other):
return self.get_uri() != other.get_uri()
def get_uri(self):
"""Returns the Last.fm playlist URI. """
return self.uri
def get_tracks(self):
"""Returns the tracks on this playlist."""
doc = self._request('playlist.fetch', True)
seq = []
for node in doc.getElementsByTagName('track'):
title = _extract(node, 'title')
artist = _extract(node, 'creator')
seq.append(Track(artist, title, self.network))
return seq
class User(_BaseObject, _Chartable):
"""A Last.fm user."""
name = None
__hash__ = _BaseObject.__hash__
def __init__(self, user_name, network):
_BaseObject.__init__(self, network, 'user')
_Chartable.__init__(self, 'user')
self.name = user_name
self._past_events_index = 0
self._recommended_events_index = 0
self._recommended_artists_index = 0
def __repr__(self):
return "pylast.User(%s, %s)" % (repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, another):
if isinstance(another, User):
return self.get_name() == another.get_name()
else:
return False
def __ne__(self, another):
if isinstance(another, User):
return self.get_name() != another.get_name()
else:
return True
def _get_params(self):
return {self.ws_prefix: self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the user name."""
if properly_capitalized:
self.name = _extract(
self._request(self.ws_prefix + ".getInfo", True), "name")
return self.name
def get_upcoming_events(self):
"""Returns all the upcoming events for this user."""
doc = self._request(self.ws_prefix + '.getEvents', True)
return _extract_events_from_doc(doc, self.network)
def get_artist_tracks(self, artist, cacheable=False):
"""
Get a list of tracks by a given artist scrobbled by this user,
including scrobble time.
"""
# Not implemented:
# "Can be limited to specific timeranges, defaults to all time."
params = self._get_params()
params['artist'] = artist
seq = []
for track in _collect_nodes(
None,
self,
self.ws_prefix + ".getArtistTracks",
cacheable,
params):
title = _extract(track, "name")
artist = _extract(track, "artist")
date = _extract(track, "date")
album = _extract(track, "album")
timestamp = track.getElementsByTagName(
"date")[0].getAttribute("uts")
seq.append(PlayedTrack(
Track(artist, title, self.network), album, date, timestamp))
return seq
def get_friends(self, limit=50, cacheable=False):
"""Returns a list of the user's friends. """
seq = []
for node in _collect_nodes(
limit,
self,
self.ws_prefix + ".getFriends",
cacheable):
seq.append(User(_extract(node, "name"), self.network))
return seq
def get_loved_tracks(self, limit=50, cacheable=True):
"""
Returns this user's loved tracks as a sequence of LovedTrack objects in
reverse order of their timestamp, all the way back to the first track.
If limit==None, it will try to pull all the available data.
This method uses caching. Enable caching only if you're pulling a
large amount of data.
Use extract_items() with the return of this function to
get only a sequence of Track objects with no playback dates.
"""
params = self._get_params()
if limit:
params['limit'] = limit
seq = []
for track in _collect_nodes(
limit,
self,
self.ws_prefix + ".getLovedTracks",
cacheable,
params):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
date = _extract(track, "date")
timestamp = track.getElementsByTagName(
"date")[0].getAttribute("uts")
seq.append(LovedTrack(
Track(artist, title, self.network), date, timestamp))
return seq
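# Usage sketch (illustrative, not part of the library): pulling all loved
# tracks and flattening them with extract_items(), which the docstring above
# refers to and which is assumed to be defined elsewhere in this module.
#
#     loved = user.get_loved_tracks(limit=None)   # may take a while
#     tracks_only = extract_items(loved)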
def get_neighbours(self, limit=50, cacheable=True):
"""Returns a list of the user's friends."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request(
self.ws_prefix + '.getNeighbours', cacheable, params)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(User(name, self.network))
return seq
def get_past_events(self, limit=50, cacheable=False):
"""
Returns a sequence of Event objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(
limit,
self,
self.ws_prefix + ".getPastEvents",
cacheable):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def get_playlists(self):
"""Returns a list of Playlists that this user owns."""
doc = self._request(self.ws_prefix + ".getPlaylists", True)
playlists = []
for playlist_id in _extract_all(doc, "id"):
playlists.append(
Playlist(self.get_name(), playlist_id, self.network))
return playlists
def get_now_playing(self):
"""
Returns the currently playing track, or None if nothing is playing.
"""
params = self._get_params()
params['limit'] = '1'
doc = self._request(self.ws_prefix + '.getRecentTracks', False, params)
tracks = doc.getElementsByTagName('track')
if len(tracks) == 0:
return None
e = tracks[0]
if not e.hasAttribute('nowplaying'):
return None
artist = _extract(e, 'artist')
title = _extract(e, 'name')
return Track(artist, title, self.network, self.name)
def get_recent_tracks(self, limit=10, cacheable=True,
time_from=None, time_to=None):
"""
Returns this user's played tracks as a sequence of PlayedTrack objects
in reverse order of playtime, all the way back to the first track.
Parameters:
limit : If None, it will try to pull all the available data.
from (Optional) : Beginning timestamp of a range - only display
scrobbles after this time, in UNIX timestamp format (integer
number of seconds since 00:00:00, January 1st 1970 UTC). This
must be in the UTC time zone.
to (Optional) : End timestamp of a range - only display scrobbles
before this time, in UNIX timestamp format (integer number of
seconds since 00:00:00, January 1st 1970 UTC). This must be in
the UTC time zone.
This method uses caching. Enable caching only if you're pulling a
large amount of data.
Use extract_items() with the return of this function to
get only a sequence of Track objects with no playback dates.
"""
params = self._get_params()
if limit:
params['limit'] = limit
if time_from:
params['from'] = time_from
if time_to:
params['to'] = time_to
seq = []
for track in _collect_nodes(
limit,
self,
self.ws_prefix + ".getRecentTracks",
cacheable,
params):
if track.hasAttribute('nowplaying'):
continue # to prevent the now playing track from sneaking in
title = _extract(track, "name")
artist = _extract(track, "artist")
date = _extract(track, "date")
album = _extract(track, "album")
timestamp = track.getElementsByTagName(
"date")[0].getAttribute("uts")
seq.append(PlayedTrack(
Track(artist, title, self.network), album, date, timestamp))
return seq
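# Usage sketch (illustrative, not part of the library): recent scrobbles in
# a UTC time window. Assumes `user` is a User instance; timestamps are UNIX
# epoch seconds as the docstring above describes.
#
#     import calendar, datetime
#     start = calendar.timegm(datetime.datetime(2014, 1, 1).timetuple())
#     end = calendar.timegm(datetime.datetime(2014, 2, 1).timetuple())
#     for played in user.get_recent_tracks(
#             limit=None, time_from=start, time_to=end):
#         print(played.track, played.playback_date)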
def get_id(self):
"""Returns the user ID."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "id")
def get_language(self):
"""Returns the language code of the language used by the user."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "lang")
def get_country(self):
"""Returns the name of the country of the user."""
doc = self._request(self.ws_prefix + ".getInfo", True)
country = _extract(doc, "country")
if country is None:
return None
else:
return Country(country, self.network)
def get_age(self):
"""Returns the user's age."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _number(_extract(doc, "age"))
def get_gender(self):
"""Returns the user's gender. Either USER_MALE or USER_FEMALE."""
doc = self._request(self.ws_prefix + ".getInfo", True)
value = _extract(doc, "gender")
if value == 'm':
return USER_MALE
elif value == 'f':
return USER_FEMALE
return None
def is_subscriber(self):
"""Returns whether the user is a subscriber or not. True or False."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "subscriber") == "1"
def get_playcount(self):
"""Returns the user's playcount so far."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _number(_extract(doc, "playcount"))
def get_registered(self):
"""Returns the user's registration date."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "registered")
def get_unixtime_registered(self):
"""Returns the user's registration date as a UNIX timestamp."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return doc.getElementsByTagName(
"registered")[0].getAttribute("unixtime")
def get_tagged_albums(self, tag, limit=None, cacheable=True):
"""Returns the albums tagged by a user."""
params = self._get_params()
params['tag'] = tag
params['taggingtype'] = 'album'
if limit:
params['limit'] = limit
doc = self._request(self.ws_prefix + '.getpersonaltags', cacheable,
params)
return _extract_albums(doc, self.network)
def get_tagged_artists(self, tag, limit=None):
"""Returns the artists tagged by a user."""
params = self._get_params()
params['tag'] = tag
params['taggingtype'] = 'artist'
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + '.getpersonaltags', True, params)
return _extract_artists(doc, self.network)
def get_tagged_tracks(self, tag, limit=None, cacheable=True):
"""Returns the tracks tagged by a user."""
params = self._get_params()
params['tag'] = tag
params['taggingtype'] = 'track'
if limit:
params['limit'] = limit
doc = self._request(self.ws_prefix + '.getpersonaltags', cacheable,
params)
return _extract_tracks(doc, self.network)
def get_top_albums(
self, period=PERIOD_OVERALL, limit=None, cacheable=True):
"""Returns the top albums played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_1MONTH
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
if limit:
params['limit'] = limit
doc = self._request(
self.ws_prefix + '.getTopAlbums', cacheable, params)
return _extract_top_albums(doc, self.network)
def get_top_artists(self, period=PERIOD_OVERALL, limit=None):
"""Returns the top artists played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_1MONTH
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + '.getTopArtists', True, params)
return _extract_top_artists(doc, self.network)
def get_top_tags(self, limit=None, cacheable=True):
"""
Returns a sequence of the top tags used by this user with their counts
as TopItem objects.
* limit: The limit of how many tags to return.
* cacheable: Whether to cache results.
"""
params = self._get_params()
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + ".getTopTags", cacheable, params)
seq = []
for node in doc.getElementsByTagName("tag"):
seq.append(TopItem(
Tag(_extract(node, "name"), self.network),
_extract(node, "count")))
return seq
def get_top_tracks(
self, period=PERIOD_OVERALL, limit=None, cacheable=True):
"""Returns the top tracks played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_1MONTH
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
if limit:
params['limit'] = limit
return self._get_things(
"getTopTracks", "track", Track, params, cacheable)
def compare_with_user(self, user, shared_artists_limit=None):
"""
Compare this user with another Last.fm user.
Returns a sequence:
(tasteometer_score, (shared_artist1, shared_artist2, ...))
user: A User object or a username string/unicode object.
"""
if isinstance(user, User):
user = user.get_name()
params = self._get_params()
if shared_artists_limit:
params['limit'] = shared_artists_limit
params['type1'] = 'user'
params['type2'] = 'user'
params['value1'] = self.get_name()
params['value2'] = user
doc = self._request('tasteometer.compare', False, params)
score = _extract(doc, 'score')
artists = doc.getElementsByTagName('artists')[0]
shared_artists_names = _extract_all(artists, 'name')
shared_artists_seq = []
for name in shared_artists_names:
shared_artists_seq.append(Artist(name, self.network))
return (score, shared_artists_seq)
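# Usage sketch (illustrative, not part of the library): comparing two users
# with the tasteometer. Assumes `user` is a User instance.
#
#     score, shared = user.compare_with_user("some_other_username")
#     print("Compatibility score: %s" % score)
#     print([a.get_name() for a in shared])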
def get_image(self):
"""Returns the user's avatar."""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract(doc, "image")
def get_url(self, domain_name=DOMAIN_ENGLISH):
"""Returns the url of the user page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "user") % {'name': name}
def get_library(self):
"""Returns the associated Library object. """
return Library(self, self.network)
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request(self.ws_prefix + ".Shout", False, params)
class AuthenticatedUser(User):
def __init__(self, network):
User.__init__(self, "", network)
def _get_params(self):
return {"user": self.get_name()}
def get_name(self):
"""Returns the name of the authenticated user."""
doc = self._request("user.getInfo", True, {"user": ""}) # hack
self.name = _extract(doc, "name")
return self.name
def get_recommended_events(self, limit=50, cacheable=False):
"""
Returns a sequence of Event objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(
limit, self, "user.getRecommendedEvents", cacheable):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def get_recommended_artists(self, limit=50, cacheable=False):
"""
Returns a sequence of Artist objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(
limit, self, "user.getRecommendedArtists", cacheable):
seq.append(Artist(_extract(node, "name"), self.network))
return seq
class _Search(_BaseObject):
"""An abstract class. Use one of its derivatives."""
def __init__(self, ws_prefix, search_terms, network):
_BaseObject.__init__(self, network, ws_prefix)
self._ws_prefix = ws_prefix
self.search_terms = search_terms
self._last_page_index = 0
def _get_params(self):
params = {}
for key in self.search_terms.keys():
params[key] = self.search_terms[key]
return params
def get_total_result_count(self):
"""Returns the total count of all the results."""
doc = self._request(self._ws_prefix + ".search", True)
return _extract(doc, "opensearch:totalResults")
def _retrieve_page(self, page_index):
"""Returns the node of matches to be processed"""
params = self._get_params()
params["page"] = str(page_index)
doc = self._request(self._ws_prefix + ".search", True, params)
return doc.getElementsByTagName(self._ws_prefix + "matches")[0]
def _retrieve_next_page(self):
self._last_page_index += 1
return self._retrieve_page(self._last_page_index)
class AlbumSearch(_Search):
"""Search for an album by name."""
def __init__(self, album_name, network):
_Search.__init__(self, "album", {"album": album_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Album objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("album"):
seq.append(Album(
_extract(node, "artist"),
_extract(node, "name"),
self.network))
return seq
class ArtistSearch(_Search):
"""Search for an artist by artist name."""
def __init__(self, artist_name, network):
_Search.__init__(self, "artist", {"artist": artist_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Artist objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("artist"):
artist = Artist(_extract(node, "name"), self.network)
artist.listener_count = _number(_extract(node, "listeners"))
seq.append(artist)
return seq
class TagSearch(_Search):
"""Search for a tag by tag name."""
def __init__(self, tag_name, network):
_Search.__init__(self, "tag", {"tag": tag_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Tag objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("tag"):
tag = Tag(_extract(node, "name"), self.network)
tag.tag_count = _number(_extract(node, "count"))
seq.append(tag)
return seq
class TrackSearch(_Search):
"""
Search for a track by track title. If you don't want to narrow the results
down by specifying the artist name, set it to empty string.
"""
def __init__(self, artist_name, track_title, network):
_Search.__init__(
self,
"track",
{"track": track_title, "artist": artist_name},
network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Track objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("track"):
track = Track(
_extract(node, "artist"),
_extract(node, "name"),
self.network)
track.listener_count = _number(_extract(node, "listeners"))
seq.append(track)
return seq
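# Illustrative use of the search classes above (a sketch, not part of the
# library itself): `network` is assumed to be an already-constructed network
# object, e.g. pylast.LastFMNetwork(...). Each call to get_next_page()
# advances the internal page counter and returns the next batch of results.
#
#   search = TrackSearch("Cher", "Believe", network)
#   print(search.get_total_result_count())
#   for track in search.get_next_page():
#       print(track.listener_count, track)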
class VenueSearch(_Search):
"""
Search for a venue by its name. If you don't want to narrow the results
down by specifying a country, set it to empty string.
"""
def __init__(self, venue_name, country_name, network):
_Search.__init__(
self,
"venue",
{"venue": venue_name, "country": country_name},
network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Track objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("venue"):
seq.append(Venue(_extract(node, "id"), self.network))
return seq
class Venue(_BaseObject):
"""A venue where events are held."""
# TODO: waiting for a venue.getInfo web service to use.
# TODO: As an intermediate use case, can pass the venue DOM element when
# using Event.get_venue() to populate the venue info, if the venue.getInfo
# API call becomes available this workaround should be removed
id = None
info = None
name = None
location = None
url = None
__hash__ = _BaseObject.__hash__
    def __init__(self, network_id, network, venue_element=None):
        _BaseObject.__init__(self, network, "venue")
        self.id = _number(network_id)
if venue_element is not None:
self.info = _extract_element_tree(venue_element)
self.name = self.info.get('name')
self.url = self.info.get('url')
self.location = self.info.get('location')
def __repr__(self):
return "pylast.Venue(%s, %s)" % (repr(self.id), repr(self.network))
@_string_output
def __str__(self):
return "Venue #" + str(self.id)
def __eq__(self, other):
return self.get_id() == other.get_id()
def _get_params(self):
return {self.ws_prefix: self.get_id()}
def get_id(self):
"""Returns the id of the venue."""
return self.id
def get_name(self):
"""Returns the name of the venue."""
return self.name
def get_url(self):
"""Returns the URL of the venue page."""
return self.url
def get_location(self):
"""Returns the location of the venue (dictionary)."""
return self.location
def get_upcoming_events(self):
"""Returns the upcoming events in this venue."""
doc = self._request(self.ws_prefix + ".getEvents", True)
return _extract_events_from_doc(doc, self.network)
def get_past_events(self):
"""Returns the past events held in this venue."""
        doc = self._request(self.ws_prefix + ".getPastEvents", True)
return _extract_events_from_doc(doc, self.network)
def md5(text):
"""Returns the md5 hash of a string."""
h = hashlib.md5()
h.update(_unicode(text).encode("utf-8"))
return h.hexdigest()
def _unicode(text):
if isinstance(text, six.binary_type):
return six.text_type(text, "utf-8")
elif isinstance(text, six.text_type):
return text
else:
return six.text_type(text)
def _string(string):
"""For Python2 routines that can only process str type."""
if isinstance(string, str):
return string
casted = six.text_type(string)
if sys.version_info[0] == 2:
casted = casted.encode("utf-8")
return casted
def cleanup_nodes(doc):
"""
Remove text nodes containing only whitespace
"""
for node in doc.documentElement.childNodes:
if node.nodeType == Node.TEXT_NODE and node.nodeValue.isspace():
doc.documentElement.removeChild(node)
return doc
def _collect_nodes(limit, sender, method_name, cacheable, params=None):
"""
Returns a sequence of dom.Node objects about as close to limit as possible
"""
if not params:
params = sender._get_params()
nodes = []
page = 1
end_of_pages = False
while not end_of_pages and (not limit or (limit and len(nodes) < limit)):
params["page"] = str(page)
doc = sender._request(method_name, cacheable, params)
doc = cleanup_nodes(doc)
main = doc.documentElement.childNodes[0]
if main.hasAttribute("totalPages"):
total_pages = _number(main.getAttribute("totalPages"))
elif main.hasAttribute("totalpages"):
total_pages = _number(main.getAttribute("totalpages"))
else:
raise Exception("No total pages attribute")
for node in main.childNodes:
if not node.nodeType == xml.dom.Node.TEXT_NODE and (
not limit or (len(nodes) < limit)):
nodes.append(node)
if page >= total_pages:
end_of_pages = True
page += 1
return nodes
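# Usage sketch for _collect_nodes (illustrative only, mirroring how the user
# methods above call it): `some_user` stands for any _BaseObject subclass
# instance whose _get_params()/_request() target a paginated web-service
# method.
#
#   nodes = _collect_nodes(100, some_user, "user.getRecommendedArtists", True)
#   names = [_extract(node, "name") for node in nodes]
#
# Passing limit=None collects every page the service reports through its
# totalPages/totalpages attribute.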
def _extract(node, name, index=0):
"""Extracts a value from the xml string"""
nodes = node.getElementsByTagName(name)
if len(nodes):
if nodes[index].firstChild:
return _unescape_htmlentity(nodes[index].firstChild.data.strip())
else:
return None
def _extract_element_tree(node):
"""Extract an element tree into a multi-level dictionary
NB: If any elements have text nodes as well as nested
elements this will ignore the text nodes"""
def _recurse_build_tree(rootNode, targetDict):
"""Recursively build a multi-level dict"""
def _has_child_elements(rootNode):
"""Check if an element has any nested (child) elements"""
for node in rootNode.childNodes:
if node.nodeType == node.ELEMENT_NODE:
return True
return False
for node in rootNode.childNodes:
if node.nodeType == node.ELEMENT_NODE:
if _has_child_elements(node):
targetDict[node.tagName] = {}
_recurse_build_tree(node, targetDict[node.tagName])
else:
val = None if node.firstChild is None else \
_unescape_htmlentity(node.firstChild.data.strip())
targetDict[node.tagName] = val
return targetDict
return _recurse_build_tree(node, {})
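# Worked example for _extract_element_tree (illustrative input/output only):
# given a DOM element for
#
#   <venue><name>Academy</name><location><city>Dublin</city></location></venue>
#
# the function returns {'name': 'Academy', 'location': {'city': 'Dublin'}}.
# The nested <location> element becomes a nested dict, and any text that sits
# directly inside an element which also has child elements is ignored, as the
# docstring above notes.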
def _extract_all(node, name, limit_count=None):
"""Extracts all the values from the xml string. returning a list."""
seq = []
for i in range(0, len(node.getElementsByTagName(name))):
if len(seq) == limit_count:
break
seq.append(_extract(node, name, i))
return seq
def _extract_top_artists(doc, network):
# TODO Maybe include the _request here too?
seq = []
for node in doc.getElementsByTagName("artist"):
name = _extract(node, "name")
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, network), playcount))
return seq
def _extract_top_albums(doc, network):
# TODO Maybe include the _request here too?
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _extract(node, "playcount")
seq.append(TopItem(Album(artist, name, network), playcount))
return seq
def _extract_artists(doc, network):
seq = []
for node in doc.getElementsByTagName("artist"):
seq.append(Artist(_extract(node, "name"), network))
return seq
def _extract_albums(doc, network):
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
seq.append(Album(artist, name, network))
return seq
def _extract_tracks(doc, network):
seq = []
for node in doc.getElementsByTagName("track"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
seq.append(Track(artist, name, network))
return seq
def _extract_events_from_doc(doc, network):
events = []
for node in doc.getElementsByTagName("event"):
events.append(Event(_extract(node, "id"), network))
return events
def _url_safe(text):
"""Does all kinds of tricks on a text to make it safe to use in a url."""
return url_quote_plus(url_quote_plus(_string(text))).lower()
def _number(string):
"""
Extracts an int from a string.
Returns a 0 if None or an empty string was passed.
"""
if not string:
return 0
elif string == "":
return 0
else:
try:
return int(string)
except ValueError:
return float(string)
def _unescape_htmlentity(string):
# string = _unicode(string)
mapping = htmlentitydefs.name2codepoint
for key in mapping:
string = string.replace("&%s;" % key, unichr(mapping[key]))
return string
def extract_items(topitems_or_libraryitems):
"""
Extracts a sequence of items from a sequence of TopItem or
LibraryItem objects.
"""
seq = []
for i in topitems_or_libraryitems:
seq.append(i.item)
return seq
class ScrobblingError(Exception):
def __init__(self, message):
Exception.__init__(self)
self.message = message
@_string_output
def __str__(self):
return self.message
class BannedClientError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(
self, "This version of the client has been banned")
class BadAuthenticationError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(self, "Bad authentication token")
class BadTimeError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(
self, "Time provided is not close enough to current time")
class BadSessionError(ScrobblingError):
def __init__(self):
ScrobblingError.__init__(
self, "Bad session id, consider re-handshaking")
class _ScrobblerRequest(object):
def __init__(self, url, params, network, request_type="POST"):
for key in params:
params[key] = str(params[key])
self.params = params
self.type = request_type
(self.hostname, self.subdir) = url_split_host(url[len("http:"):])
self.network = network
def execute(self):
"""Returns a string response of this request."""
if _can_use_ssl_securely():
connection = HTTPSConnection(
context=SSL_CONTEXT,
host=self.hostname
)
else:
connection = HTTPConnection(
host=self.hostname
)
data = []
for name in self.params.keys():
value = url_quote_plus(self.params[name])
data.append('='.join((name, value)))
data = "&".join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept-Charset": "utf-8",
"User-Agent": "pylast" + "/" + __version__,
"HOST": self.hostname
}
if self.type == "GET":
connection.request(
"GET", self.subdir + "?" + data, headers=headers)
else:
connection.request("POST", self.subdir, data, headers)
response = _unicode(connection.getresponse().read())
self._check_response_for_errors(response)
return response
def _check_response_for_errors(self, response):
"""
When passed a string response it checks for errors, raising any
exceptions as necessary.
"""
lines = response.split("\n")
status_line = lines[0]
if status_line == "OK":
return
elif status_line == "BANNED":
raise BannedClientError()
elif status_line == "BADAUTH":
raise BadAuthenticationError()
elif status_line == "BADTIME":
raise BadTimeError()
elif status_line == "BADSESSION":
raise BadSessionError()
elif status_line.startswith("FAILED "):
reason = status_line[status_line.find("FAILED ") + len("FAILED "):]
raise ScrobblingError(reason)
class Scrobbler(object):
"""A class for scrobbling tracks to Last.fm"""
session_id = None
nowplaying_url = None
submissions_url = None
def __init__(self, network, client_id, client_version):
self.client_id = client_id
self.client_version = client_version
self.username = network.username
self.password = network.password_hash
self.network = network
def _do_handshake(self):
"""Handshakes with the server"""
timestamp = str(int(time.time()))
if self.password and self.username:
token = md5(self.password + timestamp)
elif self.network.api_key and self.network.api_secret and \
self.network.session_key:
if not self.username:
self.username = self.network.get_authenticated_user()\
.get_name()
token = md5(self.network.api_secret + timestamp)
params = {
"hs": "true", "p": "1.2.1", "c": self.client_id,
"v": self.client_version, "u": self.username, "t": timestamp,
"a": token}
if self.network.session_key and self.network.api_key:
params["sk"] = self.network.session_key
params["api_key"] = self.network.api_key
server = self.network.submission_server
response = _ScrobblerRequest(
server, params, self.network, "GET").execute().split("\n")
self.session_id = response[1]
self.nowplaying_url = response[2]
self.submissions_url = response[3]
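    # The handshake reply parsed above is the plain-text response of the
    # Audioscrobbler submissions protocol (p=1.2.1 in the params); schematically
    # (placeholder values, not real ones):
    #
    #   OK
    #   <session id>
    #   <now-playing URL>
    #   <submissions URL>
    #
    # which is why response[1..3] are stored as session_id, nowplaying_url and
    # submissions_url.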
def _get_session_id(self, new=False):
"""
Returns a handshake. If new is true, then it will be requested from
the server even if one was cached.
"""
if not self.session_id or new:
self._do_handshake()
return self.session_id
def report_now_playing(
self, artist, title, album="", duration="", track_number="",
mbid=""):
_deprecation_warning(
"DeprecationWarning: Use Network.update_now_playing(...) instead")
params = {
"s": self._get_session_id(), "a": artist, "t": title,
"b": album, "l": duration, "n": track_number, "m": mbid}
try:
_ScrobblerRequest(
self.nowplaying_url, params, self.network
).execute()
except BadSessionError:
self._do_handshake()
self.report_now_playing(
artist, title, album, duration, track_number, mbid)
def scrobble(
self, artist, title, time_started, source, mode, duration,
album="", track_number="", mbid=""):
"""Scrobble a track. parameters:
artist: Artist name.
title: Track title.
time_started: UTC timestamp of when the track started playing.
source: The source of the track
SCROBBLE_SOURCE_USER: Chosen by the user
(the most common value, unless you have a reason for
choosing otherwise, use this).
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST: Non-personalised
broadcast (e.g. Shoutcast, BBC Radio 1).
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST: Personalised
recommendation except Last.fm (e.g. Pandora, Launchcast).
                SCROBBLE_SOURCE_LASTFM: Last.fm (any mode). In this case, the
5-digit recommendation_key value must be set.
SCROBBLE_SOURCE_UNKNOWN: Source unknown.
mode: The submission mode
SCROBBLE_MODE_PLAYED: The track was played.
SCROBBLE_MODE_LOVED: The user manually loved the track
(implies a listen)
SCROBBLE_MODE_SKIPPED: The track was skipped
(Only if source was Last.fm)
SCROBBLE_MODE_BANNED: The track was banned
(Only if source was Last.fm)
duration: Track duration in seconds.
album: The album name.
track_number: The track number on the album.
mbid: MusicBrainz ID.
"""
_deprecation_warning(
"DeprecationWarning: Use Network.scrobble(...) instead")
params = {
"s": self._get_session_id(),
"a[0]": _string(artist),
"t[0]": _string(title),
"i[0]": str(time_started),
"o[0]": source,
"r[0]": mode,
"l[0]": str(duration),
"b[0]": _string(album),
"n[0]": track_number,
"m[0]": mbid
}
_ScrobblerRequest(self.submissions_url, params, self.network).execute()
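    # Illustrative call (a sketch with placeholder values; `scrobbler` is
    # assumed to be a Scrobbler(network, client_id, client_version) instance,
    # and the SCROBBLE_* constants are defined elsewhere in this module). Note
    # that the deprecation warning above recommends Network.scrobble(...)
    # instead.
    #
    #   scrobbler.scrobble(
    #       "Cher", "Believe", int(time.time()), SCROBBLE_SOURCE_USER,
    #       SCROBBLE_MODE_PLAYED, 237, album="Believe", track_number=1)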
def scrobble_many(self, tracks):
"""
Scrobble several tracks at once.
tracks: A sequence of a sequence of parameters for each track.
The order of parameters is the same as if passed to the
scrobble() method.
"""
_deprecation_warning(
"DeprecationWarning: Use Network.scrobble_many(...) instead")
remainder = []
if len(tracks) > 50:
remainder = tracks[50:]
tracks = tracks[:50]
params = {"s": self._get_session_id()}
i = 0
for t in tracks:
_pad_list(t, 9, "")
params["a[%s]" % str(i)] = _string(t[0])
params["t[%s]" % str(i)] = _string(t[1])
params["i[%s]" % str(i)] = str(t[2])
params["o[%s]" % str(i)] = t[3]
params["r[%s]" % str(i)] = t[4]
params["l[%s]" % str(i)] = str(t[5])
params["b[%s]" % str(i)] = _string(t[6])
params["n[%s]" % str(i)] = t[7]
params["m[%s]" % str(i)] = t[8]
i += 1
_ScrobblerRequest(self.submissions_url, params, self.network).execute()
if remainder:
self.scrobble_many(remainder)
# End of file
|
the-stack_0_8894 | from datetime import datetime
from decimal import Decimal
import unittest
from werkzeug.datastructures import MultiDict
from pytz import timezone, utc
import pytest
from coaster.utils import LabeledEnum
import baseframe.forms as forms
from .fixtures import app1 as app
class MY_ENUM(LabeledEnum): # NOQA: N801
FIRST = (1, 'first', "First")
SECOND = (2, 'second', "Second")
THIRD = (3, 'third', "Third")
__order__ = (FIRST, SECOND, THIRD)
DEFAULT_JSONDATA = {'key': "val"}
class EnumForm(forms.Form):
position = forms.EnumSelectField("Position", lenum=MY_ENUM, default=MY_ENUM.THIRD)
position_no_default = forms.EnumSelectField(
"Position Without Default", lenum=MY_ENUM
)
class JsonForm(forms.Form):
jsondata = forms.JsonField("JSON Data", default=DEFAULT_JSONDATA)
jsondata_empty_default = forms.JsonField("JSON Data", default={})
jsondata_no_default = forms.JsonField("JSON No Default")
jsondata_no_dict = forms.JsonField("JSON No Dict", require_dict=False)
jsondata_no_decimal = forms.JsonField("JSON No Decimal", use_decimal=False)
class DateTimeForm(forms.Form):
naive = forms.DateTimeField("Date/time Field", naive=True, timezone='Asia/Kolkata')
aware = forms.DateTimeField("Date/time Field", naive=False, timezone='Asia/Kolkata')
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.ctx = app.test_request_context()
self.ctx.push()
def tearDown(self):
self.ctx.pop()
class TestEnumField(BaseTestCase):
def setUp(self):
super().setUp()
self.form = EnumForm(meta={'csrf': False})
def test_default(self):
assert self.form.position.data == 3
assert self.form.position_no_default.data is None
def test_process_valid(self):
self.form.process(
formdata=MultiDict({'position': 'second', 'position_no_default': 'third'})
)
assert self.form.validate() is True
assert self.form.position.data == 2
assert self.form.position_no_default.data == 3
def test_process_invalid(self):
self.form.process(formdata=MultiDict({'position': 'fourth'}))
assert self.form.validate() is False
def test_render(self):
assert (
self.form.position()
== '<select id="position" name="position"><option value="first">First</option><option value="second">Second</option><option selected value="third">Third</option></select>'
)
assert (
self.form.position_no_default()
== '<select id="position_no_default" name="position_no_default"><option value="first">First</option><option value="second">Second</option><option value="third">Third</option></select>'
)
class TestJsonField(BaseTestCase):
def setUp(self):
super().setUp()
self.form = JsonForm(meta={'csrf': False})
def test_default(self):
assert self.form.jsondata.data == DEFAULT_JSONDATA
assert self.form.jsondata_empty_default.data == {}
assert self.form.jsondata_no_default.data is None
def test_valid(self):
self.form.process(formdata=MultiDict({'jsondata': '{"key": "val"}'}))
assert self.form.validate() is True
def test_invalid(self):
self.form.process(
formdata=MultiDict({'jsondata': '{"key"; "val"}'})
) # invalid JSON
assert self.form.validate() is False
def test_empty_default(self):
self.form.process(
formdata=MultiDict(
{
'jsondata': '',
'jsondata_no_default': '',
'jsondata_empty_default': '',
}
)
)
assert self.form.jsondata.data == DEFAULT_JSONDATA
assert self.form.jsondata_empty_default.data == {}
assert self.form.jsondata_no_default.data is None
def test_nondict(self):
self.form.process(formdata=MultiDict({'jsondata': '43'}))
assert self.form.validate() is False
self.form.process(formdata=MultiDict({'jsondata': 'true'}))
assert self.form.validate() is False
self.form.process(formdata=MultiDict({'jsondata_no_dict': '43'}))
assert self.form.validate() is True
self.form.process(formdata=MultiDict({'jsondata_no_dict': 'true'}))
assert self.form.validate() is True
def test_unicode(self):
self.form.process(formdata=MultiDict({'jsondata': '{"key": "val😡"}'}))
assert self.form.validate() is True
assert self.form.jsondata.data == {"key": "val😡"}
def test_unicode_dumps(self):
self.form.jsondata.data = {"key": "val😡"}
assert self.form.jsondata._value() == '{\n "key": "val😡"\n}'
def test_decimal(self):
self.form.jsondata.data = {"key": Decimal('1.2')}
assert self.form.validate() is True
assert self.form.jsondata._value() == '{\n "key": 1.2\n}'
self.form.process(formdata=MultiDict({'jsondata': '{"key": 1.2}'}))
assert self.form.validate() is True
assert self.form.jsondata.data == {"key": Decimal('1.2')}
self.form.jsondata_no_decimal.data = {"key": Decimal('1.2')}
with self.assertRaises(TypeError):
self.form.jsondata_no_decimal._value()
self.form.process(formdata=MultiDict({'jsondata_no_decimal': '{"key": 1.2}'}))
assert self.form.validate() is True
assert self.form.jsondata_no_decimal.data == {"key": 1.2}
def test_array(self):
self.form.process(
formdata=MultiDict({'jsondata': '[{"key": "val"}, {"key2": "val2"}]'})
)
assert self.form.validate() is False
self.form.process(
formdata=MultiDict(
{'jsondata_no_dict': '[{"key": "val"}, {"key2": "val2"}]'}
)
)
assert self.form.validate() is True
assert self.form.jsondata_no_dict.data == [{"key": "val"}, {"key2": "val2"}]
def test_comment(self):
self.form.process(
formdata=MultiDict(
{
'jsondata': """
{
"key": "val" # test comment
}
"""
}
)
)
assert self.form.validate() is False
def test_non_serializable(self):
self.form.jsondata.data = {"key": datetime.now()}
with self.assertRaises(TypeError):
self.form.jsondata._value()
def test_escaped_label_text(self):
label = forms.Label('test', '<script>alert("test");</script>')
self.assertEqual(
label(for_='foo'),
"""<label for="foo"><script>alert("test");</script></label>""",
)
self.assertEqual(
label(**{'for': 'bar'}),
"""<label for="bar"><script>alert("test");</script></label>""",
)
# The fields are marked as timezone Asia/Kolkata, so local timestamps will be cast to
# UTC with 5:30 hours removed
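# For example, the input '2010-12-15 10:00' is read as Asia/Kolkata (UTC+5:30)
# and is therefore expected back as datetime(2010, 12, 15, 4, 30), i.e.
# 10:00 minus 5 hours 30 minutes.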
@pytest.mark.parametrize(
['test_input', 'expected_naive', 'expected_aware'],
[
# Blank input
([], None, None),
([''], None, None),
(['', ''], None, None),
(
['2010-12-15'],
datetime(2010, 12, 14, 18, 30),
datetime(2010, 12, 14, 18, 30, tzinfo=utc),
),
(
['2010-12-15T10:00'],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['2010-12-15', ''],
datetime(2010, 12, 14, 18, 30),
datetime(2010, 12, 14, 18, 30, tzinfo=utc),
),
(
['2010-12-15 10:00'],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['2010-12-15', '10:00'],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['2010-12-15 ', ' 10:00 '],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['15/12/2010', '10:00'],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['12/15/2010', '10:00'],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['Dec 15 2010', '10:00'],
datetime(2010, 12, 15, 4, 30),
datetime(2010, 12, 15, 4, 30, tzinfo=utc),
),
(
['Dec 15 2010', '10:00 UTC'],
datetime(2010, 12, 15, 10, 0),
datetime(2010, 12, 15, 10, 0, tzinfo=utc),
),
(
['15 Dec 2010', '10:00 UTC'],
datetime(2010, 12, 15, 10, 0),
datetime(2010, 12, 15, 10, 0, tzinfo=utc),
),
(
['2021-06-08T10:00'],
datetime(2021, 6, 8, 4, 30),
datetime(2021, 6, 8, 4, 30, tzinfo=utc),
),
(
['06/08/2021', '10:00'], # MDY order
datetime(2021, 6, 8, 4, 30),
datetime(2021, 6, 8, 4, 30, tzinfo=utc),
),
],
)
def test_date_time_field(test_input, expected_naive, expected_aware):
"""Assert various datetime inputs are recogized and processed accurately."""
with app.app_context():
form = DateTimeForm(meta={'csrf': False})
form.process(
formdata=MultiDict(
[('naive', _v) for _v in test_input]
+ [('aware', _v) for _v in test_input],
)
)
assert form.naive.data == expected_naive
assert form.aware.data == expected_aware
if expected_naive is not None:
assert form.naive._value() == utc.localize(expected_naive).astimezone(
form.naive.timezone
).strftime(form.naive.display_format)
else:
assert form.naive._value() == ''
if expected_aware is not None:
assert form.aware._value() == expected_aware.astimezone(
form.aware.timezone
).strftime(form.aware.display_format)
else:
assert form.aware._value() == ''
@pytest.mark.parametrize(
'test_input',
[
'2020-2020-2020',
'100000-01-01',
],
)
def test_date_time_field_badvalue(test_input):
"""Assert bad datetime input is recorded as a ValidationError."""
with app.app_context():
form = DateTimeForm(meta={'csrf': False})
form.process(formdata=MultiDict({'naive': test_input, 'aware': test_input}))
form.validate()
assert form.naive.errors == [form.naive.message]
assert form.aware.errors == [form.aware.message]
def test_date_time_field_timezone():
"""Assert timezone in DateTimeField is an object."""
with app.app_context():
form = DateTimeForm(meta={'csrf': False})
assert form.naive.timezone == timezone('Asia/Kolkata')
assert form.aware.timezone == timezone('Asia/Kolkata')
form.naive.timezone = None
assert form.naive.timezone is not None # Picked up from get_timezone
|
the-stack_0_8895 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for margin_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from large_margin import margin_loss
class MarginLossTest(tf.test.TestCase, parameterized.TestCase):
def test_import(self):
self.assertIsNotNone(margin_loss)
@parameterized.parameters(
(i, j, k) for i in [1, 2, np.inf] for j in [1, 5]
for k in [True, False])
def test_loss(self, dist_norm, top_k, worst_case_loss):
image_shape = (12, 12, 1)
num_classes = 10
batch_size = 3
images = tf.convert_to_tensor(
np.random.rand(*((batch_size,) + image_shape)), dtype=tf.float32)
labels = tf.convert_to_tensor(
np.random.randint(0, high=num_classes, size=batch_size), dtype=tf.int32)
# Toy model.
endpoints = {}
endpoints["input_layer"] = images
# Convolution layer.
net = tf.layers.conv2d(
images,
filters=8,
kernel_size=3,
strides=(1, 1),
padding="same",
activation=tf.nn.relu)
endpoints["conv_layer"] = net
# Global average pooling layer.
net = tf.reduce_mean(net, axis=[1, 2])
# Output layer.
logits = tf.layers.dense(net, num_classes)
loss = margin_loss.large_margin(
logits=logits,
one_hot_labels=tf.one_hot(labels, num_classes),
layers_list=[endpoints["input_layer"], endpoints["conv_layer"]],
gamma=10000,
alpha_factor=4,
top_k=top_k,
dist_norm=dist_norm,
worst_case_loss=worst_case_loss)
var_list = tf.global_variables()
init = tf.global_variables_initializer()
# Test gradients are not None.
gs = tf.gradients(loss, var_list)
for g in gs:
self.assertIsNotNone(g)
# Test loss shape.
with self.test_session() as sess:
sess.run(init)
self.assertEqual(sess.run(loss).shape, ())
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_8896 | # Copyright (c) 2021 Zenqi
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import time
from lemondb.middleware.base import BaseMiddleware
from lemondb.plugin import (
BasePlugin,
LemonPlugin
)
import pathlib
from lemondb.types import (
Optional,
Any,
Lambda,
Iterable,
Mapping,
)
from lemondb.query import (
SearchQuery,
Linq,
LemonCursor
)
from lemondb.server import (
LemonServer,
LemonClient
)
import socket
from urllib.parse import (
parse_qsl,
urlparse,
)
from lemondb.utils import (
iterate_dict,
typenize,
untypenize
)
from lemondb.middleware import JsonMiddleware
from lemondb.storage import LemonStorage
from lemondb.constants import ops
from lemondb.logger import logger
from lemondb.errors import SearchQueryError
from lemondb.globals import version
from warnings import warn
import re
def catch_exceptions(decorator=None):
"""
A Decorator used for catching exception. This decorator
is wrapper to check weather the logger (loguru) plugin
is installed and use it as decorator else ignore. Since
loguru.catch does not accept functions it should be used
directly as a decorator.
"""
condition = True if logger else False
if not decorator:
decorator = logger.catch if logger else None
def deco(func):
if not condition:
return func
return decorator(func)
return deco
class LemonDB:
"""
    NOTE: For server & client use, use the scheme lemondb:// or
        http:// so that LemonDB automatically detects whether the given
        name is a client or a server, or manually pass the `client` /
        `server` keyword arguments to avoid the slow detection check.
    LemonDB is a simple and lightweight document oriented database
    written in pure Python 3, tried on versions `3.9` & `3.8`. It
    should also work on versions <= 3.7. This class handles all
    operations, including storing documents in a file.
    For server & client, make sure to use lemondb:// as the scheme
    for the server. The host, port and keyword arguments given by
    the query string are recognized and parsed from it.
Based on performance, LemonDB comes in first place ahead of the
popular `TinyDB`, but it is not expected to replace `TinyDB`.
    Here are the results for the database operation that stores
    1000 randomly generated strings.
LemonDB: 20.848030 / 20.85 seconds
TinyDB: 53.912508 / 53.91 seconds
    It is actually 2x faster than TinyDB, and it can be a little bit
    faster still, since LemonDB supports different JSON serializers
    that are faster than the standard `json` library. It supports:
- `simplejson (Estimated result for 1000 insert operation: 27.86 sec)`
- `ujson (Estimated result for 1000 insert operation: 22.88 sec)`
- `hyperjson (Estimated result for 1000 insert operation: 20.18 sec)`
    NOTE: LemonDB supports table operations, where you store data inside
    a table. You can create / get a table by calling the `table` method:
>>> from lemondb import LemonDB
>>> db = LemonDB('lemon.json')
>>> names = db.table('name') #: Create / Get the table .
>>> names.insert({'name': 'John Doe'})
>>> {'name': 'John Doe'}
    Last but not least, LemonDB supports database encryption with a
    password, also known as Sidle Encryption (https://github.com/zxnqi/sidle).
    By default LemonDB allows you to install the `sidle` library in order
    to do the operation. You can access it by using the standard middleware
    `lemondb.middleware.SidleMiddleware`, which accepts a positional argument
    `password`. Also, make sure to include `lemondb.plugin.SidlePlugin`.
>>> from lemondb import LemonDB
    >>> from lemondb.plugin import SidlePlugin
>>> from lemondb.middleware import SidleMiddleware
>>> ...
    >>> db = LemonDB(
    >>>     'test.json',
    >>>     middleware_cls=SidleMiddleware('password'),
    >>>     plugin_cls=SidlePlugin
    >>> )
Parameters:
:param name (str):
The name of the database. It can be a file name.
:param plugin_cls (BasePlugin : Optional):
The base plugin for Lemon DB. The plugin runs
everytime the database is called or initialized.
Default value: LemonPlugin
:param middleware_cls (BaseMiddleware : Optional):
The middleware for the document that handles read,
write and delete operation on the file given.
Default Value: JsonMiddleware
:param storage_cls (Storage):
Set the storage class for read and writing data.
Default Value: LemonStorage
Server Example:
>>> from lemondb import LemonDB
>>> db = LemonDB('lemondb://0.0.0.0:3000', server=True)
Client Example:
>>> from lemondb import LemonDB
>>> db = LemonDB('lemondb://localhost:3000', client=True)
>>> db.insert({'name': 'John Doe'})
Example:
>>> from lemondb import LemonDB
>>> db = LemonDB('test.json')
>>> db.insert({
>>> 'name': 'John Doe'
>>> })
>>> {'name': 'John Doe'}
>>> ...
>>> #: For query searching
>>> from lemondb import Query
>>> query = Query()
>>> db.search(query.name == 'John Doe')
>>> [{'name': 'John Doe'}]
Release Changes: v.0.0.3:
        The new release v0.0.3 adds new features: a socket server &
        client feature, so that you can run the database on a VPS or
        any hosting server.
Release Changes: v0.0.7:
        Massive bug fixes, including the server & client. It now uses a
        UDP socket implementation instead for faster performance. Also
        added several query forms, such as dict queries, to make things easier.
Release Changes: v1.0.0b
        Added support for multiple types that the json serializer can't
        serialize, such as `bytes`, `datetime` and more. Also added
        versioning of the database.
Example:
>>> from lemondb import LemonDB
>>> from datetime import datetime
>>> db = LemonDB('db')
>>> db.insert({'time_id': 0, 'time': datetime.now()})
>>> ...
>>> #: Searching for the database
        >>> db.find_one({'time_id': 0})
>>> {'time_id': 0, 'time': datetime.datetime(...)}
"""
#: The path for the database.
db_path: pathlib.Path
#: The default table for the database
default_table: str = "_table"
#: LemonCLient Instance
#: versionAdded: 0.0.3
client_instance: LemonClient = None
#: LemonServer Instance
#: versionAdded: 0.0.3
server_instance: LemonServer = None
#: Logger instance
logger = None
def __init__(
self,
name: str,
plugin_cls: Optional[BasePlugin] = None,
middleware_cls: Optional[BaseMiddleware] = None,
storage_cls: Optional[LemonStorage] = None,
**kwargs
):
"""
Initialize Lemon DB
Parameters:
:param name (str):
The name of the database. It can be a file name.
:param plugin_cls (BasePlugin : Optional):
The base plugin for Lemon DB. The plugin runs
everytime the database is called or initialized.
Default value: LemonPlugin
:param middleware_cls (BaseMiddleware : Optional):
The middleware for the document that handles read,
write and delete operation on the file given.
Default Value: JsonMiddleware
:param storage_cls (Storage):
Set the storage class for read and writing document.
Default Value: LemonStorage
Example:
>>> from lemondb import LemonDB
>>> db = LemonDB('test.json')
>>> db.insert({'name': 'John Doe'})
"""
self.name = name
self.kwargs = kwargs
self.db_path = pathlib.Path(self.name)
self.repr_name = type(self).__name__
self.plugin_cls = plugin_cls
self.server = self.kwargs.get('server', False)
self.client = self.kwargs.get('client', False)
if logger and self.kwargs.get('debug', False):
#: added -> v0.0.4
self.set_logger()
if not plugin_cls:
self.plugin_cls = LemonPlugin()
else:
self.plugin_cls = plugin_cls
if not middleware_cls:
self.middleware_cls = JsonMiddleware()
else:
self.middleware_cls = middleware_cls
if not storage_cls:
self.storage_cls = LemonStorage(
path=self.db_path,
middleware_cls=self.middleware_cls
)
else:
self.storage_cls = storage_cls
if not 'table_name' in self.kwargs:
self.kwargs.__setitem__('table_name', self.default_table)
self.table_name = self.kwargs.get('table_name', self.default_table)
if self.table_name:
self.default_table = self.table_name
if not self.client and not self.server \
and self.kwargs.get('host_checking', True):
checking = self.__check_if_server_client()
if checking:
self.client = True
elif checking == 0:
self.server = True
if self.server:
parsed = self.__parse_url(self.name)
if self.logger:
self.logger.info('Binding server -> : {h}:{p}'.format(
h=parsed['host'],
p=parsed['port']
))
db_dir = pathlib.Path().home() / '.lemondb' / 'db'
if not db_dir.exists():
os.mkdir(db_dir.absolute())
self.name = str(
(db_dir / '{h}-{p}.db'.format(
h=parsed['host'],
p=parsed['port']
)).absolute()
)
self.storage_cls = LemonStorage(
path=(db_dir / '{h}-{p}.db'.format(
h=parsed['host'],
p=parsed['port']
)).absolute(),
middleware_cls=self.middleware_cls
)
db = LemonDB(self.name, host_checking=False)
self.run_plugin(plugin_cls=plugin_cls)
if not (db_dir / '{h}-{p}.db'.format(
h=parsed['host'],
p=parsed['port'])).exists():
self.plugin_cls._init_db(version)
self.server_instance = LemonServer(
host=(parsed['host'], parsed['port']),
db=db
)
self.server_instance.run()
elif self.client:
parsed = self.__parse_url(self.name)
if self.logger:
self.logger.info('Client Instance: {h}:{p}'.format(
h=parsed['host'],
p=parsed['port'])
)
self.client_instance = LemonClient(
host=(parsed['host'], parsed['port'])
)
self.run_plugin(plugin_cls=plugin_cls)
if not self.db_path.exists() and not self.client and not self.server:
self.plugin_cls._init_db(version)
if self.server:
self.repr_name = 'LemonServer'
elif self.client:
self.repr_name = 'LemonClient'
_data = self.storage_cls.read()
v = _data.get('__version__', None)
        if not v:
            warn('Version not found, please recreate the database or migrate using the `migrate` function')
elif v < version:
warn('The database is created from the previous LemonDB version. You can migrate using `migrate`')
@catch_exceptions()
def migrate(self):
start = time.time()
if self.logger:
self.logger.info("Migrating -> {} ...".format('.'.join([str(x) for x in version])))
v = self.storage_cls.read().get('__version__', None)
if not v:
warn('Version not found, it may cause error')
elif v[0] == 0:
raise RuntimeError('The database is created from the old version.')
elif v == version:
if self.logger:
self.logger.info('Database is already updated')
for _ in self.tables():
i = self.items(_)
if i != [{}]:
if i: self.insert_many(i)
self.__update_version(version)
if self.logger:
self.logger.success('All items were re-inserted succesfully: {:.2f}s'.format(
time.time() - start
))
@catch_exceptions()
def table(self, name: str, **options):
"""
The table for the database. If the given
table name doesnt exist then create a new one.
The table handles a sorted dictionary that contains
the data.
"""
options.__setitem__('table_name', name)
return LemonDB(
name=name,
plugin_cls=self.plugin_cls,
middleware_cls=self.middleware_cls,
storage_cls=self.storage_cls,
**options
)
@catch_exceptions()
def tables(self):
"""
Get all table name and return a list.
"""
return [k for k in self.storage_cls.read().keys() if k != '__version__']
@catch_exceptions()
def items(self, table_name: Optional[str] = None, **options):
"""
Return all items from the given table, packed on a single list
"""
table_name = table_name or self.table_name
return_dict = options.get('dict', False)
item = options.get('item', False)
data = self.storage_cls.read()
if self.client_instance:
return self.client_instance.send({
'op': 'items',
'data': table_name,
'kwargs': options
})
if item:
l = []
for k,v in data.get(table_name).items():
l.append(v)
return l
_items = [{k:v} for k,v in data.get(table_name).items()]
if return_dict:
for k,v in data.get(table_name).items():
_items = [{k:v} for k,v in v.items()]
return _items
@catch_exceptions()
def clear(self):
"""
Clear all item from the database including the tables and
create a new default table name.
"""
if self.client_instance:
self.client_instance.send({'op': 'clear'})
data = self.storage_cls.read()
data.clear()
self.plugin_cls._init_db(version)
return data
@catch_exceptions()
def insert(self, item: Mapping, **options):
"""
Insert a item to the database. The item should
be a mapping `(dict)`.
Parameter:
:param item (Mapping):
The item to be added to the database.
Example:
>>> from lemondb import LemonDB
>>> db = LemonDB('test')
>>> db.insert({'name': 'zenqi'})
Retun:
The item to be inserted.
"""
_item = item
#: If the data is set, then convert it to list.
if isinstance(item, set):
item = list(item)
else:
item = typenize(item)
if self.client_instance:
return self.client_instance.send({
'op': 'insert',
'data': item,
'kwargs': options
})
raw_data = self.storage_cls.read(False)
raw = False
if not self.db_path.exists():
self.plugin_cls._init_db(version)
table = options.pop('table', self.default_table)
if table:
_r_d = raw_data.get(table, None)
if not _r_d:
_r_d = {table: {}}
if table == self.default_table:
item = self.storage_cls._increment(
table=self.__read_table__(), item=item)
else:
item = self.__construct_table(
table=table,
data=_r_d,
raw=item
)
self.storage_cls.write(item, table_name=table)
return _item
@catch_exceptions()
def insert_many(self, iterable: Iterable):
"""
        Similar to `insert`, however it inserts all items
        from the given iterable / list.
"""
if self.client_instance:
return self.client_instance.send({
'op': 'insert_many',
'data': iterable
})
for i in iterable:
if self.client_instance:
self.client_instance.send(
op='insert_many',
data=i
)
else: self.insert(i)
return iterable
@catch_exceptions()
def delete(
self,
query: Any,
**options
):
"""
Delete a key from a query given. The query accept
3 types. Similar to `search`.
Parameter:
query (Any):
The query of the key to delete.
all (Optional[Boolean]):
(added on v0.0.2)
Set if all existing keys/simillar value
to be deleted. Default Value: `True`
Examples:
>>> query = Query()
>>> db.delete(query.name == 'John Doe')
>>> ...
Return:
The deleted item.
"""
if self.client_instance:
return self.client_instance.send({
'op': 'delete',
'data': query,
'kwargs': options
})
all = options.pop('all', True)
if isinstance(query, Mapping):
self.storage_cls.delete(query, all=all)
return query
else:
try:
if all:
data = self.search(query)
else:
data = self.search(query)[0]
except IndexError:
# TODO: No result found on a search.
return None
self.storage_cls.delete(data, all=all)
return data
@catch_exceptions()
def update(self, query: Any, item: Mapping):
"""
ADDED: `v0.0.2`
Update the data from the default given table name.
        This performs a `search` query, which can be of 3 types,
        and updates the first result of the query.
Parameters:
query (Any):
The query syntax for searching item
item (Mapping):
The item to be replace
Example:
>>> from lemondb import LemonDB, Query
>>> db = LemonDB('test.json')
>>> query = Query()
>>> ...
>>> db.insert({'name': 'John Doe', 'password': '1234'})
>>> ...
>>> db.update(
>>> query.name == 'John Doe',
>>> {'password': 'newpassword'}
>>> )
>>> ...
"""
if self.client_instance:
return self.client_instance.send({
'op': 'update',
'data': query,
'item': item
})
result = self.find_one(query)
if not result:
#: TODO: Searching failed. No result found
            raise SearchQueryError('The search query does not exist in the table/database')
data = self.storage_cls.read(False, remove_version=True)
for table, value in list(data.items()):
if not table == '__version__':
for k,v in list(value.items()):
if untypenize(v) == result:
data[table][k].update(typenize(item))
break
self.storage_cls.write(
data,
table_name=self.table_name,
mode='w',
raw=True
)
return item
@catch_exceptions()
def search(self, query=None, **options):
"""
        Search for an item in the database. The query accepts
        4 types: the standard `SearchQuery`, a `lambda`
        function, a dict query, and a `re` pattern.
Parameter:
query (Any):
The query of the key to search.
**options(Kwargs):
`rate (int)`:
The rate to index the first appearance of int
from the data. For example:
By setting the rate to 2, it will return the
first 2 item
Example:
>>> from lemondb import LemonDB, Query
>>> db = LemonDB('test.json')
>>> db.insert({'name': 'John Doe'})
>>> ...
>>> query = Query()
>>> db.search(query.name == 'John Doe')
>>> [{"name": "John Doe"}]
Return:
The list of possible result for the queries.
"""
if self.client_instance:
return self.client_instance.send({
'op': 'search',
'data': query,
'kwargs': options
})
rate = options.pop('rate', None)
if self.client_instance:
self.client_instance.send(
op='search',
data=query
)
items = self.items(dict=True)
result = []
use_lambda = False
use_re = False
use_sq = False
use_dict = False
if isinstance(query, Lambda):
use_lambda = True
elif isinstance(query, SearchQuery):
use_sq = True
elif isinstance(query, dict):
use_dict = True
elif isinstance(query, str):
use_re = True
if not query:
c = LemonCursor(self.items(item=True))
if rate and len(c) <= 0:
return []
elif rate and rate <= 1:
c = c.all()[0]
elif rate:
c = c.all()[:rate]
return c
if use_dict:
query = list(query.items())
c = LemonCursor([])
for i in self.items(item=True):
item = list(i.items())
#: Convert the list of items into set and check for same key, value
if list(filter(lambda x: x in query, item)):
c.add_query_result(i)
if rate and len(c) <= 0:
return []
elif rate and rate <= 1:
c = c.all()[0]
elif rate:
c = c.all()[:rate]
return c
reconstructed_list = []
for i in items:
for k,v in i.items():
if use_re:
for _, rv in iterate_dict(v):
if re.search(query, str(rv), re.IGNORECASE):
result.append(v)
else:
reconstructed_list.append(v)
_query = Linq(reconstructed_list)
if use_sq:
op, key, item = query()
def wrapper(i):
if isinstance(key, str):
_key = key.lower()
m = re.search(_key, i[item], re.IGNORECASE)
if m:
return ops[op](i[item], m.group())
else:
#: If there is no match, then just ignore.
return ops[op](i[item], key)
return ops[op](i[item], key)
c = LemonCursor(_query.where(wrapper).to_list())
if rate and len(c) <= 0:
return []
elif rate and rate <= 1:
c = c.all()[0]
elif rate:
c = c.all()[:rate]
return c
if use_lambda:
c = LemonCursor(_query.where(query).to_list())
if rate and len(c) <= 0:
return []
elif rate and rate <= 1:
c = c.all()[0]
elif rate:
c = c.all()[:rate]
return c
c = LemonCursor(result)
if rate and len(c) <= 0:
return []
elif rate and rate <= 1:
c = c.all()[0]
elif rate:
c = c.all()[:rate]
return c
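    # Illustrative query forms accepted by search()/find(), following the
    # class docstring (assumes `query = Query()` from `from lemondb import
    # Query` and a record {'name': 'John Doe'} already inserted):
    #
    #   db.search(query.name == 'John Doe')                 # SearchQuery
    #   db.search(lambda item: item['name'] == 'John Doe')  # lambda predicate
    #   db.search({'name': 'John Doe'})                     # dict query
    #   db.search('john')                                   # `re` pattern, case-insensitive
    #   db.search(query.name == 'John Doe', rate=1)         # first match only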
@catch_exceptions()
def find_one(self, query=None):
"""
Fetch the query and return the first appearance from the
database.
Example:
>>> db.find_one({'name': 'John Doe'})
>>> {'name': 'John Doe', 'user_id': 19123713123}
"""
if self.client_instance:
return self.client_instance.send({
'op': 'find_one',
'data': query
})
return self.search(query, rate=1)
@catch_exceptions()
def find(self, query=None, **options):
"""
        Similar to, and an alias for, the `search` function.
"""
if self.client_instance:
return self.client_instance.send({
'op': 'find_one',
'data': query,
'kwargs': options
})
return self.search(query, **options)
def __len__(self):
data = self.storage_cls.read()
return len(data[self.default_table])
def __repr__(self) -> str:
return "{name}(table_name=\"{table}\", length={length}, table_count={table_count})".format(
name=self.repr_name,
table=self.default_table,
length=len(self),
table_count=len(self.tables()),
)
def __read_table__(self):
return self.storage_cls.read(False)\
.get(
self.table_name,
{self.table_name: {}}
)
def __construct_table(
self,
table: str,
data: Mapping,
raw: Optional[Mapping] = {},
):
"""
Create a table for the given data
"""
if not raw:
_ = {table: {}}
elif raw and data:
_ = self.storage_cls._increment(
table=self.__read_table__(),
item=raw
)
else:
_ = self.storage_cls._increment(
table=self.__read_table__(),
item=raw
)
data.update(_); return data
def __check_if_server_client(self):
"""
Check if the db name is server or client
"""
# Match if the given db name is a server pattern
pattern = re.compile(
r'^(?:lemondb|http)s?://' # lemondb:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE
)
m = re.match(pattern, self.name)
if m:
#: Try if it is client or server
parsed = self.__parse_url(self.name)
try:
sock = socket.socket()
sock.connect((parsed['host'], parsed['port']))
return 1 #: 1 means client
except socket.error:
return 0 #: 0 means server
return None
def __parse_url(self, url: str):
"""
Parse the url and return a dictionary.
Version Added: 0.0.3
"""
parsed = urlparse(url)
q = dict(parse_qsl(parsed.query))
return {
'scheme': parsed.scheme,
'host': parsed.hostname,
'port': parsed.port,
'query': q,
}
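    # Example of the shape returned (illustrative):
    #   __parse_url('lemondb://localhost:3000?debug=1')
    #   -> {'scheme': 'lemondb', 'host': 'localhost', 'port': 3000,
    #       'query': {'debug': '1'}}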
def __update_version(self, v: list):
data = self.storage_cls.read(False)
data.update({'__version__': v})
self.storage_cls.write(data, table_name=None, raw=True, mode='w')
return ".".join([str(x) for x in v])
def run_plugin(self, plugin_cls: Any):
"""
Seperate function to run plugin.
Version Added: 0.0.3
"""
try:
#: Run the plugin and give all parameters
self.plugin_cls.run(
name=self.name,
storage_cls=self.storage_cls,
plugin_cls=self.plugin_cls,
middleware_cls=self.middleware_cls,
**self.kwargs
)
except TypeError:
self.plugin_cls = plugin_cls()
self.plugin_cls.run(
name=self.name,
storage_cls=self.storage_cls,
plugin_cls=self.plugin_cls,
middleware_cls=self.middleware_cls,
**self.kwargs
)
def set_logger(self):
self.logger = logger
find.__doc__ += search.__doc__
|
the-stack_0_8898 | """empty message
Revision ID: 237df1268348
Revises:
Create Date: 2021-07-29 21:33:34.739710
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '237df1268348'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('add_newsletter',
sa.Column('newsletter_id', sa.Integer(), nullable=False),
sa.Column('subject', sa.String(length=50), nullable=True),
sa.Column('opener', sa.String(length=400), nullable=True),
sa.Column('preview', sa.String(length=400), nullable=True),
sa.PrimaryKeyConstraint('newsletter_id')
)
op.create_table('article_category',
sa.Column('category_id', sa.Integer(), nullable=False),
sa.Column('category_name', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('category_id')
)
op.create_table('articles',
sa.Column('article_id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(length=100), nullable=True),
sa.Column('title', sa.String(length=250), nullable=True),
sa.Column('description', sa.String(length=500), nullable=True),
sa.Column('time', sa.String(length=300), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('newsletter_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['article_category.category_id'], ),
sa.ForeignKeyConstraint(['newsletter_id'], ['add_newsletter.newsletter_id'], ),
sa.PrimaryKeyConstraint('article_id')
)
op.create_table('newsletter_campaign',
sa.Column('Newsletter_campaign_id', sa.Integer(), nullable=False),
sa.Column('campaign_id', sa.String(length=50), nullable=True),
sa.Column('newsletter_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['newsletter_id'], ['add_newsletter.newsletter_id'], ),
sa.PrimaryKeyConstraint('Newsletter_campaign_id')
)
op.create_table('newsletter_schedule',
sa.Column('schedule_id', sa.Integer(), nullable=False),
sa.Column('newsletter_id', sa.Integer(), nullable=True),
sa.Column('schedule_date', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['newsletter_id'], ['add_newsletter.newsletter_id'], ),
sa.PrimaryKeyConstraint('schedule_id')
)
op.create_table('newsletter_content',
sa.Column('newsletter_content_id', sa.Integer(), nullable=False),
sa.Column('newsletter_id', sa.Integer(), nullable=True),
sa.Column('article_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['article_id'], ['articles.article_id'], ),
sa.ForeignKeyConstraint(['newsletter_id'], ['add_newsletter.newsletter_id'], ),
sa.PrimaryKeyConstraint('newsletter_content_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('newsletter_content')
op.drop_table('newsletter_schedule')
op.drop_table('newsletter_campaign')
op.drop_table('articles')
op.drop_table('article_category')
op.drop_table('add_newsletter')
# ### end Alembic commands ###
|
the-stack_0_8900 | # kontonr.py - functions for handling Norwegian bank account numbers
# coding: utf-8
#
# Copyright (C) 2018 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Konto nr. (Norwegian bank account number)
Konto nr. is the country-specific part in Norwegian IBAN codes. The number
consists of 11 digits, the first 4 are the bank identifier and the last is a
check digit. This module does not check if the bank identifier exists.
More information:
* https://www.ecbs.org/iban/norway-bank-account-number.html
>>> validate('8601 11 17947')
'86011117947'
>>> validate('0000.4090403') # postgiro bank code
'4090403'
>>> validate('8601 11 17949') # invalid check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('86011117947')
'8601.11.17947'
>>> to_iban('8601 11 17947')
'NO93 8601 11 17947'
"""
from stdnum import luhn
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' .-').strip()
if number.startswith('0000'):
number = number[4:] # strip leading 0000 postgiro bank code
return number
def _calc_check_digit(number):
"""Calculate the check digit for the 11-digit number."""
weights = (6, 7, 8, 9, 4, 5, 6, 7, 8, 9)
return str(sum(w * int(n) for w, n in zip(weights, number)) % 11)
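# Worked example (illustrative): for the valid number 86011117947 the first ten
# digits weighted as above give 8*6 + 6*7 + 0*8 + 1*9 + 1*4 + 1*5 + 1*6 + 7*7 + 9*8 + 4*9 = 271,
# and 271 % 11 == 7, which matches the trailing check digit.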
def validate(number):
"""Check if the number provided is a valid bank account number."""
number = compact(number)
if not isdigits(number):
raise InvalidFormat()
if len(number) == 7:
luhn.validate(number)
elif len(number) == 11:
if _calc_check_digit(number) != number[-1]:
raise InvalidChecksum()
else:
raise InvalidLength()
return number
def is_valid(number):
"""Check if the number provided is a valid bank account number."""
try:
return bool(validate(number))
except ValidationError:
return False
def to_iban(number):
"""Convert the number to an IBAN."""
from stdnum import iban
separator = ' ' if ' ' in number else ''
return separator.join((
'NO' + iban.calc_check_digits('NO00' + number),
number))
def format(number):
"""Reformat the number to the standard presentation format."""
number = compact(number)
number = (11 - len(number)) * '0' + number
return '.'.join([
number[:4],
number[4:6],
number[6:],
])
|
the-stack_0_8903 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe import _
from frappe.desk.doctype.notification_settings.notification_settings import (
is_email_notifications_enabled_for_type,
is_notifications_enabled,
set_seen_value,
)
from frappe.model.document import Document
class NotificationLog(Document):
def after_insert(self):
frappe.publish_realtime("notification", after_commit=True, user=self.for_user)
set_notifications_as_unseen(self.for_user)
if is_email_notifications_enabled_for_type(self.for_user, self.type):
try:
send_notification_email(self)
except frappe.OutgoingEmailError:
frappe.log_error(message=frappe.get_traceback(), title=_("Failed to send notification email"))
def get_permission_query_conditions(for_user):
if not for_user:
for_user = frappe.session.user
if for_user == "Administrator":
return
return """(`tabNotification Log`.for_user = '{user}')""".format(user=for_user)
def get_title(doctype, docname, title_field=None):
if not title_field:
title_field = frappe.get_meta(doctype).get_title_field()
title = docname if title_field == "name" else frappe.db.get_value(doctype, docname, title_field)
return title
def get_title_html(title):
return '<b class="subject-title">{0}</b>'.format(title)
def enqueue_create_notification(users, doc):
"""
During installation of new site, enqueue_create_notification tries to connect to Redis.
This breaks new site creation if Redis server is not running.
We do not need any notifications in fresh installation
"""
if frappe.flags.in_install:
return
doc = frappe._dict(doc)
if isinstance(users, str):
users = [user.strip() for user in users.split(",") if user.strip()]
users = list(set(users))
frappe.enqueue(
"frappe.desk.doctype.notification_log.notification_log.make_notification_logs",
doc=doc,
users=users,
now=frappe.flags.in_test,
)
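# Illustrative usage sketch. The values below are placeholders; the keys
# (type, document_type, document_name, subject, from_user) mirror the attributes
# read elsewhere in this module, but verify them against your Notification Log
# doctype before relying on this exact shape:
#
# enqueue_create_notification(
#     users="[email protected], [email protected]",
#     doc={
#         "type": "Alert",
#         "document_type": "Task",
#         "document_name": "TASK-0001",
#         "subject": "Task TASK-0001 needs review",
#         "from_user": "[email protected]",
#     },
# )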
def make_notification_logs(doc, users):
from frappe.social.doctype.energy_point_settings.energy_point_settings import (
is_energy_point_enabled,
)
for user in users:
if frappe.db.exists("User", {"email": user, "enabled": 1}):
if is_notifications_enabled(user):
if doc.type == "Energy Point" and not is_energy_point_enabled():
return
_doc = frappe.new_doc("Notification Log")
_doc.update(doc)
_doc.for_user = user
if _doc.for_user != _doc.from_user or doc.type == "Energy Point" or doc.type == "Alert":
_doc.insert(ignore_permissions=True)
def send_notification_email(doc):
if doc.type == "Energy Point" and doc.email_content is None:
return
from frappe.utils import get_url_to_form, strip_html
doc_link = get_url_to_form(doc.document_type, doc.document_name)
header = get_email_header(doc)
email_subject = strip_html(doc.subject)
frappe.sendmail(
recipients=doc.for_user,
subject=email_subject,
template="new_notification",
args={
"body_content": doc.subject,
"description": doc.email_content,
"document_type": doc.document_type,
"document_name": doc.document_name,
"doc_link": doc_link,
},
header=[header, "orange"],
now=frappe.flags.in_test,
)
def get_email_header(doc):
docname = doc.document_name
header_map = {
"Default": _("New Notification"),
"Mention": _("New Mention on {0}").format(docname),
"Assignment": _("Assignment Update on {0}").format(docname),
"Share": _("New Document Shared {0}").format(docname),
"Energy Point": _("Energy Point Update on {0}").format(docname),
}
return header_map[doc.type or "Default"]
@frappe.whitelist()
def mark_all_as_read():
unread_docs_list = frappe.db.get_all(
"Notification Log", filters={"read": 0, "for_user": frappe.session.user}
)
unread_docnames = [doc.name for doc in unread_docs_list]
if unread_docnames:
filters = {"name": ["in", unread_docnames]}
frappe.db.set_value("Notification Log", filters, "read", 1, update_modified=False)
@frappe.whitelist()
def mark_as_read(docname):
if docname:
frappe.db.set_value("Notification Log", docname, "read", 1, update_modified=False)
@frappe.whitelist()
def trigger_indicator_hide():
frappe.publish_realtime("indicator_hide", user=frappe.session.user)
def set_notifications_as_unseen(user):
try:
frappe.db.set_value("Notification Settings", user, "seen", 0)
except frappe.DoesNotExistError:
return
|
the-stack_0_8904 | r"""
Query Builder Datalog
=====================
Complements QueryBuilderBase with query capabilities,
as well as Region and Neurosynth capabilities
"""
from collections import defaultdict
from typing import (
AbstractSet,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
from uuid import uuid1
from .. import datalog
from .. import expressions as ir
from ..datalog import aggregation
from ..datalog.constraints_representation import RightImplication
from ..datalog.expression_processing import (
TranslateToDatalogSemantics,
reachable_code,
)
from ..type_system import Unknown
from ..utils import NamedRelationalAlgebraFrozenSet, RelationalAlgebraFrozenSet
from .datalog.standard_syntax import parser as datalog_parser
from .datalog.natural_syntax import parser as nat_datalog_parser
from .query_resolution import NeuroSynthMixin, QueryBuilderBase, RegionMixin
from ..datalog import DatalogProgram
from . import query_resolution_expressions as fe
__all__ = ["QueryBuilderDatalog"]
class QueryBuilderDatalog(RegionMixin, NeuroSynthMixin, QueryBuilderBase):
"""
Complements QueryBuilderBase with query capabilities,
as well as Region and Neurosynth capabilities
"""
def __init__(
self,
program_ir: DatalogProgram,
chase_class: Type[aggregation.Chase] = aggregation.Chase,
) -> "QueryBuilderDatalog":
"""
Query builder with query, Region, Neurosynth capabilities
Parameters
----------
program_ir : DatalogProgram
Datalog program's intermediate representation,
usually blank
chase_class : Type[aggregation.Chase], optional
used to compute deterministic solutions,
by default aggregation.Chase
Returns
-------
QueryBuilderDatalog
see description
"""
super().__init__(program_ir, logic_programming=True)
self.chase_class = chase_class
self.frontend_translator = fe.TranslateExpressionToFrontEndExpression(
self
)
self.translate_expression_to_datalog = TranslateToDatalogSemantics()
self.datalog_parser = datalog_parser
self.nat_datalog_parser = nat_datalog_parser
@property
def current_program(self) -> List[fe.Expression]:
"""
Returns the list of expressions that have currently been
declared in the program
Returns
-------
List[fe.Expression]
see description
Example
-------
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (2, 2)], name="l")
l: typing.AbstractSet[typing.Tuple[int, int]] = [(1, 2), (2, 2)]
>>> with nl.scope as e:
... e.l2[e.x] = e.l[e.x, e.y] & (e.x == e.y)
... cp = nl.current_program
>>> cp
[
l2(x) ← ( l(x, y) ) ∧ ( x eq y )
]
"""
cp = []
for rules in self.program_ir.intensional_database().values():
for rule in rules.formulas:
cp.append(self.frontend_translator.walk(rule))
return cp
def _declare_implication(
self, consequent: fe.Expression, antecedent: fe.Expression
) -> fe.Expression:
"""
Creates an implication of the consequent by the antecedent
and adds the rule to the current program:
consequent <- antecedent
Parameters
----------
consequent : fe.Expression
see description, will be processed to a logic form before
creating the implication rule
antecedent : fe.Expression
see description, will be processed to a logic form before
creating the implication rule
Returns
-------
fe.Expression
see description
Example
-------
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (2, 2)], name="l")
l: typing.AbstractSet[typing.Tuple[int, int]] = [(1, 2), (2, 2)]
>>> with nl.scope as e:
        ...     nl._declare_implication(e.l2[e.x], e.l[e.x, e.y])
... cp = nl.current_program
>>> cp
[
l2(x) ← l(x, y)
]
"""
consequent = self.translate_expression_to_datalog.walk(
consequent.expression
)
antecedent = self.translate_expression_to_datalog.walk(
antecedent.expression
)
rule = datalog.Implication(consequent, antecedent)
self.program_ir.walk(rule)
return rule
def add_constraint(
self, antecedent: fe.Expression, consequent: fe.Expression
) -> fe.Expression:
"""
        Creates a right implication of the consequent by the antecedent
and adds the rule to the current program:
antecedent -> consequent
Parameters
----------
antecedent : fe.Expression
see description, will be processed to a logic form before
creating the right implication rule
consequent : fe.Expression
see description, will be processed to a logic form before
creating the right implication rule
Returns
-------
fe.Expression
see description
Example
-------
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (2, 2)], name="l")
l: typing.AbstractSet[typing.Tuple[int, int]] = [(1, 2), (2, 2)]
>>> with nl.scope as e:
... nl.add_constraint(e.l2[e.x, e.y], e.l2[e.x])
"""
consequent = self.translate_expression_to_datalog.walk(
consequent.expression
)
antecedent = self.translate_expression_to_datalog.walk(
antecedent.expression
)
rule = RightImplication(antecedent, consequent)
self.program_ir.walk(rule)
return rule
def execute_datalog_program(self, code: str) -> None:
"""
Execute a Datalog program in classical syntax
Parameters
----------
code : string
Datalog program.
"""
intermediate_representation = self.datalog_parser(code)
self.program_ir.walk(intermediate_representation)
def execute_nat_datalog_program(self, code: str) -> None:
"""Execute a natural language Datalog program in classical syntax
Parameters
----------
code : string
Datalog program.
"""
intermediate_representation = self.nat_datalog_parser(code)
self.program_ir.walk(intermediate_representation)
def query(
self, *args
) -> Union[bool, RelationalAlgebraFrozenSet, fe.Symbol]:
"""
Performs an inferential query on the database.
There are three modalities
1. If there is only one argument, the query returns `True` or `False`
        depending on whether the query could be inferred.
2. If there are two arguments and the first is a tuple of `fe.Symbol`,
it returns the set of results meeting the query in the second argument.
3. If the first argument is a predicate (e.g. `Q(x)`) it performs the
query, adds it to the engine memory, and returns the
corresponding symbol.
See example for 3 modalities
Returns
-------
Union[bool, RelationalAlgebraFrozenSet, fe.Symbol]
            read the description.
Example
-------
Note: example ran with pandas backend
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (2, 2)], name="l")
l: typing.AbstractSet[typing.Tuple[int, int]] = [(1, 2), (2, 2)]
>>> with nl.environment as e:
... e.l2[e.x, e.y] = e.l[e.x, e.y] & (e.x == e.y)
... s1 = nl.query(e.l2[e.x, e.y])
... s2 = nl.query((e.x,), e.l2[e.x, e.y])
... s3 = nl.query(e.l3[e.x], e.l2[e.x, e.y])
>>> s1
True
>>> s2
x
0 2
>>> s3
l3: typing.AbstractSet[typing.Tuple[int]] = [(2,)]
"""
if len(args) == 1:
predicate = args[0]
head = tuple()
elif len(args) == 2:
head, predicate = args
if isinstance(head, fe.Symbol):
head = (head,)
else:
raise ValueError("query takes 1 or 2 arguments")
solution_set, functor_orig = self._execute_query(head, predicate)
if not isinstance(head, tuple):
out_symbol = ir.Symbol[solution_set.type](functor_orig.name)
self.add_tuple_set(solution_set.value, name=functor_orig.name)
return fe.Symbol(self, out_symbol.name)
elif len(head) == 0:
return len(solution_set.value) > 0
else:
return RelationalAlgebraFrozenSet(solution_set.value)
def _execute_query(
self,
head: Union[fe.Symbol, Tuple[fe.Expression, ...]],
predicate: fe.Expression,
) -> Tuple[AbstractSet, Optional[ir.Symbol]]:
"""
        [Internal usage - documentation for developers]
Performs an inferential query. Will return as first output
an AbstractSet with as many elements as solutions of the
predicate query. The AbstractSet's columns correspond to
the expressions in the head.
If head expressions are arguments of a functor, the latter will
be returned as the second output, defaulted as None.
Parameters
----------
head : Union[fe.Symbol, Tuple[fe.Expression, ...]]
see description
predicate : fe.Expression
see description
Returns
-------
Tuple[AbstractSet, Optional[fe.Symbol]]
see description
Examples
--------
Note: example ran with pandas backend
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (2, 2)], name="l")
l: typing.AbstractSet[typing.Tuple[int, int]] = [(1, 2), (2, 2)]
>>> with nl.scope as e:
... e.l2[e.x, e.y] = e.l[e.x, e.y] & (e.x == e.y)
... s1 = nl._execute_query(tuple(), e.l2[e.x, e.y])
... s2 = nl._execute_query((e.x,), e.l2[e.x, e.y])
... s3 = nl._execute_query(e.l2[e.x, e.y], e.l2[e.x, e.y])
>>> s1
(
C{
Empty DataFrame
Columns: []
Index: [0]
: typing.AbstractSet
},
None
)
>>> s2
(
C{
x
0 2
: typing.AbstractSet
},
None
)
>>> s3
(
C{
x y
0 2 2
: typing.AbstractSet
},
S{
l2: Unknown
}
)
"""
functor_orig = None
self.program_ir.symbol_table = self.symbol_table.create_scope()
if isinstance(head, fe.Operation):
functor_orig = head.expression.functor
new_head = self.new_symbol()(*head.arguments)
functor = new_head.expression.functor
elif isinstance(head, tuple):
new_head = self.new_symbol()(*head)
functor = new_head.expression.functor
query_expression = self._declare_implication(new_head, predicate)
reachable_rules = reachable_code(query_expression, self.program_ir)
solution = self.chase_class(
self.program_ir, rules=reachable_rules
).build_chase_solution()
solution_set = solution.get(functor.name, ir.Constant(set()))
self.program_ir.symbol_table = self.symbol_table.enclosing_scope
return solution_set, functor_orig
def solve_all(self) -> Dict[str, NamedRelationalAlgebraFrozenSet]:
"""
Returns a dictionary of "predicate_name": "Content"
for all elements in the solution of the Datalog program.
Returns
-------
Dict[str, NamedRelationalAlgebraFrozenSet]
            extensional and intensional facts that have been derived
through the current program
Example
-------
Note: example ran with pandas backend
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (2, 2)], name="l")
l: typing.AbstractSet[typing.Tuple[int, int]] = [(1, 2), (2, 2)]
>>> with nl.scope as e:
... e.l2[e.x] = e.l[e.x, e.y] & (e.x == e.y)
... solution = nl.solve_all()
>>> solution
{
'l':
0 1
0 1 2
1 2 2
'l2':
x
0 2
}
"""
solution_ir = self.chase_class(self.program_ir).build_chase_solution()
solution = {}
for k, v in solution_ir.items():
solution[k.name] = NamedRelationalAlgebraFrozenSet(
self.predicate_parameter_names(k.name), v.value.unwrap()
)
solution[k.name].row_type = v.value.row_type
return solution
def reset_program(self) -> None:
"""Clears current symbol table"""
self.symbol_table.clear()
def add_tuple_set(
self, iterable: Iterable, type_: Type = Unknown, name: str = None
) -> fe.Symbol:
"""
Creates an AbstractSet fe.Symbol containing the elements specified in
the iterable with a List[Tuple[Any, ...]] format (see examples).
        Typically used to create extensional facts from existing databases
Parameters
----------
iterable : Iterable
typically a list of tuples of values, other formats will
be interpreted as the latter
type_ : Type, optional
type of elements for the tuples, if not specified
will be inferred from the first element, by default Unknown
name : str, optional
name for the AbstractSet symbol, by default None
Returns
-------
fe.Symbol
see description
Examples
--------
>>> p_ir = DatalogProgram()
>>> nl = QueryBuilderDatalog(program_ir=p_ir)
>>> nl.add_tuple_set([(1, 2), (3, 4)], name="l1")
l1: typing.AbstractSet[typing.Tuple[int, int]] = \
[(1, 2), (3, 4)]
>>> nl.add_tuple_set([[1, 2, 3], (3, 4)], name="l2")
l2: typing.AbstractSet[typing.Tuple[int, int, float]] = \
[(1, 2, 3.0), (3, 4, nan)]
>>> nl.add_tuple_set((1, 2, 3), name="l3")
l3: typing.AbstractSet[typing.Tuple[int]] = \
[(1,), (2,), (3,)]
"""
if name is None:
name = str(uuid1())
if isinstance(type_, tuple):
type_ = Tuple[type_]
symbol = ir.Symbol[AbstractSet[type_]](name)
self.program_ir.add_extensional_predicate_from_tuples(
symbol, iterable, type_=type_
)
return fe.Symbol(self, name)
def predicate_parameter_names(
self, predicate_name: Union[str, fe.Symbol, fe.Expression]
) -> Tuple[str]:
"""
Get the names of the parameters for the given predicate
Parameters
----------
predicate_name : Union[str, fe.Symbol, fe.Expression]
predicate to obtain the names from
Returns
-------
tuple[str]
parameter names
"""
predicate_name = self._get_predicate_name(predicate_name)
parameter_names = []
pcount = defaultdict(lambda: 0)
for s in self.program_ir.predicate_terms(predicate_name):
param_name = self._obtain_parameter_name(s)
pcount[param_name] += 1
if pcount[param_name] > 1:
param_name = f"{param_name}_{pcount[param_name] - 1}"
parameter_names.append(param_name)
return tuple(parameter_names)
def _obtain_parameter_name(self, parameter_expression):
if hasattr(parameter_expression, "name"):
param_name = parameter_expression.name
elif hasattr(parameter_expression, "functor") and hasattr(
parameter_expression.functor, "name"
):
param_name = parameter_expression.functor.name
else:
param_name = ir.Symbol.fresh().name
return param_name
def _get_predicate_name(self, predicate_name):
if isinstance(predicate_name, fe.Symbol):
predicate_name = predicate_name.neurolang_symbol
elif isinstance(predicate_name, fe.Expression) and isinstance(
predicate_name.expression, ir.Symbol
):
predicate_name = predicate_name.expression
elif not isinstance(predicate_name, str):
raise ValueError(f"{predicate_name} is not a string or symbol")
return predicate_name
|
the-stack_0_8905 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from happy_python import HappyPyException
class TestHappyPyException(unittest.TestCase):
def test_hpe(self):
try:
raise HappyPyException('自定义错误')
except HappyPyException as e:
self.assertEqual('自定义错误', str(e))
|
the-stack_0_8906 | from __future__ import division
import requests
import datetime as dt
import json
import sys
from functools import partial
# from multiprocessing.pool import Pool
from billiard.pool import Pool
from twitterscraper.tweet import Tweet
from twitterscraper.ts_logger import logger
from twitterscraper.user import User
from fake_useragent import UserAgent
import urllib
ua = UserAgent()
HEADER = {'User-Agent': ua.random}
logger.info(HEADER)
INIT_URL = 'https://twitter.com/search?f=tweets&vertical=default&q={q}&l={lang}'
RELOAD_URL = 'https://twitter.com/i/search/timeline?f=tweets&vertical=' \
'default&include_available_features=1&include_entities=1&' \
'reset_error_state=false&src=typd&max_position={pos}&q={q}&l={lang}'
INIT_URL_USER = 'https://twitter.com/{u}'
RELOAD_URL_USER = 'https://twitter.com/i/profiles/show/{u}/timeline/tweets?' \
'include_available_features=1&include_entities=1&' \
'max_position={pos}&reset_error_state=false'
def get_query_url(query, lang, pos, from_user = False):
if from_user:
if pos is None:
return INIT_URL_USER.format(u=query)
else:
return RELOAD_URL_USER.format(u=query, pos=pos)
if pos is None:
return INIT_URL.format(q=query, lang=lang)
else:
return RELOAD_URL.format(q=query, pos=pos, lang=lang)
def linspace(start, stop, n):
if n == 1:
yield stop
return
h = (stop - start) / (n - 1)
for i in range(n):
yield start + h * i
def query_single_page(query, lang, pos, retry=50, from_user=False):
"""
Returns tweets from the given URL.
:param query: The query parameter of the query url
:param lang: The language parameter of the query url
:param pos: The query url parameter that determines where to start looking
:param retry: Number of retries if something goes wrong.
:return: The list of tweets, the pos argument for getting the next page.
"""
url = get_query_url(query, lang, pos, from_user)
    logger.info('Scraping tweets from {}'.format(url))
try:
response = requests.get(url, headers=HEADER)
if pos is None: # html response
html = response.text or ''
json_resp = None
else:
html = ''
try:
json_resp = json.loads(response.text)
html = json_resp['items_html'] or ''
except ValueError as e:
logger.exception('Failed to parse JSON "{}" while requesting "{}"'.format(e, url))
tweets = list(Tweet.from_html(html))
if not tweets:
try:
if json_resp:
pos = json_resp['min_position']
has_more_items = json_resp['has_more_items']
if not has_more_items:
logger.info("Twitter returned : 'has_more_items' ")
return [], None
else:
pos = None
except:
pass
if retry > 0:
logger.info('Retrying... (Attempts left: {})'.format(retry))
return query_single_page(query, lang, pos, retry - 1, from_user)
else:
return [], pos
if json_resp:
return tweets, urllib.parse.quote(json_resp['min_position'])
if from_user:
return tweets, tweets[-1].tweet_id
return tweets, "TWEET-{}-{}".format(tweets[-1].tweet_id, tweets[0].tweet_id)
except requests.exceptions.HTTPError as e:
logger.exception('HTTPError {} while requesting "{}"'.format(
e, url))
except requests.exceptions.ConnectionError as e:
logger.exception('ConnectionError {} while requesting "{}"'.format(
e, url))
except requests.exceptions.Timeout as e:
logger.exception('TimeOut {} while requesting "{}"'.format(
e, url))
except json.decoder.JSONDecodeError as e:
logger.exception('Failed to parse JSON "{}" while requesting "{}".'.format(
e, url))
if retry > 0:
logger.info('Retrying... (Attempts left: {})'.format(retry))
return query_single_page(query, lang, pos, retry - 1)
logger.error('Giving up.')
return [], None
def query_tweets_once_generator(query, limit=None, lang='', pos=None):
"""
Queries twitter for all the tweets you want! It will load all pages it gets
    from twitter. However, twitter might suddenly stop serving new pages;
    in that case, use the `query_tweets` method.
Note that this function catches the KeyboardInterrupt so it can return
tweets on incomplete queries if the user decides to abort.
:param query: Any advanced query you want to do! Compile it at
https://twitter.com/search-advanced and just copy the query!
:param limit: Scraping will be stopped when at least ``limit`` number of
items are fetched.
:param pos: Field used as a "checkpoint" to continue where you left off in iteration
:return: A list of twitterscraper.Tweet objects. You will get at least
``limit`` number of items.
"""
logger.info('Querying {}'.format(query))
query = query.replace(' ', '%20').replace('#', '%23').replace(':', '%3A')
num_tweets = 0
try:
while True:
new_tweets, new_pos = query_single_page(query, lang, pos)
if len(new_tweets) == 0:
logger.info('Got {} tweets for {}.'.format(
num_tweets, query))
return
for t in new_tweets:
yield t, pos
# use new_pos only once you have iterated through all old tweets
pos = new_pos
num_tweets += len(new_tweets)
if limit and num_tweets >= limit:
logger.info('Got {} tweets for {}.'.format(
num_tweets, query))
return
except KeyboardInterrupt:
logger.info('Program interrupted by user. Returning tweets gathered '
'so far...')
except BaseException:
logger.exception('An unknown error occurred! Returning tweets '
'gathered so far.')
logger.info('Got {} tweets for {}.'.format(
num_tweets, query))
def query_tweets_once(*args, **kwargs):
res = list(query_tweets_once_generator(*args, **kwargs))
if res:
tweets, positions = zip(*res)
return tweets
else:
return []
def query_tweets(query, limit=None, begindate=dt.date(2006, 3, 21), enddate=dt.date.today(), poolsize=20, lang=''):
no_days = (enddate - begindate).days
if(no_days < 0):
sys.exit('Begin date must occur before end date.')
if poolsize > no_days:
# Since we are assigning each pool a range of dates to query,
# the number of pools should not exceed the number of dates.
poolsize = no_days
dateranges = [begindate + dt.timedelta(days=elem) for elem in linspace(0, no_days, poolsize+1)]
if limit and poolsize:
limit_per_pool = (limit // poolsize)+1
else:
limit_per_pool = None
queries = ['{} since:{} until:{}'.format(query, since, until)
for since, until in zip(dateranges[:-1], dateranges[1:])]
all_tweets = []
try:
pool = Pool(poolsize)
logger.info('queries: {}'.format(queries))
try:
for new_tweets in pool.imap_unordered(partial(query_tweets_once, limit=limit_per_pool, lang=lang), queries):
all_tweets.extend(new_tweets)
logger.info('Got {} tweets ({} new).'.format(
len(all_tweets), len(new_tweets)))
except KeyboardInterrupt:
logger.info('Program interrupted by user. Returning all tweets '
'gathered so far.')
finally:
pool.close()
pool.join()
return all_tweets
def query_tweets_from_user(user, limit=None):
pos = None
tweets = []
try:
while True:
new_tweets, pos = query_single_page(user, lang='', pos=pos, from_user=True)
if len(new_tweets) == 0:
logger.info("Got {} tweets from username {}".format(len(tweets), user))
return tweets
tweets += new_tweets
if limit and len(tweets) >= limit:
logger.info("Got {} tweets from username {}".format(len(tweets), user))
return tweets
except KeyboardInterrupt:
logger.info("Program interrupted by user. Returning tweets gathered "
"so far...")
except BaseException:
logger.exception("An unknown error occurred! Returning tweets "
"gathered so far.")
logger.info("Got {} tweets from username {}.".format(
len(tweets), user))
return tweets
def query_user_page(url, retry=10):
"""
Returns the scraped user data from a twitter user page.
:param url: The URL to get the twitter user info from (url contains the user page)
:param retry: Number of retries if something goes wrong.
:return: Returns the scraped user data from a twitter user page.
"""
try:
response = requests.get(url, headers=HEADER)
html = response.text or ''
user_info = User.from_html(html)
if not user_info:
return None
return user_info
except requests.exceptions.HTTPError as e:
logger.exception('HTTPError {} while requesting "{}"'.format(
e, url))
except requests.exceptions.ConnectionError as e:
logger.exception('ConnectionError {} while requesting "{}"'.format(
e, url))
except requests.exceptions.Timeout as e:
logger.exception('TimeOut {} while requesting "{}"'.format(
e, url))
if retry > 0:
logger.info('Retrying... (Attempts left: {})'.format(retry))
return query_user_page(url, retry-1)
logger.error('Giving up.')
return None
def query_user_info(user):
"""
Returns the scraped user data from a twitter user page.
:param user: the twitter user to web scrape its twitter page info
"""
try:
user_info = query_user_page(INIT_URL_USER.format(u=user))
if user_info:
logger.info("Got user information from username {}".format(user))
return user_info
except KeyboardInterrupt:
logger.info("Program interrupted by user. Returning user information gathered so far...")
except BaseException:
logger.exception("An unknown error occurred! Returning user information gathered so far...")
logger.info("Got user information from username {}".format(user))
return user_info
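# Example usage (a minimal sketch; the Tweet attribute names printed below are
# assumptions and may differ between twitterscraper versions):
#
# if __name__ == '__main__':
#     tweets = query_tweets('#python', limit=100,
#                           begindate=dt.date(2019, 1, 1),
#                           enddate=dt.date(2019, 2, 1),
#                           poolsize=5, lang='en')
#     for tweet in tweets:
#         print(tweet.timestamp, tweet.text)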
|
the-stack_0_8907 | # qubit number=4
# total number=47
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
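# Worked examples (illustrative):
#   bitwise_xor('101', '011') -> per-index XOR gives ['1', '1', '0'], reversed to '011'
#   bitwise_dot('101', '111') -> (1*1 + 0*1 + 1*1) % 2 = 0 -> '0'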
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=30
prog.cz(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=32
prog.x(input_qubit[2]) # number=28
prog.h(input_qubit[2]) # number=39
prog.cz(input_qubit[0],input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=41
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[2]) # number=36
prog.cz(input_qubit[3],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=38
prog.h(input_qubit[0]) # number=44
prog.cz(input_qubit[2],input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=46
prog.h(input_qubit[0]) # number=19
prog.cz(input_qubit[2],input_qubit[0]) # number=20
prog.h(input_qubit[0]) # number=21
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[2],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.x(input_qubit[2]) # number=42
prog.x(input_qubit[2]) # number=43
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class3146.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_8909 | #!/usr/bin/env python
# Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""ETOS suite runner module."""
import os
import logging
import traceback
import signal
from etos_lib import ETOS
from etos_lib.logging.logger import FORMAT_CONFIG
from etos_suite_runner.lib.runner import SuiteRunner
from etos_suite_runner.lib.esr_parameters import ESRParameters
# Remove spam from pika.
logging.getLogger("pika").setLevel(logging.WARNING)
LOGGER = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(os.path.relpath(__file__))
class EnvironmentProviderException(Exception):
"""Exception from EnvironmentProvider."""
def __init__(self, msg, task_id):
"""Initialize with task_id."""
self.task_id = task_id
super().__init__(msg)
class ESR: # pylint:disable=too-many-instance-attributes
"""Suite runner for ETOS main program.
Run this as a daemon on your system in order to trigger test suites within
the eiffel event system.
"""
def __init__(self):
"""Initialize ESR by creating a rabbitmq publisher."""
self.logger = logging.getLogger("ESR")
self.etos = ETOS(
"ETOS Suite Runner", os.getenv("SOURCE_HOST"), "ETOS Suite Runner"
)
signal.signal(signal.SIGTERM, self.graceful_exit)
self.params = ESRParameters(self.etos)
FORMAT_CONFIG.identifier = self.params.tercc.meta.event_id
self.etos.config.rabbitmq_publisher_from_environment()
self.etos.start_publisher()
self.etos.config.set(
"WAIT_FOR_ENVIRONMENT_TIMEOUT",
int(os.getenv("ESR_WAIT_FOR_ENVIRONMENT_TIMEOUT")),
)
def _request_environment(self):
"""Request an environment from the environment provider.
:return: Task ID and an error message.
:rtype: tuple
"""
params = {"suite_id": self.params.tercc.meta.event_id}
wait_generator = self.etos.http.retry(
"POST", self.etos.debug.environment_provider, json=params
)
task_id = None
result = {}
try:
for response in wait_generator:
result = response.get("result", "")
if response and result and result.lower() == "success":
task_id = response.get("data", {}).get("id")
break
continue
else:
return None, "Did not retrieve an environment"
except ConnectionError as exception:
return None, str(exception)
return task_id, ""
def _wait_for_environment(self, task_id):
"""Wait for an environment being provided.
:param task_id: Task ID to wait for.
:type task_id: str
:return: Environment and an error message.
:rtype: tuple
"""
timeout = self.etos.config.get("WAIT_FOR_ENVIRONMENT_TIMEOUT")
wait_generator = self.etos.utils.wait(
self.etos.http.wait_for_request,
uri=self.etos.debug.environment_provider,
timeout=timeout,
params={"id": task_id},
)
environment = None
result = {}
response = None
for generator in wait_generator:
for response in generator:
result = response.get("result", {})
if response and result and result.get("error") is None:
environment = response
break
if result and result.get("error"):
return None, result.get("error")
if environment is not None:
break
else:
if result and result.get("error"):
return None, result.get("error")
return (
None,
(
"Unknown Error: Did not receive an environment "
f"within {self.etos.debug.default_http_timeout}s"
),
)
return environment, ""
def _release_environment(self, task_id):
"""Release an environment from the environment provider.
:param task_id: Task ID to release.
:type task_id: str
"""
wait_generator = self.etos.http.wait_for_request(
self.etos.debug.environment_provider, params={"release": task_id}
)
for response in wait_generator:
if response:
break
def _reserve_workers(self):
"""Reserve workers for test."""
LOGGER.info("Request environment from environment provider")
task_id, msg = self._request_environment()
if task_id is None:
raise EnvironmentProviderException(msg, task_id)
LOGGER.info("Wait for environment to become ready.")
environment, msg = self._wait_for_environment(task_id)
if environment is None:
raise EnvironmentProviderException(msg, task_id)
return environment, task_id
def run_suite(self, triggered):
"""Trigger an activity and starts the actual test runner.
Will only start the test activity if there's a 'slot' available.
:param triggered: Activity triggered.
:type triggered: :obj:`eiffel.events.EiffelActivityTriggeredEvent`
"""
context = triggered.meta.event_id
LOGGER.info("Sending ESR Docker environment event.")
self.etos.events.send_environment_defined(
"ESR Docker", {"CONTEXT": context}, image=os.getenv("SUITE_RUNNER")
)
runner = SuiteRunner(self.params, self.etos, context)
task_id = None
try:
LOGGER.info("Wait for test environment.")
environment, task_id = self._reserve_workers()
self.etos.events.send_activity_started(triggered, {"CONTEXT": context})
LOGGER.info("Starting ESR.")
runner.run(environment.get("result"))
except EnvironmentProviderException as exception:
task_id = exception.task_id
raise
finally:
LOGGER.info("Release test environment.")
if task_id is not None:
self._release_environment(task_id)
@staticmethod
def verify_input():
"""Verify that the data input to ESR are correct."""
assert os.getenv(
"SUITE_RUNNER"
        ), "SUITE_RUNNER environment variable not provided."
assert os.getenv(
"SOURCE_HOST"
), "SOURCE_HOST environment variable not provided."
assert os.getenv("TERCC"), "TERCC environment variable not provided."
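    # Example environment for a local run (an illustrative sketch; every value is
    # a placeholder, and TERCC must hold the JSON of the triggering Eiffel TERCC
    # event as expected by ESRParameters):
    #
    #   export SUITE_RUNNER="registry.example.com/etos-suite-runner:latest"
    #   export SOURCE_HOST="esr.example.com"
    #   export TERCC='{"meta": {"id": "..."}, "data": {...}, "links": [...]}'
    #   export ESR_WAIT_FOR_ENVIRONMENT_TIMEOUT=3600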
def run(self):
"""Run the ESR main loop."""
tercc_id = None
try:
tercc_id = self.params.tercc.meta.event_id
self.etos.events.send_announcement_published(
"[ESR] Launching.",
"Starting up ESR. Waiting for tests to start.",
"MINOR",
{"CAUSE": tercc_id},
)
activity_name = "ETOS testrun"
links = {
"CAUSE": [
self.params.tercc.meta.event_id,
self.params.artifact_created["meta"]["id"],
]
}
triggered = self.etos.events.send_activity_triggered(
activity_name,
links,
executionType="AUTOMATED",
triggers=[{"type": "EIFFEL_EVENT"}],
)
self.verify_input()
context = triggered.meta.event_id
except: # noqa
self.etos.events.send_announcement_published(
"[ESR] Failed to start test execution",
traceback.format_exc(),
"CRITICAL",
{"CAUSE": tercc_id},
)
raise
try:
self.run_suite(triggered)
self.etos.events.send_activity_finished(
triggered, {"conclusion": "SUCCESSFUL"}, {"CONTEXT": context}
)
except Exception as exception: # pylint:disable=broad-except
reason = str(exception)
self.etos.events.send_activity_canceled(
triggered, {"CONTEXT": context}, reason=reason
)
self.etos.events.send_announcement_published(
"[ESR] Test suite execution failed",
traceback.format_exc(),
"MAJOR",
{"CONTEXT": context},
)
raise
def graceful_exit(self, *_):
"""Attempt to gracefully exit the running job."""
self.logger.info(
"Kill command received - Attempting to shut down all processes."
)
raise Exception("Terminate command received - Shutting down.")
def main():
"""Entry point allowing external calls."""
esr = ESR()
try:
esr.run() # Blocking
except:
with open("/dev/termination-log", "w", encoding="utf-8") as termination_log:
termination_log.write(traceback.format_exc())
raise
finally:
esr.etos.publisher.stop()
LOGGER.info("ESR Finished Executing.")
def run():
"""Entry point for console_scripts."""
main()
if __name__ == "__main__":
run()
|
the-stack_0_8911 | # a cursor is the object we use to interact with the database
import pymysql.cursors
# this class will give us an instance of a connection to our database
class MySQLConnection:
def __init__(self, db):
# change the user and password as needed
connection = pymysql.connect(host = 'localhost',
user = 'root',
password = 'root', # CHANGE THIS IF YOU USE A DIFFERENT PASSWORD IN MySql Workbench!
db = db,
charset = 'utf8mb4',
cursorclass = pymysql.cursors.DictCursor,
autocommit = True)
# establish the connection to the database
self.connection = connection
# the method to query the database
def query_db(self, query, data=None):
with self.connection.cursor() as cursor:
try:
query = cursor.mogrify(query, data)
print("Running Query:", query)
cursor.execute(query, data)
if query.lower().find("insert") >= 0:
# INSERT queries will return the ID NUMBER of the row inserted
self.connection.commit()
return cursor.lastrowid
elif query.lower().find("select") >= 0:
# SELECT queries will return the data from the database as a LIST OF DICTIONARIES
result = cursor.fetchall()
return result
else:
# UPDATE and DELETE queries will return nothing
self.connection.commit()
except Exception as e:
# if the query fails the method will return FALSE
print("Something went wrong", e)
return False
finally:
# close the connection
self.connection.close()
# connectToMySQL receives the database we're using and uses it to create an instance of MySQLConnection
def connectToMySQL(db):
return MySQLConnection(db) |
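# Example usage (an illustrative sketch; the module/file name, schema "users_db"
# and table "users" are assumptions for demonstration only):
#
# from mysqlconnection import connectToMySQL
# users = connectToMySQL("users_db").query_db("SELECT * FROM users;")
# new_id = connectToMySQL("users_db").query_db(
#     "INSERT INTO users (first_name) VALUES (%(first_name)s);",
#     {"first_name": "Ada"},
# )
#
# Note: query_db closes the connection in its finally block, so a new
# connectToMySQL(...) call is needed for every query.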
the-stack_0_8913 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# xpaw documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 16 11:08:48 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import re
from os.path import join, dirname
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'xpaw'
copyright = '2016-2018, jadbin'
author = 'jadbin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
def read_version():
p = join(dirname(dirname(__file__)), 'xpaw', '__init__.py')
with open(p, 'r', encoding='utf-8') as f:
return re.search(r"__version__ = '([^']+)'", f.read()).group(1)
version = read_version()
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'description': 'Async web scraping framework',
'github_user': 'jadbin',
'github_repo': 'xpaw',
'github_button': False,
'travis_button': True,
'font_family': '"Helvetica Neue", Helvetica, "PingFang SC", "Hiragino Sans GB", "Microsoft YaHei", "微软雅黑", Arial, sans-serif',
'font_size': '14px',
'code_font_size': '12px',
'note_bg': '#E5ECD1',
'note_border': '#BFCF8C',
}
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'xpawdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'xpaw.tex', 'xpaw Documentation',
'jadbin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'xpaw', 'xpaw Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'xpaw', 'xpaw Documentation',
author, 'xpaw', 'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_8914 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import matrix, parametrize
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService, quorum
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int_with_prefix
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1, LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, LATEST_2_6, LATEST_2_7, LATEST_2_8, KafkaVersion
class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest):
"""
These tests validate that we can use a new client to produce and consume from older brokers.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=3) if quorum.for_test(test_context) == quorum.zk else None
self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
"partitions": 10,
"replication-factor": 2}})
self.num_partitions = 10
self.timeout_sec = 60
self.producer_throughput = 1000
self.num_producers = 2
self.messages_per_producer = 1000
self.num_consumers = 1
def setUp(self):
if self.zk:
self.zk.start()
def min_cluster_size(self):
# Override this since we're adding services outside of the constructor
return super(ClientCompatibilityProduceConsumeTest, self).min_cluster_size() + self.num_producers + self.num_consumers
@cluster(num_nodes=9)
@matrix(broker_version=[str(DEV_BRANCH)], metadata_quorum=quorum.all_non_upgrade)
@parametrize(broker_version=str(LATEST_0_10_0))
@parametrize(broker_version=str(LATEST_0_10_1))
@parametrize(broker_version=str(LATEST_0_10_2))
@parametrize(broker_version=str(LATEST_0_11_0))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_5))
@parametrize(broker_version=str(LATEST_2_6))
@parametrize(broker_version=str(LATEST_2_7))
@parametrize(broker_version=str(LATEST_2_8))
def test_produce_consume(self, broker_version, metadata_quorum=quorum.zk):
print("running producer_consumer_compat with broker_version = %s" % broker_version, flush=True)
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000,
message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
|
the-stack_0_8916 | # This is a sample commands.py. You can add your own commands here.
#
# Please refer to commands_full.py for all the default commands and a complete
# documentation. Do NOT add them all here, or you may end up with defunct
# commands when upgrading ranger.
# A simple command for demonstration purposes follows.
# -----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function)
# You can import any python module as needed.
import os
# You always need to import ranger.api.commands here to get the Command class:
from ranger.api.commands import Command
from ranger.core.loader import CommandLoader
# Any class that is a subclass of "Command" will be integrated into ranger as a
# command. Try typing ":my_edit<ENTER>" in ranger!
class my_edit(Command):
# The so-called doc-string of the class will be visible in the built-in
# help that is accessible by typing "?c" inside ranger.
""":my_edit <filename>
A sample command for demonstration purposes that opens a file in an editor.
"""
# The execute method is called when you run this command in ranger.
def execute(self):
# self.arg(1) is the first (space-separated) argument to the function.
# This way you can write ":my_edit somefilename<ENTER>".
if self.arg(1):
# self.rest(1) contains self.arg(1) and everything that follows
target_filename = self.rest(1)
else:
# self.fm is a ranger.core.filemanager.FileManager object and gives
# you access to internals of ranger.
# self.fm.thisfile is a ranger.container.file.File object and is a
# reference to the currently selected file.
target_filename = self.fm.thisfile.path
# This is a generic function to print text in ranger.
self.fm.notify("Let's edit the file " + target_filename + "!")
# Using bad=True in fm.notify allows you to print error messages:
if not os.path.exists(target_filename):
self.fm.notify("The given file does not exist!", bad=True)
return
        # This executes a function from ranger.core.actions, a module with a
# variety of subroutines that can help you construct commands.
# Check out the source, or run "pydoc ranger.core.actions" for a list.
self.fm.edit_file(target_filename)
# The tab method is called when you press tab, and should return a list of
# suggestions that the user will tab through.
# tabnum is 1 for <TAB> and -1 for <S-TAB> by default
def tab(self, tabnum):
# This is a generic tab-completion function that iterates through the
# content of the current directory.
return self._tab_directory_content()
class extracthere(Command):
def execute(self):
""" Extract copied files to current directory """
copied_files = tuple(self.fm.copy_buffer)
if not copied_files:
return
def refresh(_):
cwd = self.fm.get_directory(original_path)
cwd.load_content()
one_file = copied_files[0]
cwd = self.fm.thisdir
original_path = cwd.path
au_flags = ['-X', cwd.path]
au_flags += self.line.split()[1:]
au_flags += ['-e']
self.fm.copy_buffer.clear()
self.fm.cut_buffer = False
if len(copied_files) == 1:
descr = "extracting: " + os.path.basename(one_file.path)
else:
descr = "extracting files from: " + os.path.basename(one_file.dirname)
obj = CommandLoader(args=['aunpack'] + au_flags \
+ [f.path for f in copied_files], descr=descr, read=True)
obj.signal_bind('after', refresh)
self.fm.loader.add(obj)
class compress(Command):
def execute(self):
""" Compress marked files to current directory """
cwd = self.fm.thisdir
marked_files = cwd.get_selection()
if not marked_files:
return
def refresh(_):
cwd = self.fm.get_directory(original_path)
cwd.load_content()
original_path = cwd.path
parts = self.line.split()
au_flags = parts[1:]
descr = "compressing files in: " + os.path.basename(parts[1])
obj = CommandLoader(args=['apack'] + au_flags + \
[os.path.relpath(f.path, cwd.path) for f in marked_files], descr=descr, read=True)
obj.signal_bind('after', refresh)
self.fm.loader.add(obj)
def tab(self, tabnum):
""" Complete with current folder name """
extension = ['.zip', '.tar.gz', '.rar', '.7z']
return ['compress ' + os.path.basename(self.fm.thisdir.path) + ext for ext in extension]
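# Example keybindings for the commands above in rc.conf (illustrative; pick any
# free keys). Both commands shell out to atool's "aunpack"/"apack", so atool must
# be installed:
#   map ex extracthere
#   map ec compress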
|
the-stack_0_8918 | import string
import os
def clean_name(name):
name = name.lower()
name = name.strip()
name = name.replace('\'', '')
name = name.replace('-', ' ')
return name.translate(str.maketrans("", "", string.punctuation))
class NameConverter:
def __init__(self):
self.color_map = {}
location = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(location + '/colors.csv') as colors:
for line in colors:
line = line.strip()
(name, r, g, b) = line.split(',')
name = clean_name(name)
self.color_map[name] = (int(r), int(g), int(b))
def convert(self, color):
key_name = clean_name(color)
return self.color_map.get(key_name, None)
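# Example usage (a minimal sketch; assumes colors.csv contains a row such as
# "sky blue,135,206,235" -- the rows actually shipped are not shown here):
#
# converter = NameConverter()
# converter.convert("Sky-Blue")   # -> (135, 206, 235) if present, otherwise None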
|
the-stack_0_8919 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
class TestDelegate(MessagePumpDelegate):
def __init__(self):
self.log = []
def schedule(self, interval, callback):
self.callback = callback
self.log.append("schedule")
def message_available(self, message):
self.log.append("message_available: %s" % message)
def final_message_delivered(self):
self.log.append("final_message_delivered")
class MessagePumpTest(unittest.TestCase):
def test_basic(self):
queue = ThreadedMessageQueue()
delegate = TestDelegate()
pump = MessagePump(delegate, queue)
self.assertEqual(delegate.log, [
'schedule'
])
delegate.callback()
queue.post("Hello")
queue.post("There")
delegate.callback()
self.assertEqual(delegate.log, [
'schedule',
'schedule',
'message_available: Hello',
'message_available: There',
'schedule'
])
queue.post("More")
queue.post("Messages")
queue.stop()
delegate.callback()
self.assertEqual(delegate.log, [
'schedule',
'schedule',
'message_available: Hello',
'message_available: There',
'schedule',
'message_available: More',
'message_available: Messages',
'final_message_delivered'
])
|
the-stack_0_8923 | """sc-githooks - The base check class
Copyright (c) 2021 Scott Lau
Portions Copyright (c) 2021 InnoGames GmbH
Portions Copyright (c) 2021 Emre Hasegeli
"""
from enum import IntEnum
class CheckState(IntEnum):
NEW = 0
CLONED = 1
DONE = 2
FAILED = 3
class Severity(IntEnum):
# The numbers are selected to match the Syslog standard.
ERROR = 3
WARNING = 4
NOTICE = 5
NOTE = 5
INFO = 6
def translate(self):
if self.__eq__(Severity.ERROR):
return "错误"
elif self.__eq__(Severity.WARNING):
return "警告"
elif self.__eq__(Severity.NOTICE):
return "注意"
elif self.__eq__(Severity.NOTE):
return "注意"
elif self.__eq__(Severity.INFO):
return "信息"
else:
return "未知"
@classmethod
def split(cls, line):
"""Search the severities in the beginning of the string
It returns the highest severity when non match.
"""
for name, severity in cls._member_map_.items():
if line.upper().startswith(name):
line = line[len(name):].strip(' :-')
break
return severity, line
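    # Example sketch: Severity.split("WARNING: trailing whitespace") returns
    # (Severity.WARNING, "trailing whitespace"); the matched prefix and any
    # ' :-' padding are stripped from the returned line.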
class BaseCheck:
"""The parent class of all checks
    Checks are expanded to different objects by cloning. Subclasses
    have to override the prepare() method to clone the check at the
    appropriate stage.
"""
preferred_checks = []
state = CheckState.NEW
ERROR_MSG_PREFIX = "GL-HOOK-ERR:"
def __init__(self, **kwargs):
for key, value in kwargs.items():
# We expect all of the arguments to be initialized with defaults
# on the class.
assert hasattr(type(self), key)
if value:
setattr(self, key, value)
def clone(self):
new = type(self)(**vars(self))
new.state = CheckState.CLONED
return new
def set_state(self, state):
assert state > CheckState.CLONED
self.state = max(self.state, state)
def prepare(self, obj):
for check in self.preferred_checks:
if check.prepare(obj):
return None
return self
def print_problems(self):
header_printed = False
for severity, problem in self.evaluate_problems():
if not header_printed:
print('{} === {} ==='.format(BaseCheck.ERROR_MSG_PREFIX, self))
header_printed = True
print('{} {}: {}'.format(BaseCheck.ERROR_MSG_PREFIX, severity.translate(), problem))
# if header_printed:
# print('{}'.format(BaseCheck.ERROR_MSG_PREFIX))
self.set_state(CheckState.DONE)
def evaluate_problems(self):
assert self.state == CheckState.CLONED
for severity, problem in self.get_problems():
if severity <= Severity.ERROR:
self.set_state(CheckState.FAILED)
yield severity, problem
def __str__(self):
return type(self).__name__
def prepare_checks(checks, obj, next_checks=None):
"""Prepare the checks to the object
It yields the checks prepared and ready. The checks which are not
ready yet are going do be appended to the next_checks list.
"""
for check in checks:
prepared_check = check.prepare(obj)
if prepared_check:
cloned = prepared_check.state >= CheckState.CLONED
assert next_checks is not None or cloned
if cloned:
yield prepared_check
else:
next_checks.append(prepared_check)
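
# A minimal sketch (not part of the original module) of how a concrete check
# could plug into prepare_checks(); the CommitMessageCheck name and the
# commit.get_summary() call are made-up assumptions for illustration only.
class CommitMessageCheck(BaseCheck):
    commit = None

    def prepare(self, obj):
        # Stay pending until a commit-like object is available, then clone.
        if not hasattr(obj, 'get_summary'):
            return super().prepare(obj)
        new = self.clone()
        new.commit = obj
        return new

    def get_problems(self):
        if len(self.commit.get_summary()) > 72:
            yield Severity.WARNING, 'commit summary longer than 72 characters'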
|
the-stack_0_8926 | import logging
import numpy as np
import pandas as pd
from random import shuffle
import models
from common.constant.df_from_csv import LISTENING_DF, SP_I_DF, SP_O_DF
from common.constant.message_type import MessageType
from core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator
class ReactionResponseGenerator(BaseResponseGenerator):
def __call__(self):
try:
responses = self.generate_reaction_by_type(self.user.id, self.message_type, self.message.text_kw_df)
self.set_regular_response(responses)
return self.response_data
except:
return self.get_error_response_data()
@classmethod
def generate_reaction_by_type(cls, user_id, reaction_type, text_kw_df):
try:
used_reaction_numbers_list = models.Reaction.find_used_reaction_number(user_id, reaction_type)
if reaction_type == MessageType.SPECIAL.value:
responses = [
cls.__find_special_reaction(used_reaction_numbers_list, text_kw_df, user_id, reaction_type)
]
else:
responses = [
cls.__find_basic_reaction(used_reaction_numbers_list, user_id, reaction_type)]
return responses
except:
logging.exception('')
return []
@staticmethod
def generate_listening():
try:
listening = LISTENING_DF[LISTENING_DF.type == 1].text.values
response_list = [np.random.choice(listening, 1)[0]]
return response_list
except:
logging.exception('')
return []
@classmethod
def __find_special_reaction(cls, used_reaction_numbers_list, text_kw_df, user_id, reaction_type):
special_words = text_kw_df[text_kw_df.special != 'normal'].word.tolist()
special_word = special_words[-1]
# e.g. id = alone, cry, etc
special_word_id = SP_I_DF[SP_I_DF.word == special_word]['id'].values[0]
target_id_list = SP_O_DF[SP_O_DF['id'] == special_word_id].index.tolist()
if len(used_reaction_numbers_list) == len(target_id_list):
models.Reaction.enable_reaction_number(user_id, reaction_type)
sp_id_list = used_reaction_numbers_list
else:
sp_id_list = SP_O_DF[
(SP_O_DF.id == special_word_id)
& ~(SP_O_DF.index.isin(used_reaction_numbers_list))
].index.tolist()
shuffle(sp_id_list)
sp_id = sp_id_list[0]
models.Reaction.disable_reaction_number(user_id, sp_id, reaction_type)
sp_reaction = SP_O_DF[SP_O_DF.index == sp_id].output.values[0]
sp_reaction = sp_reaction.replace('\\n', '\n')
return sp_reaction
@classmethod
def __find_basic_reaction(cls, used_reaction_numbers_list, user_id, reaction_type):
try:
used_reaction_numbers_list = list(set(used_reaction_numbers_list))
rdf = pd.read_csv('./csv_files/reactions.csv')
target_id_list = rdf[rdf['type'] == reaction_type].index.tolist()
if any(i not in target_id_list for i in used_reaction_numbers_list):
# In this case, reactions.csv has changed. so set all reations status = 1
models.Reaction.enable_reaction_number(user_id, reaction_type, used_reaction_numbers_list)
candidate_id_list = target_id_list
elif len(used_reaction_numbers_list) == len(target_id_list):
models.Reaction.enable_reaction_number(user_id, reaction_type)
candidate_id_list = used_reaction_numbers_list
else:
candidate_id_list = rdf[
(rdf['type'] == reaction_type)
& ~(rdf.index.isin(used_reaction_numbers_list))
].index.tolist()
shuffle(candidate_id_list)
r_id = candidate_id_list[0]
models.Reaction.disable_reaction_number(user_id, r_id, reaction_type)
r = rdf[rdf.index == r_id].reaction.values[0]
r = r.replace('\\n', '\n')
return r
except:
logging.exception('')
return ''
|
the-stack_0_8928 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages, setup
import tensorboard.version
REQUIRED_PACKAGES = [
'numpy >= 1.12.0',
'six >= 1.10.0',
'protobuf >= 3.4.0',
'werkzeug >= 0.11.10',
'html5lib == 0.9999999', # identical to 1.0b8
'markdown >= 2.6.8',
'bleach == 1.5.0',
# futures is a backport of the python 3.2+ concurrent.futures module
'futures >= 3.1.1; python_version < "3"',
# python3 specifically requires wheel 0.26
'wheel; python_version < "3"',
'wheel >= 0.26; python_version >= "3"',
]
CONSOLE_SCRIPTS = [
'tensorboard = tensorboard.main:run_main',
]
def get_readme():
with open('tensorboard/pip_package/README.rst') as f:
return f.read()
setup(
name='tensorflow-tensorboard',
version=tensorboard.version.VERSION.replace('-', ''),
description='TensorBoard lets you watch Tensors Flow',
long_description=get_readme(),
url='https://github.com/tensorflow/tensorboard',
author='Google Inc.',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
package_data={
'tensorboard': [
'webfiles.zip',
],
},
# Disallow python 3.0 and 3.1 which lack a 'futures' module (see above).
python_requires='>= 2.7, != 3.0.*, != 3.1.*',
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensorboard tensor machine learning visualizer',
)
|
the-stack_0_8929 | from bs4 import BeautifulSoup
import requests,datetime
top_news = {"world":[],"business":[],"technology":[],"sports":[],"entertainment":[]}
def Scraper_news():
new_dic = {}
URLS_of_menu = {"world":"http://www.newzcone.com/world/","business":"http://www.newzcone.com/business/","technology":"http://www.newzcone.com/technology/networking-telecom/","sports":"http://www.newzcone.com/sports/","entertainment":"http://www.newzcone.com/entertainment/"}
Today = datetime.date.today()
today = ""
for string in str(Today):
if string == "-":
today +="/"
else:
today+=string
for key in URLS_of_menu:
url = URLS_of_menu[key]
html = requests.get(url)
soup = BeautifulSoup(html.text,"html.parser")
findingUrl = soup.findAll("div",class_="news-entry")
for div in findingUrl:
a_tags = div.findAll("a")
count = 0
for a in a_tags[1:15]:
new_dic["Date"] = today
new_dic["Discription"] = a.get_text().strip()
new_dic["News_URL"] = a["href"]
html = requests.get(a["href"])
needsoup = BeautifulSoup(html.text,"html.parser")
get_title = needsoup.title.get_text().strip()
new_dic["Title"] = get_title
count +=1
if count == 5:
break
top_news[key].append(new_dic.copy())
return(top_news)
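
# Minimal usage sketch (not part of the original module): each category maps to
# a list of dicts with Date, Discription, News_URL and Title keys.
if __name__ == "__main__":
    news = Scraper_news()
    for item in news["technology"]:
        print(item["Title"], "-", item["News_URL"])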
|
the-stack_0_8931 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
from nova.image import glance
class Controller(object):
"""The image metadata API controller for the OpenStack API."""
def __init__(self):
self.image_service = glance.get_default_image_service()
def _get_image(self, context, image_id):
try:
return self.image_service.show(context, image_id)
except exception.NotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, image_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
@wsgi.serializers(xml=common.MetaItemTemplate)
def show(self, req, image_id, id):
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
if id in metadata:
return {'meta': {id: metadata[id]}}
else:
raise exc.HTTPNotFound()
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def create(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if 'metadata' in body:
for key, value in body['metadata'].iteritems():
image['properties'][key] = value
common.check_img_metadata_properties_quota(context,
image['properties'])
image = self.image_service.update(context, image_id, image, None)
return dict(metadata=image['properties'])
@wsgi.serializers(xml=common.MetaItemTemplate)
@wsgi.deserializers(xml=common.MetaItemDeserializer)
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
try:
meta = body['meta']
except KeyError:
expl = _('Incorrect request body format')
raise exc.HTTPBadRequest(explanation=expl)
if not id in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(meta) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
image = self._get_image(context, image_id)
image['properties'][id] = meta[id]
common.check_img_metadata_properties_quota(context,
image['properties'])
self.image_service.update(context, image_id, image, None)
return dict(meta=meta)
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def update_all(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
metadata = body.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
image['properties'] = metadata
self.image_service.update(context, image_id, image, None)
return dict(metadata=metadata)
@wsgi.response(204)
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if not id in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
self.image_service.update(context, image_id, image, None)
def create_resource():
return wsgi.Resource(Controller())
|
the-stack_0_8932 | import contextlib
import re
import sys
class ColorContext(object):
"""
A context manager for terminal text colors.
Context usage:
with blue:
print 'this is blue'
with red:
print 'this is red'
print 'blue again!'
Callable usage that can break nested colors:
with purple:
print 'this is purple'
print yellow('this is yellow')
print 'this is not purple!'
"""
end = '\033[0m'
stack = [end]
def __init__(self, start):
self.start = start
def __call__(self, text):
"""Colorize some text. Cannot be nested; use as a context instead."""
return self.start + text + self.end
def __enter__(self):
code = self.start
sys.stdout.write(code)
sys.stderr.write(code)
self.stack.append(code)
def __exit__(self, type, value, traceback):
self.stack.pop()
sys.stdout.write(self.stack[-1])
sys.stderr.write(self.stack[-1])
blue = blue_text = ColorContext('\033[94m')
default = default_text_color = ColorContext(ColorContext.end)
green = green_text = ColorContext('\033[92m')
purple = purple_text = ColorContext('\033[95m')
red = red_text = ColorContext('\033[91m')
yellow = yellow_text = ColorContext('\033[93m')
class FilteredStdOut(object):
_re_type = type(re.compile(''))
def __init__(self, stdout, re_pattern):
self.stdout = stdout
if not isinstance(re_pattern, self._re_type):
re_pattern = re.compile(re_pattern)
self.pattern = re_pattern
self.blocked = False
def __getattr__(self, name):
return getattr(self.stdout, name)
def write(self, string):
if self.pattern.search(string):
self.blocked = True
elif self.blocked:
self.blocked = False
# The print statement writes the newline character afterwards,
# so this keeps track if what has been filtered out, and then
# avoids writing whitespace directly afterwards.
if string.strip():
self.stdout.write(string)
else:
self.stdout.write(string)
@contextlib.contextmanager
def do_not_print(re_pattern):
"""Stop certain messages from being printed to stdout."""
stdout = sys.stdout
sys.stdout = FilteredStdOut(stdout, re_pattern)
try:
yield
finally:
sys.stdout = stdout
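
# Minimal usage sketch (not part of the original module): print() lines matching
# the pattern are swallowed, everything else still reaches stdout.
if __name__ == '__main__':
    with do_not_print(r'^DEBUG'):
        print('DEBUG: this line is filtered out')
        print('this line is still printed')
    with green:
        print('and this one is green')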
|
the-stack_0_8933 | #!/usr/bin/env python
from txros import util
from twisted.internet import defer
from navigator import Navigator
import numpy as np
from mil_tools import rosmsg_to_numpy
from geometry_msgs.msg import Vector3Stamped
class PingerAndy(Navigator):
'''
    Mission to run the sonar start gate challenge using Andy's sonar system, which produces a vector pointing towards the pinger.
'''
@classmethod
def init(cls):
cls.pinger_heading = cls.nh.subscribe("/hydrophones/ping_direction", Vector3Stamped)
@staticmethod
def line(p1, p2):
'''
Return equation of a line given two 2D points
https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python
'''
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
@staticmethod
def intersection(L1, L2):
'''
Return point intersection (if it exsists) of two lines given their equations obtained from the line method
https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python
'''
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return None
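    # Example sketch: line((0, 0), (1, 1)) and line((0, 1), (1, 0)) describe the
    # two diagonals of the unit square; intersection() of those returns
    # (0.5, 0.5), while parallel lines return None.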
@util.cancellableInlineCallbacks
def get_gates(self):
totems = []
for i in range(4):
while True:
self.send_feedback('Click on totem {} in rviz'.format(i + 1))
point = yield self.rviz_point.get_next_message()
if point.header.frame_id != 'enu':
self.send_feedback('Point is not in ENU.\
Please switch rviz frame to ENU or tell kevin to support other frames.')
continue
break
            self.send_feedback('Received point for totem {}'.format(i + 1))
point = rosmsg_to_numpy(point.point)
point[2] = 0.0
totems.append(np.array(point))
# Create list of gates halfway between each pair of totems
gates = []
for i in range(3):
gates.append((totems[i] + totems[i + 1]) / 2.0)
defer.returnValue(gates)
@util.cancellableInlineCallbacks
def run(self, args):
# Get position of 3 gates based on position of totems
gates = yield self.get_gates()
# Get heading towards pinger from Andy hydrophone system
self.send_feedback('All gates clicked on! Waiting for pinger heading...')
heading = yield self.pinger_heading.get_next_message()
        self.send_feedback('Received pinger heading')
        # Convert heading and hydrophones frame to ENU
hydrophones_to_enu = yield self.tf_listener.get_transform('enu', heading.header.frame_id)
hydrophones_origin = hydrophones_to_enu._p[0:2]
heading = rosmsg_to_numpy(heading.vector)
heading_enu = hydrophones_to_enu.transform_vector(heading)
heading_enu = heading_enu[0:2] / np.linalg.norm(heading_enu[0:2])
pinger_line = self.line(hydrophones_origin, hydrophones_origin + heading_enu)
gates_line = self.line(gates[0], gates[-1])
# Find intersection of these two lines. This is the approximate position of the pinger
intersection = self.intersection(pinger_line, gates_line)
if intersection is None:
raise Exception('No intersection')
self.send_feedback('Pinger is roughly at {}'.format(intersection))
distances = []
for gate in gates:
distances.append(np.linalg.norm(gate[0:2] - intersection))
argmin = np.argmin(np.array(distances))
self.send_feedback('Pinger is likely at gate {}'.format(argmin + 1))
gate = gates[argmin][:2]
between_vector = (gates[0] - gates[-1])[:2]
# Rotate that vector to point through the buoys
c = np.cos(np.radians(90))
s = np.sin(np.radians(90))
R = np.array([[c, -s], [s, c]])
direction_vector = R.dot(between_vector)
direction_vector /= np.linalg.norm(direction_vector)
position = self.pose[0][:2]
if np.linalg.norm(position - (gate + direction_vector)) > np.linalg.norm(position - (gate - direction_vector)):
direction_vector = -direction_vector
before_distance = 3.0
after_distance = 5.0
before = np.append(gate + direction_vector * before_distance, 0)
after = np.append(gate - direction_vector * after_distance, 0)
self.send_feedback('Moving in front of gate')
yield self.move.set_position(before).look_at(after).go()
self.send_feedback('Going through')
yield self.move.set_position(after).go()
defer.returnValue('My god it actually worked!')
|
the-stack_0_8934 | from bitmovin.resources import AbstractIdResource
class EncodingStatus(AbstractIdResource):
def __init__(self, status, number_of_segments=None, id_=None, messages=None, subtasks=None,
created_at=None, queued_at=None, finished_at=None, error_at=None):
super().__init__(id_=id_)
self.status = status
self.numberOfSegments = number_of_segments
self.messages = messages
self.subtasks = subtasks
self.created_at = created_at
self.queued_at = queued_at
self.finished_at = finished_at
self.error_at = error_at
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object.get('id')
status = json_object['status']
messages = json_object.get('messages')
subtasks = json_object.get('subtasks')
created_at = json_object.get('createdAt')
queued_at = json_object.get('queuedAt')
finished_at = json_object.get('finishedAt')
error_at = json_object.get('errorAt')
number_of_segments = json_object.get('numberOfSegments')
encoding_status = EncodingStatus(status=status, number_of_segments=number_of_segments, id_=id_,
messages=messages, subtasks=subtasks, created_at=created_at,
queued_at=queued_at, finished_at=finished_at, error_at=error_at)
return encoding_status
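
# Minimal usage sketch (not part of the original module); the payload below is a
# made-up example, not a real Bitmovin API response.
if __name__ == '__main__':
    status = EncodingStatus.parse_from_json_object({
        'id': 'abc-123',
        'status': 'FINISHED',
        'numberOfSegments': 12,
        'createdAt': '2021-01-01T00:00:00Z',
    })
    print(status.status, status.numberOfSegments)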
|
the-stack_0_8935 | """Tools for simulation of transients."""
from __future__ import print_function
import sys
import math
import copy
from collections import OrderedDict
import numpy as np
from numpy import random
from scipy.interpolate import InterpolatedUnivariateSpline as Spline1d
from astropy.table import Table
from astropy.cosmology import FlatLambdaCDM
from astropy.extern.six.moves import range
from .utils import alias_map
__all__ = ['zdist', 'realize_lcs']
WHOLESKY_SQDEG = 4. * np.pi * (180. / np.pi) ** 2
def zdist(zmin, zmax, time=365.25, area=1.,
ratefunc=lambda z: 1.e-4,
cosmo=FlatLambdaCDM(H0=70.0, Om0=0.3)):
"""Generate a distribution of redshifts.
Generates the correct redshift distribution and number of SNe, given
the input volumetric SN rate, the cosmology, and the observed area and
time.
Parameters
----------
zmin, zmax : float
Minimum and maximum redshift.
time : float, optional
Time in days (default is 1 year).
area : float, optional
Area in square degrees (default is 1 square degree). ``time`` and
``area`` are only used to determine the total number of SNe to
generate.
ratefunc : callable
A callable that accepts a single float (redshift) and returns the
comoving volumetric rate at each redshift in units of yr^-1 Mpc^-3.
The default is a function that returns ``1.e-4``.
cosmo : `~astropy.cosmology.Cosmology`, optional
Cosmology used to determine volume. The default is a FlatLambdaCDM
cosmology with ``Om0=0.3``, ``H0=70.0``.
Examples
--------
Loop over the generator:
>>> for z in zdist(0.0, 0.25):
... print(z)
...
0.151285827576
0.204078030595
0.201009196731
0.181635472172
0.17896188781
0.226561237264
0.192747368762
This tells us that in one observer-frame year, over 1 square
    degree, 7 SNe occurred at redshifts below 0.25 (given the default
volumetric SN rate of 10^-4 SNe yr^-1 Mpc^-3). The exact number is
drawn from a Poisson distribution.
Generate the full list of redshifts immediately:
>>> zlist = list(zdist(0., 0.25))
Define a custom volumetric rate:
>>> def snrate(z):
... return 0.5e-4 * (1. + z)
...
>>> zlist = list(zdist(0., 0.25, ratefunc=snrate))
"""
# Get comoving volume in each redshift shell.
z_bins = 100 # Good enough for now.
z_binedges = np.linspace(zmin, zmax, z_bins + 1)
z_binctrs = 0.5 * (z_binedges[1:] + z_binedges[:-1])
sphere_vols = cosmo.comoving_volume(z_binedges).value
shell_vols = sphere_vols[1:] - sphere_vols[:-1]
# SN / (observer year) in shell
shell_snrate = np.array([shell_vols[i] *
ratefunc(z_binctrs[i]) / (1.+z_binctrs[i])
for i in range(z_bins)])
# SN / (observer year) within z_binedges
vol_snrate = np.zeros_like(z_binedges)
vol_snrate[1:] = np.add.accumulate(shell_snrate)
# Create a ppf (inverse cdf). We'll use this later to get
# a random SN redshift from the distribution.
snrate_cdf = vol_snrate / vol_snrate[-1]
snrate_ppf = Spline1d(snrate_cdf, z_binedges, k=1)
    # Total number of SNe to simulate.
nsim = vol_snrate[-1] * (time/365.25) * (area/WHOLESKY_SQDEG)
for i in range(random.poisson(nsim)):
yield float(snrate_ppf(random.random()))
OBSERVATIONS_ALIASES = OrderedDict([
('time', set(['time', 'date', 'jd', 'mjd', 'mjdobs', 'mjd_obs'])),
('band', set(['band', 'bandpass', 'filter', 'flt'])),
('zp', set(['zp', 'zpt', 'zeropoint', 'zero_point'])),
('zpsys', set(['zpsys', 'zpmagsys', 'magsys'])),
('gain', set(['gain'])),
('skynoise', set(['skynoise']))
])
OBSERVATIONS_REQUIRED_ALIASES = ('time', 'band', 'zp', 'zpsys', 'gain',
'skynoise')
def realize_lcs(observations, model, params, thresh=None,
trim_observations=False, scatter=True):
"""Realize data for a set of SNe given a set of observations.
Parameters
----------
observations : `~astropy.table.Table` or `~numpy.ndarray`
Table of observations. Must contain the following column names:
``band``, ``time``, ``zp``, ``zpsys``, ``gain``, ``skynoise``.
model : `sncosmo.Model`
The model to use in the simulation.
params : list (or generator) of dict
List of parameters to feed to the model for realizing each light curve.
thresh : float, optional
If given, light curves are skipped (not returned) if none of the data
points have signal-to-noise greater than ``thresh``.
trim_observations : bool, optional
If True, only observations with times between
``model.mintime()`` and ``model.maxtime()`` are included in
result table for each SN. Default is False.
scatter : bool, optional
If True, the ``flux`` value of the realized data is calculated by
adding a random number drawn from a Normal Distribution with a
standard deviation equal to the ``fluxerror`` of the observation to
the bandflux value of the observation calculated from model. Default
is True.
Returns
-------
sne : list of `~astropy.table.Table`
Table of realized data for each item in ``params``.
Notes
-----
``skynoise`` is the image background contribution to the flux measurement
error (in units corresponding to the specified zeropoint and zeropoint
system). To get the error on a given measurement, ``skynoise`` is added
in quadrature to the photon noise from the source.
It is left up to the user to calculate ``skynoise`` as they see fit as the
    details depend on how photometry is done and possibly how the PSF
    is modeled. As a simple example, assuming a Gaussian PSF and perfect
PSF photometry, ``skynoise`` would be ``4 * pi * sigma_PSF * sigma_pixel``
where ``sigma_PSF`` is the standard deviation of the PSF in pixels and
``sigma_pixel`` is the background noise in a single pixel in counts.
"""
RESULT_COLNAMES = ('time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys')
lcs = []
# Copy model so we don't mess up the user's model.
model = copy.copy(model)
# get observations as a Table
if not isinstance(observations, Table):
if isinstance(observations, np.ndarray):
observations = Table(observations)
else:
raise ValueError("observations not understood")
# map column name aliases
colname = alias_map(observations.colnames, OBSERVATIONS_ALIASES,
required=OBSERVATIONS_REQUIRED_ALIASES)
# result dtype used when there are no observations
band_dtype = observations[colname['band']].dtype
zpsys_dtype = observations[colname['zpsys']].dtype
result_dtype = ('f8', band_dtype, 'f8', 'f8', 'f8', zpsys_dtype)
for p in params:
model.set(**p)
        # Select times for output that fall within tmin and tmax of the model
if trim_observations:
mask = ((observations[colname['time']] > model.mintime()) &
(observations[colname['time']] < model.maxtime()))
snobs = observations[mask]
else:
snobs = observations
# explicitly detect no observations and add an empty table
if len(snobs) == 0:
if thresh is None:
lcs.append(Table(names=RESULT_COLNAMES,
dtype=result_dtype, meta=p))
continue
flux = model.bandflux(snobs[colname['band']],
snobs[colname['time']],
zp=snobs[colname['zp']],
zpsys=snobs[colname['zpsys']])
fluxerr = np.sqrt(snobs[colname['skynoise']]**2 +
np.abs(flux) / snobs[colname['gain']])
# Scatter fluxes by the fluxerr
# np.atleast_1d is necessary here because of an apparent bug in
# np.random.normal: when the inputs are both length 1 arrays,
# the output is a Python float!
if scatter:
flux = np.atleast_1d(np.random.normal(flux, fluxerr))
# Check if any of the fluxes are significant
if thresh is not None and not np.any(flux/fluxerr > thresh):
continue
data = [snobs[colname['time']], snobs[colname['band']], flux, fluxerr,
snobs[colname['zp']], snobs[colname['zpsys']]]
lcs.append(Table(data, names=RESULT_COLNAMES, meta=p))
return lcs
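
# A minimal sketch of the skynoise rule of thumb from the realize_lcs notes,
# assuming a Gaussian PSF with sigma_psf in pixels and a per-pixel background
# noise sigma_pixel in counts; the numeric values are made-up assumptions.
def _example_skynoise(sigma_psf=2.0, sigma_pixel=10.0):
    return 4. * np.pi * sigma_psf * sigma_pixel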
|
the-stack_0_8936 | from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self):
self.wd = webdriver.Firefox()
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
def open_home_page(self):
wd = self.wd
wd.get("http://localhost/addressbook/")
def destroy(self):
self.wd.quit() |
the-stack_0_8938 | import os
import re
import sys
import glob
import json
import time
import logging
import threading
import subprocess
import six
import base64
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,
to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = 'aws:sqs'
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# whether to use our custom Java executor, or the default from lambci
# TODO: deprecated, should be removed in the future
USE_CUSTOM_JAVA_EXECUTOR = False
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info('Unable to get IP address of main Docker container "%s": %s' %
(container_name, e))
# return main container IP, or fall back to Docker host (bridge IP, or host DNS address)
return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
return result
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
@cloudwatched('lambda')
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return None, 'Lambda executed asynchronously.'
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
env_vars=env_vars, stdin=True)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output))
return result
class ContainerInfo:
""" Contains basic information about a docker container. """
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment['LOCALSTACK_HOSTNAME'] = main_endpoint
environment['_HANDLER'] = handler
if os.environ.get('HTTP_PROXY'):
environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(
base64.b64decode(to_bytes(context.client_context))))
# custom command to execute in the container
command = ''
events_file = ''
if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):
# if running a Java Lambda with our custom executor, set up classpath arguments
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
events_file = '_lambda.events.%s.json' % short_uid()
save_file(os.path.join(lambda_cwd, events_file), event_body)
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
# clean up events file
events_file and os.path.exists(events_file) and rm_rf(events_file)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if (':' in lambda_cwd and '\\' in lambda_cwd):
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' %s' # dns
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str,
env_vars_str, network_str, dns_str, docker_image)
LOG.debug(cmd)
run(cmd)
if not mount_volume:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'%s stop -t0 %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'%s rm %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# If the container doesn't exist. Create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates though all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Runs idle_container_destroyer() and then re-schedules itself every 60 seconds,
        so idle containers are periodically checked for and destroyed.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
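        # Example sketch: "arn:aws:lambda:us-east-1:000000000000:function:my-func"
        # maps to "localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func".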
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = get_free_tcp_port()
env_vars['DOCKER_LAMBDA_API_PORT'] = port
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # env
' %s' # network
' %s' # dns
' %s' # --rm flag
' %s %s' # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port,
env_vars_string, network_str, dns_str, rm_flag,
docker_image, command,
docker_cmd, lambda_cwd,
docker_cmd)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s -v "%s":/var/task'
' %s'
' %s' # network
' %s' # dns
' %s' # --rm flag
' %s %s'
) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
network_str, dns_str, rm_flag, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
path_before = sys.path
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path = [lambda_cwd] + sys.path
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
finally:
sys.path = path_before
process = Process(target=do_execute)
with CaptureOutput() as c:
process.run()
result = queue.get()
# Make sure to keep the log line below, to ensure the log stream gets created
log_output = 'START: Lambda %s started via "local" executor ...' % func_arn
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
# store logs to CloudWatch
_store_logs(func_details, log_output)
return result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[0]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(':', '').replace('\\', '/')
if len(temp) >= 1 and temp[:1] != '/':
temp = '/' + temp
temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
@classmethod
def docker_image_for_runtime(cls, runtime):
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append('*.jar')
entries.append('java/lib/*.jar')
result = ':'.join(entries)
return result
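# Illustrative sketch (hypothetical paths, not part of the original module): for an
# archive /tmp/app/handler.jar with a sibling /tmp/app/lib/dep.jar, get_java_classpath
# would return roughly
#   .:lib/dep.jar:handler.jar:*.jar:java/lib/*.jar
# i.e. entries relative to the base dir, with the supplied archive appended before the
# trailing wildcard entries.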
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
the-stack_0_8940 | import re
from collections.abc import Iterable
from functools import partial
from graphql_relay import connection_from_array
from ..types import Boolean, Enum, Int, Interface, List, NonNull, Scalar, String, Union
from ..types.field import Field
from ..types.objecttype import ObjectType, ObjectTypeOptions
from ..utils.thenables import maybe_thenable
from .node import is_node
class PageInfo(ObjectType):
class Meta:
description = (
"The Relay compliant `PageInfo` type, containing data necessary to"
" paginate this connection."
)
has_next_page = Boolean(
required=True,
name="hasNextPage",
description="When paginating forwards, are there more items?",
)
has_previous_page = Boolean(
required=True,
name="hasPreviousPage",
description="When paginating backwards, are there more items?",
)
start_cursor = String(
name="startCursor",
description="When paginating backwards, the cursor to continue.",
)
end_cursor = String(
name="endCursor",
description="When paginating forwards, the cursor to continue.",
)
# noinspection PyPep8Naming
def page_info_adapter(startCursor, endCursor, hasPreviousPage, hasNextPage):
"""Adapter for creating PageInfo instances"""
return PageInfo(
start_cursor=startCursor,
end_cursor=endCursor,
has_previous_page=hasPreviousPage,
has_next_page=hasNextPage,
)
class ConnectionOptions(ObjectTypeOptions):
node = None
class Connection(ObjectType):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, node=None, name=None, **options):
_meta = ConnectionOptions(cls)
assert node, f"You have to provide a node in {cls.__name__}.Meta"
assert isinstance(node, NonNull) or issubclass(
node, (Scalar, Enum, ObjectType, Interface, Union, NonNull)
), f'Received incompatible node "{node}" for Connection {cls.__name__}.'
base_name = re.sub("Connection$", "", name or cls.__name__) or node._meta.name
if not name:
name = f"{base_name}Connection"
edge_class = getattr(cls, "Edge", None)
_node = node
class EdgeBase:
node = Field(_node, description="The item at the end of the edge")
cursor = String(required=True, description="A cursor for use in pagination")
class EdgeMeta:
description = f"A Relay edge containing a `{base_name}` and its cursor."
edge_name = f"{base_name}Edge"
if edge_class:
edge_bases = (edge_class, EdgeBase, ObjectType)
else:
edge_bases = (EdgeBase, ObjectType)
edge = type(edge_name, edge_bases, {"Meta": EdgeMeta})
cls.Edge = edge
options["name"] = name
_meta.node = node
_meta.fields = {
"page_info": Field(
PageInfo,
name="pageInfo",
required=True,
description="Pagination data for this connection.",
),
"edges": Field(
NonNull(List(edge)),
description="Contains the nodes in this connection.",
),
}
return super(Connection, cls).__init_subclass_with_meta__(
_meta=_meta, **options
)
# noinspection PyPep8Naming
def connection_adapter(cls, edges, pageInfo):
"""Adapter for creating Connection instances"""
return cls(edges=edges, page_info=pageInfo)
class IterableConnectionField(Field):
def __init__(self, type_, *args, **kwargs):
kwargs.setdefault("before", String())
kwargs.setdefault("after", String())
kwargs.setdefault("first", Int())
kwargs.setdefault("last", Int())
super(IterableConnectionField, self).__init__(type_, *args, **kwargs)
@property
def type(self):
type_ = super(IterableConnectionField, self).type
connection_type = type_
if isinstance(type_, NonNull):
connection_type = type_.of_type
if is_node(connection_type):
raise Exception(
"ConnectionFields now need a explicit ConnectionType for Nodes.\n"
"Read more: https://github.com/graphql-python/graphene/blob/v2.0.0/UPGRADE-v2.0.md#node-connections"
)
assert issubclass(
connection_type, Connection
), f'{self.__class__.__name__} type has to be a subclass of Connection. Received "{connection_type}".'
return type_
@classmethod
def resolve_connection(cls, connection_type, args, resolved):
if isinstance(resolved, connection_type):
return resolved
assert isinstance(resolved, Iterable), (
f"Resolved value from the connection field has to be an iterable or instance of {connection_type}. "
f'Received "{resolved}"'
)
connection = connection_from_array(
resolved,
args,
connection_type=partial(connection_adapter, connection_type),
edge_type=connection_type.Edge,
page_info_type=page_info_adapter,
)
connection.iterable = resolved
return connection
@classmethod
def connection_resolver(cls, resolver, connection_type, root, info, **args):
resolved = resolver(root, info, **args)
if isinstance(connection_type, NonNull):
connection_type = connection_type.of_type
on_resolve = partial(cls.resolve_connection, connection_type, args)
return maybe_thenable(resolved, on_resolve)
def wrap_resolve(self, parent_resolver):
resolver = super(IterableConnectionField, self).wrap_resolve(parent_resolver)
return partial(self.connection_resolver, resolver, self.type)
ConnectionField = IterableConnectionField
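# Illustrative usage sketch (the Ship/Faction names are hypothetical, not part of this
# module): a Connection is declared by pointing Meta.node at an ObjectType and exposed
# through a ConnectionField.
#
#     class Ship(ObjectType):
#         name = String()
#
#     class ShipConnection(Connection):
#         class Meta:
#             node = Ship
#
#     class Faction(ObjectType):
#         ships = ConnectionField(ShipConnection)
#
# __init_subclass_with_meta__ above then builds the required `pageInfo` and `edges`
# fields and a ShipEdge type carrying `node` and `cursor`.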
|
the-stack_0_8941 | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/LunariumCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
the-stack_0_8942 | """Init file for Supervisor RESTful API."""
import logging
from pathlib import Path
from typing import Optional
from aiohttp import web
from ..coresys import CoreSys, CoreSysAttributes
from .addons import APIAddons
from .audio import APIAudio
from .auth import APIAuth
from .cli import APICli
from .discovery import APIDiscovery
from .dns import APICoreDNS
from .docker import APIDocker
from .hardware import APIHardware
from .homeassistant import APIHomeAssistant
from .host import APIHost
from .info import APIInfo
from .ingress import APIIngress
from .jobs import APIJobs
from .multicast import APIMulticast
from .network import APINetwork
from .observer import APIObserver
from .os import APIOS
from .proxy import APIProxy
from .resolution import APIResoulution
from .security import SecurityMiddleware
from .services import APIServices
from .snapshots import APISnapshots
from .store import APIStore
from .supervisor import APISupervisor
_LOGGER: logging.Logger = logging.getLogger(__name__)
MAX_CLIENT_SIZE: int = 1024 ** 2 * 16
class RestAPI(CoreSysAttributes):
"""Handle RESTful API for Supervisor."""
def __init__(self, coresys: CoreSys):
"""Initialize Docker base wrapper."""
self.coresys: CoreSys = coresys
self.security: SecurityMiddleware = SecurityMiddleware(coresys)
self.webapp: web.Application = web.Application(
client_max_size=MAX_CLIENT_SIZE,
middlewares=[
self.security.system_validation,
self.security.token_validation,
],
)
# service stuff
self._runner: web.AppRunner = web.AppRunner(self.webapp)
self._site: Optional[web.TCPSite] = None
async def load(self) -> None:
"""Register REST API Calls."""
self._register_addons()
self._register_audio()
self._register_auth()
self._register_cli()
self._register_discovery()
self._register_dns()
self._register_docker()
self._register_hardware()
self._register_homeassistant()
self._register_host()
self._register_info()
self._register_ingress()
self._register_multicast()
self._register_network()
self._register_observer()
self._register_os()
self._register_jobs()
self._register_panel()
self._register_proxy()
self._register_resolution()
self._register_services()
self._register_snapshots()
self._register_supervisor()
self._register_store()
await self.start()
def _register_host(self) -> None:
"""Register hostcontrol functions."""
api_host = APIHost()
api_host.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/host/info", api_host.info),
web.get("/host/logs", api_host.logs),
web.post("/host/reboot", api_host.reboot),
web.post("/host/shutdown", api_host.shutdown),
web.post("/host/reload", api_host.reload),
web.post("/host/options", api_host.options),
web.get("/host/services", api_host.services),
web.post("/host/services/{service}/stop", api_host.service_stop),
web.post("/host/services/{service}/start", api_host.service_start),
web.post("/host/services/{service}/restart", api_host.service_restart),
web.post("/host/services/{service}/reload", api_host.service_reload),
]
)
def _register_network(self) -> None:
"""Register network functions."""
api_network = APINetwork()
api_network.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/network/info", api_network.info),
web.post("/network/reload", api_network.reload),
web.get(
"/network/interface/{interface}/info", api_network.interface_info
),
web.post(
"/network/interface/{interface}/update",
api_network.interface_update,
),
web.get(
"/network/interface/{interface}/accesspoints",
api_network.scan_accesspoints,
),
web.post(
"/network/interface/{interface}/vlan/{vlan}",
api_network.create_vlan,
),
]
)
def _register_os(self) -> None:
"""Register OS functions."""
api_os = APIOS()
api_os.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/os/info", api_os.info),
web.post("/os/update", api_os.update),
web.post("/os/config/sync", api_os.config_sync),
]
)
def _register_jobs(self) -> None:
"""Register Jobs functions."""
api_jobs = APIJobs()
api_jobs.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/jobs/info", api_jobs.info),
web.post("/jobs/options", api_jobs.options),
web.post("/jobs/reset", api_jobs.reset),
]
)
def _register_cli(self) -> None:
"""Register HA cli functions."""
api_cli = APICli()
api_cli.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/cli/info", api_cli.info),
web.get("/cli/stats", api_cli.stats),
web.post("/cli/update", api_cli.update),
]
)
def _register_observer(self) -> None:
"""Register Observer functions."""
api_observer = APIObserver()
api_observer.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/observer/info", api_observer.info),
web.get("/observer/stats", api_observer.stats),
web.post("/observer/update", api_observer.update),
]
)
def _register_multicast(self) -> None:
"""Register Multicast functions."""
api_multicast = APIMulticast()
api_multicast.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/multicast/info", api_multicast.info),
web.get("/multicast/stats", api_multicast.stats),
web.get("/multicast/logs", api_multicast.logs),
web.post("/multicast/update", api_multicast.update),
web.post("/multicast/restart", api_multicast.restart),
]
)
def _register_hardware(self) -> None:
"""Register hardware functions."""
api_hardware = APIHardware()
api_hardware.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/hardware/info", api_hardware.info),
web.get("/hardware/audio", api_hardware.audio),
web.post("/hardware/trigger", api_hardware.trigger),
]
)
def _register_info(self) -> None:
"""Register info functions."""
api_info = APIInfo()
api_info.coresys = self.coresys
self.webapp.add_routes([web.get("/info", api_info.info)])
def _register_resolution(self) -> None:
"""Register info functions."""
api_resolution = APIResoulution()
api_resolution.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/resolution/info", api_resolution.info),
web.post(
"/resolution/check/{check}/options", api_resolution.options_check
),
web.post("/resolution/check/{check}/run", api_resolution.run_check),
web.post(
"/resolution/suggestion/{suggestion}",
api_resolution.apply_suggestion,
),
web.delete(
"/resolution/suggestion/{suggestion}",
api_resolution.dismiss_suggestion,
),
web.delete(
"/resolution/issue/{issue}",
api_resolution.dismiss_issue,
),
web.post("/resolution/healthcheck", api_resolution.healthcheck),
]
)
def _register_auth(self) -> None:
"""Register auth functions."""
api_auth = APIAuth()
api_auth.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/auth", api_auth.auth),
web.post("/auth", api_auth.auth),
web.post("/auth/reset", api_auth.reset),
web.delete("/auth/cache", api_auth.cache),
]
)
def _register_supervisor(self) -> None:
"""Register Supervisor functions."""
api_supervisor = APISupervisor()
api_supervisor.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/supervisor/ping", api_supervisor.ping),
web.get("/supervisor/info", api_supervisor.info),
web.get("/supervisor/stats", api_supervisor.stats),
web.get("/supervisor/logs", api_supervisor.logs),
web.post("/supervisor/update", api_supervisor.update),
web.post("/supervisor/reload", api_supervisor.reload),
web.post("/supervisor/restart", api_supervisor.restart),
web.post("/supervisor/options", api_supervisor.options),
web.post("/supervisor/repair", api_supervisor.repair),
]
)
def _register_homeassistant(self) -> None:
"""Register Home Assistant functions."""
api_hass = APIHomeAssistant()
api_hass.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/core/info", api_hass.info),
web.get("/core/logs", api_hass.logs),
web.get("/core/stats", api_hass.stats),
web.post("/core/options", api_hass.options),
web.post("/core/update", api_hass.update),
web.post("/core/restart", api_hass.restart),
web.post("/core/stop", api_hass.stop),
web.post("/core/start", api_hass.start),
web.post("/core/check", api_hass.check),
web.post("/core/rebuild", api_hass.rebuild),
# Remove with old Supervisor fallback
web.get("/homeassistant/info", api_hass.info),
web.get("/homeassistant/logs", api_hass.logs),
web.get("/homeassistant/stats", api_hass.stats),
web.post("/homeassistant/options", api_hass.options),
web.post("/homeassistant/update", api_hass.update),
web.post("/homeassistant/restart", api_hass.restart),
web.post("/homeassistant/stop", api_hass.stop),
web.post("/homeassistant/start", api_hass.start),
web.post("/homeassistant/check", api_hass.check),
web.post("/homeassistant/rebuild", api_hass.rebuild),
]
)
def _register_proxy(self) -> None:
"""Register Home Assistant API Proxy."""
api_proxy = APIProxy()
api_proxy.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/core/api/websocket", api_proxy.websocket),
web.get("/core/websocket", api_proxy.websocket),
web.get("/core/api/stream", api_proxy.stream),
web.post("/core/api/{path:.+}", api_proxy.api),
web.get("/core/api/{path:.+}", api_proxy.api),
web.get("/core/api/", api_proxy.api),
# Remove with old Supervisor fallback
web.get("/homeassistant/api/websocket", api_proxy.websocket),
web.get("/homeassistant/websocket", api_proxy.websocket),
web.get("/homeassistant/api/stream", api_proxy.stream),
web.post("/homeassistant/api/{path:.+}", api_proxy.api),
web.get("/homeassistant/api/{path:.+}", api_proxy.api),
web.get("/homeassistant/api/", api_proxy.api),
]
)
def _register_addons(self) -> None:
"""Register Add-on functions."""
api_addons = APIAddons()
api_addons.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/addons", api_addons.list),
web.post("/addons/reload", api_addons.reload),
web.get("/addons/{addon}/info", api_addons.info),
web.post("/addons/{addon}/uninstall", api_addons.uninstall),
web.post("/addons/{addon}/start", api_addons.start),
web.post("/addons/{addon}/stop", api_addons.stop),
web.post("/addons/{addon}/restart", api_addons.restart),
web.post("/addons/{addon}/options", api_addons.options),
web.post(
"/addons/{addon}/options/validate", api_addons.options_validate
),
web.get("/addons/{addon}/options/config", api_addons.options_config),
web.post("/addons/{addon}/rebuild", api_addons.rebuild),
web.get("/addons/{addon}/logs", api_addons.logs),
web.get("/addons/{addon}/icon", api_addons.icon),
web.get("/addons/{addon}/logo", api_addons.logo),
web.get("/addons/{addon}/changelog", api_addons.changelog),
web.get("/addons/{addon}/documentation", api_addons.documentation),
web.post("/addons/{addon}/stdin", api_addons.stdin),
web.post("/addons/{addon}/security", api_addons.security),
web.get("/addons/{addon}/stats", api_addons.stats),
]
)
def _register_ingress(self) -> None:
"""Register Ingress functions."""
api_ingress = APIIngress()
api_ingress.coresys = self.coresys
self.webapp.add_routes(
[
web.post("/ingress/session", api_ingress.create_session),
web.post("/ingress/validate_session", api_ingress.validate_session),
web.get("/ingress/panels", api_ingress.panels),
web.view("/ingress/{token}/{path:.*}", api_ingress.handler),
]
)
def _register_snapshots(self) -> None:
"""Register snapshots functions."""
api_snapshots = APISnapshots()
api_snapshots.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/snapshots", api_snapshots.list),
web.post("/snapshots/reload", api_snapshots.reload),
web.post("/snapshots/new/full", api_snapshots.snapshot_full),
web.post("/snapshots/new/partial", api_snapshots.snapshot_partial),
web.post("/snapshots/new/upload", api_snapshots.upload),
web.get("/snapshots/{snapshot}/info", api_snapshots.info),
web.delete("/snapshots/{snapshot}", api_snapshots.remove),
web.post(
"/snapshots/{snapshot}/restore/full", api_snapshots.restore_full
),
web.post(
"/snapshots/{snapshot}/restore/partial",
api_snapshots.restore_partial,
),
web.get("/snapshots/{snapshot}/download", api_snapshots.download),
# Old, remove at end of 2020
web.post("/snapshots/{snapshot}/remove", api_snapshots.remove),
]
)
def _register_services(self) -> None:
"""Register services functions."""
api_services = APIServices()
api_services.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/services", api_services.list),
web.get("/services/{service}", api_services.get_service),
web.post("/services/{service}", api_services.set_service),
web.delete("/services/{service}", api_services.del_service),
]
)
def _register_discovery(self) -> None:
"""Register discovery functions."""
api_discovery = APIDiscovery()
api_discovery.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/discovery", api_discovery.list),
web.get("/discovery/{uuid}", api_discovery.get_discovery),
web.delete("/discovery/{uuid}", api_discovery.del_discovery),
web.post("/discovery", api_discovery.set_discovery),
]
)
def _register_dns(self) -> None:
"""Register DNS functions."""
api_dns = APICoreDNS()
api_dns.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/dns/info", api_dns.info),
web.get("/dns/stats", api_dns.stats),
web.get("/dns/logs", api_dns.logs),
web.post("/dns/update", api_dns.update),
web.post("/dns/options", api_dns.options),
web.post("/dns/restart", api_dns.restart),
web.post("/dns/reset", api_dns.reset),
]
)
def _register_audio(self) -> None:
"""Register Audio functions."""
api_audio = APIAudio()
api_audio.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/audio/info", api_audio.info),
web.get("/audio/stats", api_audio.stats),
web.get("/audio/logs", api_audio.logs),
web.post("/audio/update", api_audio.update),
web.post("/audio/restart", api_audio.restart),
web.post("/audio/reload", api_audio.reload),
web.post("/audio/profile", api_audio.set_profile),
web.post("/audio/volume/{source}/application", api_audio.set_volume),
web.post("/audio/volume/{source}", api_audio.set_volume),
web.post("/audio/mute/{source}/application", api_audio.set_mute),
web.post("/audio/mute/{source}", api_audio.set_mute),
web.post("/audio/default/{source}", api_audio.set_default),
]
)
def _register_store(self) -> None:
"""Register store endpoints."""
api_store = APIStore()
api_store.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/store", api_store.store_info),
web.get("/store/addons", api_store.addons_list),
web.get("/store/addons/{addon}", api_store.addons_addon_info),
web.get("/store/addons/{addon}/{version}", api_store.addons_addon_info),
web.post(
"/store/addons/{addon}/install", api_store.addons_addon_install
),
web.post(
"/store/addons/{addon}/install/{version}",
api_store.addons_addon_install,
),
web.post("/store/addons/{addon}/update", api_store.addons_addon_update),
web.post(
"/store/addons/{addon}/update/{version}",
api_store.addons_addon_update,
),
web.post("/store/reload", api_store.reload),
web.get("/store/repositories", api_store.repositories_list),
web.get(
"/store/repositories/{repository}",
api_store.repositories_repository_info,
),
]
)
# Reroute from legacy
self.webapp.add_routes(
[
web.post("/addons/{addon}/install", api_store.addons_addon_install),
web.post("/addons/{addon}/update", api_store.addons_addon_update),
]
)
def _register_panel(self) -> None:
"""Register panel for Home Assistant."""
panel_dir = Path(__file__).parent.joinpath("panel")
self.webapp.add_routes([web.static("/app", panel_dir)])
def _register_docker(self) -> None:
"""Register docker configuration functions."""
api_docker = APIDocker()
api_docker.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/docker/info", api_docker.info),
web.get("/docker/registries", api_docker.registries),
web.post("/docker/registries", api_docker.create_registry),
web.delete("/docker/registries/{hostname}", api_docker.remove_registry),
]
)
async def start(self) -> None:
"""Run RESTful API webserver."""
await self._runner.setup()
self._site = web.TCPSite(
self._runner, host="0.0.0.0", port=80, shutdown_timeout=5
)
try:
await self._site.start()
except OSError as err:
_LOGGER.critical("Failed to create HTTP server at 0.0.0.0:80 -> %s", err)
else:
_LOGGER.info("Starting API on %s", self.sys_docker.network.supervisor)
async def stop(self) -> None:
"""Stop RESTful API webserver."""
if not self._site:
return
# Shutdown running API
await self._site.stop()
await self._runner.cleanup()
_LOGGER.info("Stopping API on %s", self.sys_docker.network.supervisor)
|
the-stack_0_8944 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import asyncio
import os
from datetime import datetime
from msrest.serialization import TZ_UTC
from azure.communication.identity import CommunicationIdentityClient
from azure.communication.chat.aio import (
ChatClient,
CommunicationTokenCredential
)
from azure.communication.chat import (
ChatParticipant,
ChatMessageType
)
from azure.communication.identity._shared.utils import parse_connection_str
from azure_devtools.scenario_tests import RecordingProcessor
from helper import URIIdentityReplacer
from chat_e2e_helper import ChatURIReplacer
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import BodyReplacerProcessor, ResponseReplacerProcessor
from _shared.utils import get_http_logging_policy
class ChatThreadClientTestAsync(AsyncCommunicationTestCase):
def setUp(self):
super(ChatThreadClientTestAsync, self).setUp()
self.recording_processors.extend([
BodyReplacerProcessor(keys=["id", "token", "senderId", "chatMessageId", "nextLink", "participants", "multipleStatus", "value"]),
URIIdentityReplacer(),
ResponseReplacerProcessor(keys=[self._resource_name]),
ChatURIReplacer()])
endpoint, _ = parse_connection_str(self.connection_str)
self.endpoint = endpoint
self.identity_client = CommunicationIdentityClient.from_connection_string(self.connection_str)
self.users = []
self.user_tokens = []
self.chat_clients = []
# create user 1
self.user = self.identity_client.create_user()
token_response = self.identity_client.get_token(self.user, scopes=["chat"])
self.token = token_response.token
# create user 2
self.new_user = self.identity_client.create_user()
token_response = self.identity_client.get_token(self.new_user, scopes=["chat"])
self.token_new_user = token_response.token
# create ChatClient
self.chat_client = ChatClient(
self.endpoint,
CommunicationTokenCredential(self.token),
http_logging_policy=get_http_logging_policy()
)
self.chat_client_new_user = ChatClient(
self.endpoint,
CommunicationTokenCredential(self.token_new_user),
http_logging_policy=get_http_logging_policy()
)
def tearDown(self):
super(ChatThreadClientTestAsync, self).tearDown()
# delete created users
if not self.is_playback():
self.identity_client.delete_user(self.user)
self.identity_client.delete_user(self.new_user)
async def _create_thread(self):
# create chat thread
topic = "test topic"
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
participants = [ChatParticipant(
identifier=self.user,
display_name='name',
share_history_time=share_history_time
)]
create_chat_thread_result = await self.chat_client.create_chat_thread(topic, thread_participants=participants)
self.chat_thread_client = self.chat_client.get_chat_thread_client(create_chat_thread_result.chat_thread.id)
self.thread_id = self.chat_thread_client.thread_id
async def _create_thread_w_two_users(self):
# create chat thread
topic = "test topic"
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
participants = [
ChatParticipant(
identifier=self.user,
display_name='name',
share_history_time=share_history_time
),
ChatParticipant(
identifier=self.new_user,
display_name='name',
share_history_time=share_history_time
)
]
create_chat_thread_result = await self.chat_client.create_chat_thread(topic, thread_participants=participants)
self.chat_thread_client = self.chat_client.get_chat_thread_client(create_chat_thread_result.chat_thread.id)
self.thread_id = self.chat_thread_client.thread_id
async def _send_message(self):
# send a message
content = 'hello world'
sender_display_name = 'sender name'
create_message_result = await self.chat_thread_client.send_message(
content,
sender_display_name=sender_display_name)
message_id = create_message_result.id
return message_id
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_update_topic(self):
async with self.chat_client:
await self._create_thread()
topic = "update topic"
async with self.chat_thread_client:
await self.chat_thread_client.update_topic(topic=topic)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
content = 'hello world'
sender_display_name = 'sender name'
create_message_result = await self.chat_thread_client.send_message(
content,
sender_display_name=sender_display_name)
create_message_result_id = create_message_result.id
self.assertTrue(create_message_result_id)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_get_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
message_id = await self._send_message()
message = await self.chat_thread_client.get_message(message_id)
assert message.id == message_id
assert message.type == ChatMessageType.TEXT
assert message.content.message == 'hello world'
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_messages(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
chat_messages = self.chat_thread_client.list_messages(results_per_page=1)
items = []
async for item in chat_messages:
items.append(item)
assert len(items) > 0
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_update_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
message_id = await self._send_message()
content = "updated message content"
await self.chat_thread_client.update_message(message_id, content=content)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_delete_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
message_id = await self._send_message()
await self.chat_thread_client.delete_message(message_id)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_participants(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
# add another participant
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_participant = ChatParticipant(
identifier=self.new_user,
display_name='name',
share_history_time=share_history_time)
await self.chat_thread_client.add_participants([new_participant])
chat_thread_participants = self.chat_thread_client.list_participants(results_per_page=1, skip=1)
items = []
async for item in chat_thread_participants:
items.append(item)
assert len(items) == 1
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_add_participants(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_participant = ChatParticipant(
identifier=self.new_user,
display_name='name',
share_history_time=share_history_time)
participants = [new_participant]
failed_participants = await self.chat_thread_client.add_participants(participants)
                # no error occurred while adding participants
assert len(failed_participants) == 0
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_remove_participant(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
# add participant first
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_participant = ChatParticipant(
identifier=self.new_user,
display_name='name',
share_history_time=share_history_time)
participants = [new_participant]
await self.chat_thread_client.add_participants(participants)
# test remove participant
await self.chat_thread_client.remove_participant(self.new_user)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_typing_notification(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self.chat_thread_client.send_typing_notification()
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_typing_notification_with_sender_display_name(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self.chat_thread_client.send_typing_notification(sender_display_name="John")
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_read_receipt(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
message_id = await self._send_message()
await self.chat_thread_client.send_read_receipt(message_id)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
async def _wait_on_thread(self, chat_client, thread_id, message_id):
# print("Read Receipts Sent: ", read_receipts_sent)
chat_thread_client = chat_client.get_chat_thread_client(thread_id)
for _ in range(10):
read_receipts_paged = chat_thread_client.list_read_receipts()
chat_message_ids = []
async for page in read_receipts_paged.by_page():
async for item in page:
chat_message_ids.append(item.chat_message_id)
if message_id in chat_message_ids:
return
else:
print("Sleeping for additional 2 secs")
await asyncio.sleep(2)
raise Exception("Read receipts not updated in 20 seconds. Failing.")
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_read_receipts(self):
async with self.chat_client:
await self._create_thread_w_two_users()
async with self.chat_thread_client:
# first user sends 2 messages
for i in range(2):
message_id = await self._send_message()
# send read receipts first
await self.chat_thread_client.send_read_receipt(message_id)
if self.is_live:
await self._wait_on_thread(chat_client=self.chat_client, thread_id=self.thread_id, message_id=message_id)
# get chat thread client for second user
chat_thread_client_new_user = self.chat_client_new_user.get_chat_thread_client(self.thread_id)
# second user sends 1 message
message_result_new_user = await chat_thread_client_new_user.send_message(
"content",
sender_display_name="sender_display_name")
message_id_new_user = message_result_new_user.id
# send read receipt
await chat_thread_client_new_user.send_read_receipt(message_id_new_user)
if self.is_live:
await self._wait_on_thread(chat_client=self.chat_client_new_user, thread_id=self.thread_id, message_id=message_id_new_user)
# list read receipts
read_receipts = self.chat_thread_client.list_read_receipts(results_per_page=2, skip=0)
items = []
async for page in read_receipts.by_page():
async for item in page:
items.append(item)
assert len(items) == 2
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_get_properties(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
get_thread_result = await self.chat_thread_client.get_properties()
assert get_thread_result.id == self.thread_id
# delete created users and chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
|
the-stack_0_8945 | from No import No
from Estado import Estado
estadoInicial = Estado('/home/ec2-user/environment/DiretorioInicial')
raiz = No(estadoInicial)
estadosFilhos = estadoInicial.funcaoSucessora()
for estadoFilho in estadosFilhos:
noFilho = No(Estado(estadoFilho))
raiz.addFilho(noFilho)
raiz.printArvore()
|
the-stack_0_8947 | from __future__ import absolute_import, division, print_function
from oem.core.providers.base import Provider
from oem.version import __version__
from oem_core.core.plugin import PluginManager
import inspect
import logging
import six
log = logging.getLogger(__name__)
class Client(object):
version = __version__
def __init__(self, services, provider, formats=None):
"""Client for OpenEntityMap.
:param services: List of services to load (e.g. "anidb")
:type services: list
:param provider: Provider to use for databases (e.g. "package", "release/incremental")
:type provider: str or oem.core.providers.base.Base
:param formats: List of formats to use, or `None` for any
:type formats: list or None
"""
self._formats = formats
# Discover available plugins
self._plugins = PluginManager
self._plugins.discover()
# Construct plugins
self._services = self._construct_services(services)
self._provider = self._construct_provider(provider)
# Build database + package tables
self._databases = {}
self._packages = {}
for _, cls in self._load_plugins('client', services, construct=False):
# Merge service databases into client
if cls.__databases__:
self._databases.update(cls.__databases__)
else:
log.warn('Service %r has no "__databases__" defined', cls.__key__)
# Merge service packages into client
if cls.__packages__:
self._packages.update(cls.__packages__)
else:
log.warn('Service %r has no "__packages__" defined', cls.__key__)
@property
def formats(self):
return self._formats
@property
def plugins(self):
return self._plugins
@property
def provider(self):
return self._provider
def load_all(self):
for service in six.itervalues(self._services):
service.load()
def database_name(self, source, target):
return self._databases.get((source, target))
def package_name(self, source, target):
return self._packages.get((source, target))
def __getitem__(self, source):
return ServiceInterface(self, source)
#
# Private methods
#
def _construct_services(self, services):
result = {}
for _, cls in self._load_plugins('client', services, construct=False):
# Add supported service conversions
for source, targets in cls.__services__.items():
for target in targets:
# Construct service
result[(source, target)] = cls(self, source, target)
return result
def _construct_provider(self, provider_or_key):
if isinstance(provider_or_key, Provider):
# Class
provider = provider_or_key
elif isinstance(provider_or_key, six.string_types):
# Identifier
provider = PluginManager.get('client-provider', provider_or_key)
if provider is None:
raise ValueError('Unable to find provider: %r' % provider_or_key)
else:
raise ValueError('Unknown provider: %r' % provider_or_key)
# Ensure provider has been constructed
if inspect.isclass(provider):
provider = provider()
# Initialize provider
provider.initialize(self)
return provider
@staticmethod
def _load_plugins(kind, keys, construct=True):
if not keys:
return
for name in keys:
cls = PluginManager.get(kind, name)
if cls is None:
log.warn('Unable to find plugin: %r', name)
continue
if not cls.available:
log.warn('Plugin %r is not available', name)
continue
if construct:
yield cls.__key__, cls()
else:
yield cls.__key__, cls
class ServiceInterface(object):
def __init__(self, client, source):
self.client = client
self.source = source
def to(self, target):
try:
return self.client._services[(self.source, target)]
except KeyError:
raise KeyError('Unknown service: %s -> %s' % (self.source, target))
|
the-stack_0_8948 | #
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"Retrieve remote schema file."
from pathlib import Path
import re
from typing import Union
from urllib.parse import urlparse
from urllib.request import urlopen
import requests
import requests.exceptions
from . import typing as typing_
from .exceptions import InsecureConnectionError
# Semantically, typing_.PathLike doesn't cover strings that represent URLs
def retrieve_schema_file(url_or_path: Union[typing_.PathLike, str], *,
encoding: str = 'utf-8',
tls_verification: Union[bool, typing_.PathLike] = True) -> str:
"""Retrieve a single schema file.
:param url_or_path: URL or path to the schema file.
:param encoding: The encoding of the text in ``url_or_path``.
:param tls_verification: Same as ``tls_verification`` in :class:`pydax.Schema`.
:raises ValueError: See :class:`pydax.Schema`.
:raises InsecureConnectionError: See :class:`pydax.Schema`.
:return: A string of the content.
"""
url_or_path = str(url_or_path)
# We don't detect fully whether the input is a URL or a file path because I couldn't find a reliable way. Almost any
# string with no backslash can be a file name on Linux. URL detection often involves either giant dependencies such
# as Django, or tediously long regular expression that we can't assure that it would work. Here, we detect the
# beginning of the string. If it doesn't look like a URL, treat it as a file path.
if re.match(r'[a-zA-Z0-9]+:\/\/', url_or_path):
parse_result = urlparse(url_or_path)
scheme = parse_result.scheme
if scheme in ('http', 'https'):
if scheme == 'http' and tls_verification:
raise InsecureConnectionError((f'{url_or_path} is a http link and insecure. '
'Set tls_verification=False to accept http links.'))
try:
content = requests.get(url_or_path, allow_redirects=True, verify=tls_verification).content
except requests.exceptions.SSLError as e:
raise InsecureConnectionError((f'Failed to securely connect to {url_or_path}. Caused by:\n{e}'))
# We don't use requests.Response.encoding and requests.Response.text because it is always silent when
# there's an encoding error
return content.decode(encoding)
elif scheme == 'file':
with urlopen(url_or_path) as f: # nosec: bandit will always complain but we know it points to a local file
return f.read().decode(encoding)
else:
raise ValueError(f'Unknown scheme in "{url_or_path}": "{scheme}"')
else:
# Not a URL, treated as a local file path
return Path(url_or_path).read_text(encoding)
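# Illustrative usage sketch (the URL and path below are hypothetical):
#
#     text = retrieve_schema_file('https://example.com/schemas/datasets.yaml')
#     text = retrieve_schema_file('/path/to/schemas/datasets.yaml', encoding='utf-8')
#
# A plain http:// URL raises InsecureConnectionError unless tls_verification=False is
# passed, mirroring the scheme check above.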
|
the-stack_0_8949 | """Dense univariate polynomials with coefficients in Galois fields. """
from random import uniform
from math import ceil, sqrt, log
from sympy.polys.polyutils import (
_sort_factors
)
from sympy.polys.polyerrors import (
ExactQuotientFailed
)
from sympy.utilities import (
any, all, cythonized
)
from sympy.ntheory import factorint
def gf_crt(U, M, K):
"""Chinese Remainder Theorem.
Given a set of integer residues `u_0,...,u_n` and a set of
co-prime integer moduli `m_0,...,m_n`, returns an integer
    `u`, such that `u = u_i mod m_i` for `i = 0, ..., n`.
As an example consider a set of residues `U = [49, 76, 65]`
and a set of moduli `M = [99, 97, 95]`. Then we have::
>>> from sympy.polys.galoistools import gf_crt
>>> from sympy.polys.algebratools import ZZ
>>> gf_crt([49, 76, 65], [99, 97, 95], ZZ)
639985
This is correct result because::
>>> 639985 % 99
49
>>> 639985 % 97
76
>>> 639985 % 95
65
"""
p, v = K.one, K.zero
for m in M:
p *= m
for u, m in zip(U, M):
e = p // m
s, _, _ = K.gcdex(e, m)
v += e*(u*s % m)
return v % p
def gf_crt1(M, K):
"""First part of Chines Remainder Theorem. """
p, E, S = K.one, [], []
for m in M:
p *= m
for m in M:
E.append(p // m)
S.append(K.gcdex(E[-1], m)[0] % m)
return p, E, S
def gf_crt2(U, M, p, E, S, K):
"""Second part of Chinese Remainder Theorem. """
v = K.zero
for u, m, e, s in zip(U, M, E, S):
v += e*(u*s % m)
return v % p
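# Illustrative sketch (reuses the residues/moduli from the gf_crt docstring above and
# assumes ZZ is imported as in that doctest): the precomputation can be shared across
# calls with the same moduli.
#
#     >>> p, E, S = gf_crt1([99, 97, 95], ZZ)
#     >>> gf_crt2([49, 76, 65], [99, 97, 95], p, E, S, ZZ)
#     639985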
def gf_int(a, p):
"""Coerce `a mod p` to an integer in `[-p/2, p/2]` range. """
if a <= p // 2:
return a
else:
return a - p
def gf_degree(f):
"""Returns leading degree of `f`. """
return len(f)-1
def gf_LC(f, K):
"""Returns leading coefficient of `f`. """
if not f:
return K.zero
else:
return f[0]
def gf_TC(f, K):
"""Returns trailing coefficient of `f`. """
if not f:
return K.zero
else:
return f[-1]
@cythonized("k")
def gf_strip(f):
"""Remove leading zeros from `f`. """
if not f or f[0]:
return f
k = 0
for coeff in f:
if coeff:
break
else:
k += 1
return f[k:]
def gf_trunc(f, p):
"""Reduce all coefficients modulo `p`. """
return gf_strip([ a % p for a in f ])
def gf_normal(f, p, K):
"""Normalize all coefficients in `K`. """
return gf_trunc(map(K, f), p)
def gf_convert(f, p, K0, K1):
"""Normalize all coefficients in `K`. """
return gf_trunc([ K1.convert(c, K0) for c in f ], p)
@cythonized("k,n")
def gf_from_dict(f, p, K):
"""Create `GF(p)[x]` polynomial from a dict. """
n, h = max(f.iterkeys()), []
if type(n) is int:
for k in xrange(n, -1, -1):
h.append(f.get(k, K.zero) % p)
else:
(n,) = n
for k in xrange(n, -1, -1):
h.append(f.get((k,), K.zero) % p)
return gf_trunc(h, p)
@cythonized("k,n")
def gf_to_dict(f, p, symmetric=True):
"""Convert `GF(p)[x]` polynomial to a dict. """
n, result = gf_degree(f), {}
for k in xrange(0, n+1):
if symmetric:
a = gf_int(f[n-k], p)
else:
a = f[n-k]
if a: result[k] = a
return result
def gf_from_int_poly(f, p):
"""Create `GF(p)[x]` polynomial from `Z[x]`. """
return gf_trunc(f, p)
def gf_to_int_poly(f, p, symmetric=True):
"""Convert `GF(p)[x]` polynomial to `Z[x]`. """
if symmetric:
return [ gf_int(c, p) for c in f ]
else:
return f
def gf_neg(f, p, K):
"""Negate a polynomial in `GF(p)[x]`. """
return [ -coeff % p for coeff in f ]
def gf_add_ground(f, a, p, K):
"""Returns `f + a` where `f` in `GF(p)[x]` and `a` in `GF(p)`. """
if not f:
a = a % p
else:
a = (f[-1] + a) % p
if len(f) > 1:
return f[:-1] + [a]
if not a:
return []
else:
return [a]
def gf_sub_ground(f, a, p, K):
"""Returns `f - a` where `f` in `GF(p)[x]` and `a` in `GF(p)`. """
if not f:
a = -a % p
else:
a = (f[-1] - a) % p
if len(f) > 1:
return f[:-1] + [a]
if not a:
return []
else:
return [a]
def gf_mul_ground(f, a, p, K):
"""Returns `f * a` where `f` in `GF(p)[x]` and `a` in `GF(p)`. """
if not a:
return []
else:
return [ (a*b) % p for b in f ]
def gf_exquo_ground(f, a, p, K):
"""Returns `f / a` where `f` in `GF(p)[x]` and `a` in `GF(p)`. """
return gf_mul_ground(f, K.invert(a, p), p, K)
@cythonized("df,dg,k")
def gf_add(f, g, p, K):
"""Add polynomials in `GF(p)[x]`. """
if not f:
return g
if not g:
return f
df = gf_degree(f)
dg = gf_degree(g)
if df == dg:
return gf_strip([ (a + b) % p for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = g[:k], g[k:]
return h + [ (a + b) % p for a, b in zip(f, g) ]
@cythonized("df,dg,k")
def gf_sub(f, g, p, K):
"""Subtract polynomials in `GF(p)[x]`. """
if not g:
return f
if not f:
return gf_neg(g, p, K)
df = gf_degree(f)
dg = gf_degree(g)
if df == dg:
return gf_strip([ (a - b) % p for a, b in zip(f, g) ])
else:
k = abs(df - dg)
if df > dg:
h, f = f[:k], f[k:]
else:
h, g = gf_neg(g[:k], p, K), g[k:]
return h + [ (a - b) % p for a, b in zip(f, g) ]
@cythonized("df,dg,dh,i,j")
def gf_mul(f, g, p, K):
"""Multiply polynomials in `GF(p)[x]`. """
df = gf_degree(f)
dg = gf_degree(g)
dh = df + dg
h = [0]*(dh+1)
for i in xrange(0, dh+1):
coeff = K.zero
for j in xrange(max(0, i-dg), min(i, df)+1):
coeff += f[j]*g[i-j]
h[i] = coeff % p
return gf_strip(h)
@cythonized("df,dh,i,j,jmin,jmax,n")
def gf_sqr(f, p, K):
"""Square polynomials in `GF(p)[x]`. """
df = gf_degree(f)
dh = 2*df
h = [0]*(dh+1)
for i in xrange(0, dh+1):
coeff = K.zero
jmin = max(0, i-df)
jmax = min(i, df)
n = jmax - jmin + 1
jmax = jmin + n // 2 - 1
for j in xrange(jmin, jmax+1):
coeff += f[j]*f[i-j]
coeff += coeff
if n & 1:
elem = f[jmax+1]
coeff += elem**2
h[i] = coeff % p
return gf_strip(h)
def gf_add_mul(f, g, h, p, K):
"""Returns `f + g*h` where `f`, `g`, `h` in `GF(p)[x]`. """
return gf_add(f, gf_mul(g, h, p, K), p, K)
def gf_sub_mul(f, g, h, p, K):
"""Returns `f - g*h` where `f`, `g`, `h` in `GF(p)[x]`. """
return gf_sub(f, gf_mul(g, h, p, K), p, K)
@cythonized("k")
def gf_expand(F, p, K):
"""Expand results of `factor()` in `GF(p)[x]`. """
if type(F) is tuple:
lc, F = F
else:
lc = K.one
g = [lc]
for f, k in F:
f = gf_pow(f, k, p, K)
g = gf_mul(g, f, p, K)
return g
@cythonized("df,dg,dq,dr,i,j")
def gf_div(f, g, p, K):
"""Division with remainder in `GF(p)[x]`.
Given univariate polynomials `f` and `g` with coefficients in a
finite field with `p` elements, returns polynomials `q` and `r`
(quotient and remainder) such that `f = q*g + r`.
Consider polynomials `x**3 + x + 1` and `x**2 + x` in GF(2)::
>>> from sympy.polys.galoistools import gf_div, gf_add_mul
>>> from sympy.polys.algebratools import ZZ
>>> gf_div([1, 0, 1, 1], [1, 1, 0], 2, ZZ)
([1, 1], [1])
As result we obtained quotient `x + 1` and remainder `1`, thus::
>>> gf_add_mul([1], [1, 1], [1, 1, 0], 2, ZZ)
[1, 0, 1, 1]
References
==========
.. [Monagan93] Michael Monagan, In-place Arithmetic for Polynomials
over Z_n, Proceedings of DISCO '92, Springer-Verlag LNCS, 721,
1993, pp. 22-34
.. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
First Edition, Cambridge University Press, 1999, pp. 247
"""
df = gf_degree(f)
dg = gf_degree(g)
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return [], f
inv = K.invert(g[0], p)
h, dq, dr = list(f), df-dg, dg-1
for i in xrange(0, df+1):
coeff = h[i]
for j in xrange(max(0, dg-i), min(df-i, dr)+1):
coeff -= h[i+j-dg] * g[dg-j]
if i <= dq:
coeff *= inv
h[i] = coeff % p
return h[:dq+1], gf_strip(h[dq+1:])
def gf_rem(f, g, p, K):
"""Returns polynomial remainder in `GF(p)[x]`. """
return gf_div(f, g, p, K)[1]
def gf_quo(f, g, p, K):
"""Returns polynomial quotient in `GF(p)[x]`. """
q, r = gf_div(f, g, p, K)
if not r:
return q
else:
raise ExactQuotientFailed('%s does not divide %s' % (g, f))
@cythonized("df,dg,dq,dr,i,j")
def gf_exquo(f, g, p, K):
"""Computes exact quotient in `GF(p)[x]`. """
df = gf_degree(f)
dg = gf_degree(g)
if not g:
raise ZeroDivisionError("polynomial division")
elif df < dg:
return []
inv = K.invert(g[0], p)
h, dq, dr = f[:], df-dg, dg-1
for i in xrange(0, dq+1):
coeff = h[i]
for j in xrange(max(0, dg-i), min(df-i, dr)+1):
coeff -= h[i+j-dg] * g[dg-j]
h[i] = (coeff * inv) % p
return h[:dq+1]
@cythonized("n")
def gf_lshift(f, n, K):
"""Efficiently multiply `f` by `x**n`. """
if not f:
return f
else:
return f + [K.zero]*n
@cythonized("n")
def gf_rshift(f, n, K):
"""Efficiently divide `f` by `x**n`. """
if not n:
return f, []
else:
return f[:-n], f[-n:]
def gf_pow(f, n, p, K):
"""Computes `f**n` in `GF(p)[x]` using repeated squaring. """
if not n:
return [K.one]
elif n == 1:
return f
elif n == 2:
return gf_sqr(f, p, K)
h = [K.one]
while True:
if n & 1:
h = gf_mul(h, f, p, K)
n -= 1
n >>= 1
if not n:
break
f = gf_sqr(f, p, K)
return h
def gf_pow_mod(f, n, g, p, K):
"""Computes `f**n` in `GF(p)[x]/(g)` using repeated squaring.
Given polynomials `f` and `g` in `GF(p)[x]` and a non-negative
integer `n`, efficiently computes `f**n (mod g)` i.e. remainder
from division `f**n` by `g` using repeated squaring algorithm.
References
==========
.. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
First Edition, Cambridge University Press, 1999, pp. 69
"""
if not n:
return [K.one]
elif n == 1:
return gf_rem(f, g, p, K)
elif n == 2:
return gf_rem(gf_sqr(f, p, K), g, p, K)
h = [K.one]
while True:
if n & 1:
h = gf_mul(h, f, p, K)
h = gf_rem(h, g, p, K)
n -= 1
n >>= 1
if not n:
break
f = gf_sqr(f, p, K)
f = gf_rem(f, g, p, K)
return h
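# Illustrative sketch (hand-checked small case, assuming ZZ as in the doctests above):
# over GF(5), x**4 rem (x**2 + 1) is 1, since x**2 is congruent to -1 there.
#
#     >>> gf_pow_mod([1, 0], 4, [1, 0, 1], 5, ZZ)
#     [1]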
def gf_gcd(f, g, p, K):
"""Euclidean Algorithm in `GF(p)[x]`. """
while g:
f, g = g, gf_rem(f, g, p, K)
return gf_monic(f, p, K)[1]
def gf_gcdex(f, g, p, K):
"""Extended Euclidean Algorithm in `GF(p)[x]`.
Given polynomials `f` and `g` in `GF(p)[x]`, computes polynomials
`s`, `t` and `h`, such that `h = gcd(f, g)` and `s*f + t*g = h`. The
typical application of EEA is solving polynomial diophantine equations.
Consider polynomials `f = (x + 7) (x + 1)`, `g = (x + 7) (x**2 + 1)`
in `GF(11)[x]`. Application of Extended Euclidean Algorithm gives::
>>> from sympy.polys.galoistools import gf_gcdex, gf_mul, gf_add
>>> from sympy.polys.algebratools import ZZ
>>> s, t, g = gf_gcdex([1,8,7], [1,7,1,7], 11, ZZ)
>>> s, t, g
([5, 6], [6], [1, 7])
As result we obtained polynomials `s = 5*x + 6` and `t = 6`, and
additionally `gcd(f, g) = x + 7`. This is correct because::
>>> S = gf_mul(s, [1,8,7], 11, ZZ)
>>> T = gf_mul(t, [1,7,1,7], 11, ZZ)
>>> gf_add(S, T, 11, ZZ) == [1, 7]
True
References
==========
.. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
First Edition, Cambridge University Press, 1999, pp. 46
"""
if not (f or g):
return [K.one], [], []
p0, r0 = gf_monic(f, p, K)
p1, r1 = gf_monic(g, p, K)
if not f:
return [], [K.invert(p1, p)], r1
if not g:
return [K.invert(p0, p)], [], r0
s0, s1 = [K.invert(p0, p)], []
t0, t1 = [], [K.invert(p1, p)]
while True:
Q, R = gf_div(r0, r1, p, K)
if not R:
break
(lc, r1), r0 = gf_monic(R, p, K), r1
inv = K.invert(lc, p)
s = gf_sub_mul(s0, s1, Q, p, K)
t = gf_sub_mul(t0, t1, Q, p, K)
s1, s0 = gf_mul_ground(s, inv, p, K), s1
t1, t0 = gf_mul_ground(t, inv, p, K), t1
return s1, t1, r1
def gf_monic(f, p, K):
"""Returns LC and a monic polynomial in `GF(p)[x]`."""
if not f:
return K.zero, []
else:
lc = f[0]
if K.is_one(lc):
return lc, list(f)
else:
return lc, gf_exquo_ground(f, lc, p, K)
@cythonized("df,n")
def gf_diff(f, p, K):
"""Differentiate polynomial in `GF(p)[x]`. """
df = gf_degree(f)
h, n = [K.zero]*df, df
for coeff in f[:-1]:
coeff *= K(n)
coeff %= p
if coeff:
h[df-n] = coeff
n -= 1
return gf_strip(h)
def gf_eval(f, a, p, K):
"""Evaluate `f(a)` in `GF(p)` using Horner scheme. """
result = K.zero
for c in f:
result *= a
result += c
result %= p
return result
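# Horner evaluation sketch (hand-checked): for f = x**2 + 2*x + 3 over GF(5),
# f(2) = 4 + 4 + 3 = 11 = 1 (mod 5):
#
#     >>> gf_eval([1, 2, 3], 2, 5, ZZ)
#     1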
def gf_multi_eval(f, A, p, K):
"""Evaluate `f(a)` for `a` in `[a_1, ..., a_n]`. """
return [ gf_eval(f, a, p, K) for a in A ]
def gf_compose(f, g, p, K):
"""Compute polynomial composition `f(g)` in `GF(p)[x]`. """
if len(g) <= 1:
return gf_strip([gf_eval(f, gf_LC(g, K), p, K)])
if not f:
return []
h = [f[0]]
for c in f[1:]:
h = gf_mul(h, g, p, K)
h = gf_add_ground(h, c, p, K)
return h
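# Composition sketch (hand-checked): with f = x**2 + 1 and g = x + 1 in
# GF(5)[x], f(g) = (x + 1)**2 + 1 = x**2 + 2*x + 2:
#
#     >>> gf_compose([1, 0, 1], [1, 1], 5, ZZ)
#     [1, 2, 2]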
def gf_compose_mod(g, h, f, p, K):
"""Compute polynomial composition `g(h)` in `GF(p)[x]/(f)`. """
if not g:
return []
comp = [g[0]]
for a in g[1:]:
comp = gf_mul(comp, h, p, K)
comp = gf_add_ground(comp, a, p, K)
comp = gf_rem(comp, f, p, K)
return comp
@cythonized("n")
def gf_trace_map(a, b, c, n, f, p, K):
"""Compute polynomial trace map in `GF(p)[x]/(f)`.
Given polynomial `f` in `GF(p)[x]`, polynomials `a`, `b`, `c`
in quotient ring `GF(p)[x]/(f)` such that `b = c**t (mod f)`
for some positive power `t` of `p` and a positive integer `n`,
returns a mapping::
a -> a**t**n, a + a**t + a**t**2 + ... + a**t**n (mod f)
In factorization context, `b = x**p mod f` and `c = x mod f`.
This way we can efficiently compute trace polynomials in equal
degree factorization routine, much faster than with other methods,
like iterated Frobenius algorithm, for large degrees.
References
==========
.. [Gathen92] J. von zur Gathen, V. Shoup, Computing Frobenius Maps
and Factoring Polynomials, ACM Symposium on Theory of Computing,
1992, pp. 187-224
"""
u = gf_compose_mod(a, b, f, p, K)
v = b
if n & 1:
U = gf_add(a, u, p, K)
V = b
else:
U = a
V = c
n >>= 1
while n:
u = gf_add(u, gf_compose_mod(u, v, f, p, K), p, K)
v = gf_compose_mod(v, v, f, p, K)
if n & 1:
U = gf_add(U, gf_compose_mod(u, V, f, p, K), p, K)
V = gf_compose_mod(v, V, f, p, K)
n >>= 1
return gf_compose_mod(a, V, f, p, K), U
@cythonized("i,n")
def gf_random(n, p, K):
"""Generate a random polynomial in `GF(p)[x]` of degree `n`. """
return [K.one] + [ K(int(uniform(0, p))) for i in xrange(0, n) ]
@cythonized("i,n")
def gf_irreducible(n, p, K):
"""Generate random irreducible polynomial of degree `n` in `GF(p)[x]`. """
while True:
f = gf_random(n, p, K)
if gf_irreducible_p(f, p, K):
return f
@cythonized("i,n")
def gf_irred_p_ben_or(f, p, K):
"""Ben-Or's polynomial irreducibility test over finite fields. """
n = gf_degree(f)
if n <= 1:
return True
_, f = gf_monic(f, p, K)
H = h = gf_pow_mod([K.one, K.zero], p, f, p, K)
for i in xrange(0, n//2):
g = gf_sub(h, [K.one, K.zero], p, K)
if gf_gcd(f, g, p, K) == [K.one]:
h = gf_compose_mod(h, H, f, p, K)
else:
return False
return True
@cythonized("i,n,d")
def gf_irred_p_rabin(f, p, K):
"""Rabin's polynomial irreducibility test over finite fields. """
n = gf_degree(f)
if n <= 1:
return True
_, f = gf_monic(f, p, K)
x = [K.one, K.zero]
H = h = gf_pow_mod(x, p, f, p, K)
indices = set([ n//d for d in factorint(n) ])
for i in xrange(1, n):
if i in indices:
g = gf_sub(h, x, p, K)
if gf_gcd(f, g, p, K) != [K.one]:
return False
h = gf_compose_mod(h, H, f, p, K)
return h == x
_irred_methods = {
'ben-or' : gf_irred_p_ben_or,
'rabin' : gf_irred_p_rabin,
}
def gf_irreducible_p(f, p, K, **args):
"""Test irreducibility of a polynomial `f` in `GF(p)[x]`. """
method = args.get('method')
if method is not None:
irred = _irred_methods[method](f, p, K)
else:
irred = gf_irred_p_rabin(f, p, K)
return irred
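# Irreducibility sketch (hand-checked): x**2 + 1 has no root modulo 3, so it is
# irreducible over GF(3), but 2**2 + 1 = 0 (mod 5), so it splits over GF(5):
#
#     >>> gf_irreducible_p([1, 0, 1], 3, ZZ)
#     True
#     >>> gf_irreducible_p([1, 0, 1], 5, ZZ)
#     False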
def gf_sqf_p(f, p, K):
"""Returns `True` if `f` is square-free in `GF(p)[x]`. """
_, f = gf_monic(f, p, K)
if not f:
return True
else:
return gf_gcd(f, gf_diff(f, p, K), p, K) == [K.one]
def gf_sqf_part(f, p, K):
"""Returns square-free part of a `GF(p)[x]` polynomial. """
_, sqf = gf_sqf_list(f, p, K)
g = [K.one]
for f, _ in sqf:
g = gf_mul(g, f, p, K)
return g
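# Square-free part sketch (hand-checked): for f = x*(x + 1)**2 = x**3 + 2*x**2 + x
# in GF(5)[x], the square-free part is x*(x + 1) = x**2 + x:
#
#     >>> gf_sqf_part([1, 2, 1, 0], 5, ZZ)
#     [1, 1, 0]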
@cythonized("i,n,d,r")
def gf_sqf_list(f, p, K):
"""Returns square-free decomposition of a `GF(p)[x]` polynomial.
Given a polynomial `f` in `GF(p)[x]`, returns the leading coefficient
of `f` and a square-free decomposition `f_1**e_1 f_2**e_2 ... f_k**e_k`
such that all `f_i` are monic polynomials and `(f_i, f_j)` for `i != j`
are co-prime and `e_1 ... e_k` are given in increasing order. All
trivial terms (i.e. `f_i = 1`) aren't included in the output.
Consider polynomial `f = x**11 + 1` over `GF(11)[x]`::
>>> from sympy.polys.galoistools import (
... gf_from_dict, gf_diff, gf_sqf_list, gf_pow,
... )
... # doctest: +NORMALIZE_WHITESPACE
>>> from sympy.polys.algebratools import ZZ
>>> f = gf_from_dict({11: 1, 0: 1}, 11, ZZ)
Note that `f'(x) = 0`::
>>> gf_diff(f, 11, ZZ)
[]
       This phenomenon doesn't happen in characteristic zero. However we can
       still compute the square-free decomposition of `f` using `gf_sqf_list()`::
>>> gf_sqf_list(f, 11, ZZ)
(1, [([1, 1], 11)])
We obtained factorization `f = (x + 1)**11`. This is correct because::
>>> gf_pow([1, 1], 11, 11, ZZ) == f
True
References
==========
.. [Geddes92] K. Geddes, S. Czapor, G. Labahn, Algorithms for
Computer Algebra, First Edition, Springer, 1992, pp. 343-347
"""
n, sqf, factors, r = 1, False, [], int(p)
lc, f = gf_monic(f, p, K)
if gf_degree(f) < 1:
return lc, []
while True:
F = gf_diff(f, p, K)
if F != []:
g = gf_gcd(f, F, p, K)
h = gf_exquo(f, g, p, K)
i = 1
while h != [K.one]:
G = gf_gcd(g, h, p, K)
H = gf_exquo(h, G, p, K)
if gf_degree(H) > 0:
factors.append((H, i*n))
g, h, i = gf_exquo(g, G, p, K), G, i+1
if g == [K.one]:
sqf = True
else:
f = g
if not sqf:
d = gf_degree(f) // r
for i in xrange(0, d+1):
f[i] = f[i*r]
f, n = f[:d+1], n*r
else:
break
return lc, factors
@cythonized("n,i,j,r")
def gf_Qmatrix(f, p, K):
"""Calculate Berlekamp's `Q` matrix. """
n, r = gf_degree(f), int(p)
q = [K.one] + [K.zero]*(n-1)
Q = [list(q)] + [[]]*(n-1)
for i in xrange(1, (n-1)*r + 1):
qq, c = [(-q[-1]*f[-1]) % p], q[-1]
for j in xrange(1, n):
qq.append((q[j-1] - c*f[-j-1]) % p)
if not (i % r):
Q[i//r] = list(qq)
q = qq
return Q
@cythonized("n,i,j,k")
def gf_Qbasis(Q, p, K):
"""Compute a basis of the kernel of `Q`. """
Q, n = [ list(q) for q in Q ], len(Q)
for k in xrange(0, n):
Q[k][k] = (Q[k][k] - K.one) % p
for k in xrange(0, n):
for i in xrange(k, n):
if Q[k][i]:
break
else:
continue
inv = K.invert(Q[k][i], p)
for j in xrange(0, n):
Q[j][i] = (Q[j][i]*inv) % p
for j in xrange(0, n):
t = Q[j][k]
Q[j][k] = Q[j][i]
Q[j][i] = t
for i in xrange(0, n):
if i != k:
q = Q[k][i]
for j in xrange(0, n):
Q[j][i] = (Q[j][i] - Q[j][k]*q) % p
for i in xrange(0, n):
for j in xrange(0, n):
if i == j:
Q[i][j] = (K.one - Q[i][j]) % p
else:
Q[i][j] = (-Q[i][j]) % p
basis = []
for q in Q:
if any(q):
basis.append(q)
return basis
@cythonized("i,k")
def gf_berlekamp(f, p, K):
"""Factor a square-free `f` in `GF(p)[x]` for small `p`. """
Q = gf_Qmatrix(f, p, K)
V = gf_Qbasis(Q, p, K)
for i, v in enumerate(V):
V[i] = gf_strip(list(reversed(v)))
factors = [f]
for k in xrange(1, len(V)):
for f in list(factors):
s = K.zero
while s < p:
g = gf_sub_ground(V[k], s, p, K)
h = gf_gcd(f, g, p, K)
if h != [K.one] and h != f:
factors.remove(f)
f = gf_exquo(f, h, p, K)
factors.extend([f, h])
if len(factors) == len(V):
return _sort_factors(factors, multiple=False)
s += K.one
return _sort_factors(factors, multiple=False)
@cythonized("i")
def gf_ddf_zassenhaus(f, p, K):
"""Cantor-Zassenhaus: Deterministic Distinct Degree Factorization
Given a monic square-free polynomial `f` in `GF(p)[x]`, computes
partial distinct degree factorization `f_1 ... f_d` of `f` where
`deg(f_i) != deg(f_j)` for `i != j`. The result is returned as a
list of pairs `(f_i, e_i)` where `deg(f_i) > 0` and `e_i > 0` is
an argument to the equal degree factorization routine.
Consider polynomial `x**15 - 1` in `GF(11)[x]`::
>>> from sympy.polys.galoistools import gf_from_dict
>>> from sympy.polys.algebratools import ZZ
>>> f = gf_from_dict({15: 1, 0: -1}, 11, ZZ)
Distinct degree factorization gives::
>>> from sympy.polys.galoistools import gf_ddf_zassenhaus
>>> gf_ddf_zassenhaus(f, 11, ZZ)
[([1, 0, 0, 0, 0, 10], 1), ([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], 2)]
which means `x**15 - 1 = (x**5 - 1) (x**10 + x**5 + 1)`. To obtain
factorization into irreducibles, use equal degree factorization
procedure (EDF) with each of the factors.
References
==========
.. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
First Edition, Cambridge University Press, 1999, pp. 356
.. [Geddes92] K. Geddes, S. Czapor, G. Labahn, Algorithms for Computer
Algebra, First Edition, Springer, 1992, pp. 368-371
"""
i, g, factors = 1, [K.one, K.zero], []
while 2*i <= gf_degree(f):
g = gf_pow_mod(g, int(p), f, p, K)
h = gf_gcd(f, gf_sub(g, [K.one, K.zero], p, K), p, K)
if h != [K.one]:
factors.append((h, i))
f = gf_exquo(f, h, p, K)
g = gf_rem(g, f, p, K)
i += 1
if f != [K.one]:
return factors + [(f, gf_degree(f))]
else:
return factors
@cythonized("n,N,i")
def gf_edf_zassenhaus(f, n, p, K):
"""Cantor-Zassenhaus: Probabilistic Equal Degree Factorization
Given a monic square-free polynomial `f` in `GF(p)[x]` and integer
`n` such that `n` divides `deg(f)`, returns all irreducible factors
`f_1 ... f_d` of `f`, each of degree `n`. This is a complete
factorization in Galois fields.
Consider square-free polynomial `f = x**3 + x**2 + x + 1` in
`GF(5)[x]`. Lets compute its irreducible factors of degree one::
>>> from sympy.polys.galoistools import gf_edf_zassenhaus
>>> from sympy.polys.algebratools import ZZ
>>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ)
[[1, 1], [1, 2], [1, 3]]
References
==========
.. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
First Edition, Cambridge University Press, 1999, pp. 358
.. [Geddes92] K. Geddes, S. Czapor, G. Labahn, Algorithms for Computer
Algebra, First Edition, Springer, 1992, pp. 371-373
"""
factors, q = [f], int(p)
if gf_degree(f) <= n:
return factors
N = gf_degree(f) // n
while len(factors) < N:
r = gf_random(2*n-1, p, K)
if p == 2:
h = r
for i in xrange(0, 2**(n*N-1)):
r = gf_pow_mod(r, 2, f, p, K)
h = gf_add(h, r, p, K)
g = gf_gcd(f, h, p, K)
else:
h = gf_pow_mod(r, (q**n-1) // 2, f, p, K)
g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
if g != [K.one] and g != f:
factors = gf_edf_zassenhaus(g, n, p, K) \
+ gf_edf_zassenhaus(gf_exquo(f, g, p, K), n, p, K)
return _sort_factors(factors, multiple=False)
@cythonized("n,k,i,j")
def gf_ddf_shoup(f, p, K):
"""Kaltofen-Shoup: Deterministic Distinct Degree Factorization
Given a monic square-free polynomial `f` in `GF(p)[x]`, computes
partial distinct degree factorization `f_1 ... f_d` of `f` where
`deg(f_i) != deg(f_j)` for `i != j`. The result is returned as a
list of pairs `(f_i, e_i)` where `deg(f_i) > 0` and `e_i > 0` is
an argument to the equal degree factorization routine.
This algorithm is an improved version of Zassenhaus algorithm for
large `deg(f)` and modulus `p` (especially for `deg(f) ~ lg(p)`).
References
==========
.. [Kaltofen98] E. Kaltofen, V. Shoup, Subquadratic-time Factoring
of Polynomials over Finite Fields, Mathematics of Computation,
Volume 67, Issue 223, 1998, pp. 1179-1197
.. [Shoup95] V. Shoup, A New Polynomial Factorization Algorithm and
its Implementation, Journal of Symbolic Computation, Volume 20,
Issue 4, 1995, pp. 363-397
.. [Gathen92] J. von zur Gathen, V. Shoup, Computing Frobenius Maps
and Factoring Polynomials, ACM Symposium on Theory of Computing,
1992, pp. 187-224
"""
n = gf_degree(f)
k = int(ceil(sqrt(n//2)))
h = gf_pow_mod([K.one, K.zero], int(p), f, p, K)
U = [[K.one,K.zero], h] + [K.zero]*(k-1)
for i in xrange(2, k+1):
U[i] = gf_compose_mod(U[i-1], h, f, p, K)
h, U = U[k], U[:k]
V = [h] + [K.zero]*(k-1)
for i in xrange(1, k):
V[i] = gf_compose_mod(V[i-1], h, f, p, K)
factors = []
for i, v in enumerate(V):
h, j = [K.one], k-1
for u in U:
g = gf_sub(v, u, p, K)
h = gf_mul(h, g, p, K)
h = gf_rem(h, f, p, K)
g = gf_gcd(f, h, p, K)
f = gf_exquo(f, g, p, K)
for u in reversed(U):
h = gf_sub(v, u, p, K)
F = gf_gcd(g, h, p, K)
if F != [K.one]:
factors.append((F, k*(i+1)-j))
g, j = gf_exquo(g, F, p, K), j-1
if f != [K.one]:
factors.append((f, gf_degree(f)))
return factors
@cythonized("n,N,q")
def gf_edf_shoup(f, n, p, K):
"""Gathen-Shoup: Probabilistic Equal Degree Factorization
Given a monic square-free polynomial `f` in `GF(p)[x]` and integer
`n` such that `n` divides `deg(f)`, returns all irreducible factors
`f_1 ... f_d` of `f`, each of degree `n`. This is a complete
factorization over Galois fields.
This algorithm is an improved version of Zassenhaus algorithm for
large `deg(f)` and modulus `p` (especially for `deg(f) ~ lg(p)`).
References
==========
.. [Shoup91] V. Shoup, A Fast Deterministic Algorithm for Factoring
Polynomials over Finite Fields of Small Characteristic, In
Proceedings of International Symposium on Symbolic and
Algebraic Computation, 1991, pp. 14-21
.. [Gathen92] J. von zur Gathen, V. Shoup, Computing Frobenius Maps
and Factoring Polynomials, ACM Symposium on Theory of Computing,
1992, pp. 187-224
"""
N, q = gf_degree(f), int(p)
if not N:
return []
if N <= n:
return [f]
factors, x = [f], [K.one, K.zero]
r = gf_random(N-1, p, K)
h = gf_pow_mod(x, q, f, p, K)
H = gf_trace_map(r, h, x, n-1, f, p, K)[1]
if p == 2:
h1 = gf_gcd(f, H, p, K)
h2 = gf_exquo(f, h1, p, K)
factors = gf_edf_shoup(h1, n, p, K) \
+ gf_edf_shoup(h2, n, p, K)
else:
h = gf_pow_mod(H, (q-1)//2, f, p, K)
h1 = gf_gcd(f, h, p, K)
h2 = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
h3 = gf_exquo(f, gf_mul(h1, h2, p, K), p, K)
factors = gf_edf_shoup(h1, n, p, K) \
+ gf_edf_shoup(h2, n, p, K) \
+ gf_edf_shoup(h3, n, p, K)
return _sort_factors(factors, multiple=False)
@cythonized("n")
def gf_zassenhaus(f, p, K):
"""Factor a square-free `f` in `GF(p)[x]` for medium `p`. """
factors = []
for factor, n in gf_ddf_zassenhaus(f, p, K):
factors += gf_edf_zassenhaus(factor, n, p, K)
return _sort_factors(factors, multiple=False)
@cythonized("n")
def gf_shoup(f, p, K):
"""Factor a square-free `f` in `GF(p)[x]` for large `p`. """
factors = []
for factor, n in gf_ddf_shoup(f, p, K):
factors += gf_edf_shoup(factor, n, p, K)
return _sort_factors(factors, multiple=False)
_factor_methods = {
'berlekamp' : gf_berlekamp, # `p` : small
'zassenhaus' : gf_zassenhaus, # `p` : medium
'shoup' : gf_shoup, # `p` : large
}
def gf_factor_sqf(f, p, K, **args):
"""Factor a square-free polynomial `f` in `GF(p)[x]`. """
lc, f = gf_monic(f, p, K)
if gf_degree(f) < 1:
return lc, []
method = args.get('method')
if method is not None:
factors = _factor_methods[method](f, p, K)
else:
factors = gf_zassenhaus(f, p, K)
return lc, factors
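# Usage sketch for gf_factor_sqf (hand-checked; the exact ordering of the factor
# list is whatever _sort_factors produces): x**2 - 1 = (x + 1)*(x - 1) over GF(11):
#
#     >>> gf_factor_sqf([1, 0, 10], 11, ZZ)
#     (1, [[1, 1], [1, 10]])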
@cythonized("n")
def gf_factor(f, p, K, **args):
"""Factor (non square-free) polynomials in `GF(p)[x]`.
Given a possibly non square-free polynomial `f` in `GF(p)[x]`, returns
its complete factorization into irreducibles::
f_1(x)**e_1 f_2(x)**e_2 ... f_d(x)**e_d
where each `f_i` is a monic polynomial and `gcd(f_i, f_j) == 1`, for
`i != j`. The result is given as a tuple consisting of the leading
coefficient of `f` and a list of factors with their multiplicities.
The algorithm proceeds by first computing square-free decomposition
of `f` and then iteratively factoring each of the square-free factors.
       Consider a non square-free polynomial `f = (7*x + 1)**2 (x + 2)` in
`GF(11)[x]`. We obtain its factorization into irreducibles as follows::
>>> from sympy.polys.galoistools import gf_factor
>>> from sympy.polys.algebratools import ZZ
>>> gf_factor([5, 2, 7, 2], 11, ZZ)
(5, [([1, 2], 1), ([1, 8], 2)])
       We obtained the factorization `f = 5 (x + 2) (x + 8)**2`. We didn't
       recover the exact form of the input polynomial because we asked for
       monic factors of `f` and its leading coefficient separately.
Square-free factors of `f` can be factored into irreducibles over
`GF(p)` using three very different methods:
1. Berlekamp - efficient for very small values of `p` (usually `p < 25`)
2. Cantor-Zassenhaus - efficient on average input and with "typical" `p`
3. Shoup-Kaltofen-Gathen - efficient with very large inputs and modulus
       If you want to use a specific factorization method, instead of relying
       on the algorithm to choose one for you, specify the `method` keyword and
       set it to one of the `berlekamp`, `zassenhaus` or `shoup` values.
References
==========
.. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
First Edition, Cambridge University Press, 1999, pp. 365
"""
lc, f = gf_monic(f, p, K)
if gf_degree(f) < 1:
return lc, []
factors = []
for g, n in gf_sqf_list(f, p, K)[1]:
for h in gf_factor_sqf(g, p, K, **args)[1]:
factors.append((h, n))
return lc, _sort_factors(factors)
|
the-stack_0_8951 | # flake8: noqa: F811, F401
import asyncio
import sys
from typing import Dict, List, Optional, Tuple
import aiosqlite
import pytest
from taco.consensus.block_header_validation import validate_finished_header_block
from taco.consensus.block_record import BlockRecord
from taco.consensus.blockchain import Blockchain
from taco.consensus.default_constants import DEFAULT_CONSTANTS
from taco.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from taco.consensus.full_block_to_block_record import block_to_block_record
from taco.full_node.block_store import BlockStore
from taco.full_node.coin_store import CoinStore
from taco.server.start_full_node import SERVICE_NAME
from taco.types.blockchain_format.sized_bytes import bytes32
from taco.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from taco.util.block_cache import BlockCache
from tests.block_tools import test_constants
from taco.util.config import load_config
from taco.util.default_root import DEFAULT_ROOT_PATH
from taco.util.generator_tools import get_block_header
from tests.setup_nodes import bt
try:
from reprlib import repr
except ImportError:
pass
from taco.consensus.pot_iterations import calculate_iterations_quality
from taco.full_node.weight_proof import ( # type: ignore
WeightProofHandler,
_map_sub_epoch_summaries,
_validate_sub_epoch_segments,
_validate_summaries_weight,
)
from taco.types.full_block import FullBlock
from taco.types.header_block import HeaderBlock
from taco.util.ints import uint32, uint64
from tests.core.fixtures import (
default_400_blocks,
default_1000_blocks,
default_10000_blocks,
default_10000_blocks_compact,
pre_genesis_empty_slots_1000_blocks,
)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def count_sub_epochs(blockchain, last_hash) -> int:
curr = blockchain._sub_blocks[last_hash]
count = 0
while True:
if curr.height == 0:
break
# next sub block
curr = blockchain._sub_blocks[curr.prev_hash]
# if end of sub-epoch
if curr.sub_epoch_summary_included is not None:
count += 1
return count
def get_prev_ses_block(sub_blocks, last_hash) -> Tuple[BlockRecord, int]:
curr = sub_blocks[last_hash]
blocks = 1
while curr.height != 0:
# next sub block
curr = sub_blocks[curr.prev_hash]
# if end of sub-epoch
if curr.sub_epoch_summary_included is not None:
return curr, blocks
blocks += 1
assert False
async def load_blocks_dont_validate(
blocks,
) -> Tuple[
Dict[bytes32, HeaderBlock], Dict[uint32, bytes32], Dict[bytes32, BlockRecord], Dict[bytes32, SubEpochSummary]
]:
header_cache: Dict[bytes32, HeaderBlock] = {}
height_to_hash: Dict[uint32, bytes32] = {}
sub_blocks: Dict[bytes32, BlockRecord] = {}
sub_epoch_summaries: Dict[bytes32, SubEpochSummary] = {}
prev_block = None
difficulty = test_constants.DIFFICULTY_STARTING
block: FullBlock
for block in blocks:
if block.height > 0:
assert prev_block is not None
difficulty = block.reward_chain_block.weight - prev_block.weight
if block.reward_chain_block.challenge_chain_sp_vdf is None:
assert block.reward_chain_block.signage_point_index == 0
cc_sp: bytes32 = block.reward_chain_block.pos_ss_cc_challenge_hash
else:
cc_sp = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_string: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
test_constants,
block.reward_chain_block.pos_ss_cc_challenge_hash,
cc_sp,
)
assert quality_string is not None
required_iters: uint64 = calculate_iterations_quality(
test_constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
block.reward_chain_block.proof_of_space.size,
difficulty,
cc_sp,
)
sub_block = block_to_block_record(
test_constants, BlockCache(sub_blocks, height_to_hash), required_iters, block, None
)
sub_blocks[block.header_hash] = sub_block
height_to_hash[block.height] = block.header_hash
header_cache[block.header_hash] = get_block_header(block, [], [])
if sub_block.sub_epoch_summary_included is not None:
sub_epoch_summaries[block.height] = sub_block.sub_epoch_summary_included
prev_block = block
return header_cache, height_to_hash, sub_blocks, sub_epoch_summaries
async def _test_map_summaries(blocks, header_cache, height_to_hash, sub_blocks, summaries):
curr = sub_blocks[blocks[-1].header_hash]
orig_summaries: Dict[int, SubEpochSummary] = {}
while curr.height > 0:
if curr.sub_epoch_summary_included is not None:
orig_summaries[curr.height] = curr.sub_epoch_summary_included
# next sub block
curr = sub_blocks[curr.prev_hash]
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
# sub epoch summaries validate hashes
summaries, sub_epoch_data_weight, _ = _map_sub_epoch_summaries(
test_constants.SUB_EPOCH_BLOCKS,
test_constants.GENESIS_CHALLENGE,
wp.sub_epochs,
test_constants.DIFFICULTY_STARTING,
)
assert len(summaries) == len(orig_summaries)
class TestWeightProof:
@pytest.mark.asyncio
async def test_weight_proof_map_summaries_1(self, default_400_blocks):
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(default_400_blocks)
await _test_map_summaries(default_400_blocks, header_cache, height_to_hash, sub_blocks, summaries)
@pytest.mark.asyncio
async def test_weight_proof_map_summaries_2(self, default_1000_blocks):
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(default_1000_blocks)
await _test_map_summaries(default_1000_blocks, header_cache, height_to_hash, sub_blocks, summaries)
@pytest.mark.asyncio
async def test_weight_proof_summaries_1000_blocks(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
summaries, sub_epoch_data_weight, _ = _map_sub_epoch_summaries(
wpf.constants.SUB_EPOCH_BLOCKS,
wpf.constants.GENESIS_CHALLENGE,
wp.sub_epochs,
wpf.constants.DIFFICULTY_STARTING,
)
assert _validate_summaries_weight(test_constants, sub_epoch_data_weight, summaries, wp)
# assert res is not None
@pytest.mark.asyncio
async def test_weight_proof_bad_peak_hash(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(b"sadgfhjhgdgsfadfgh")
assert wp is None
@pytest.mark.asyncio
@pytest.mark.skip(reason="broken")
async def test_weight_proof_from_genesis(self, default_400_blocks):
blocks = default_400_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
@pytest.mark.asyncio
async def test_weight_proof_edge_cases(self, default_400_blocks):
blocks: List[FullBlock] = default_400_blocks
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=2
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=1
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=2
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_eos=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_icc_eos=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_ip=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_sp=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=4
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
300,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=False,
)
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000_pre_genesis_empty_slots(self, pre_genesis_empty_slots_1000_blocks):
blocks = pre_genesis_empty_slots_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof10000__blocks_compact(self, default_10000_blocks_compact):
blocks = default_10000_blocks_compact
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000_partial_blocks_compact(self, default_10000_blocks_compact):
blocks: List[FullBlock] = bt.get_consecutive_blocks(
100,
block_list_input=default_10000_blocks_compact,
seed=b"asdfghjkl",
normalized_to_identity_cc_ip=True,
normalized_to_identity_cc_eos=True,
normalized_to_identity_icc_eos=True,
)
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof10000(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, {}, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_check_num_of_samples(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
curr = -1
samples = 0
for sub_epoch_segment in wp.sub_epoch_segments:
if sub_epoch_segment.sub_epoch_n > curr:
curr = sub_epoch_segment.sub_epoch_n
samples += 1
assert samples <= wpf.MAX_SAMPLES
@pytest.mark.asyncio
async def test_weight_proof_extend_no_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
last_ses_height = sorted(summaries.keys())[-1]
wpf_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf_synced.get_proof_of_weight(blocks[last_ses_height].header_hash)
assert wp is not None
# todo for each sampled sub epoch, validate number of segments
wpf_not_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
new_wp = await wpf_synced._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof_extend_new_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
# delete last summary
last_ses_height = sorted(summaries.keys())[-1]
last_ses = summaries[last_ses_height]
del summaries[last_ses_height]
wpf_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf_synced.get_proof_of_weight(blocks[last_ses_height - 10].header_hash)
assert wp is not None
        wpf_not_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
summaries[last_ses_height] = last_ses
wpf_synced.blockchain = BlockCache(sub_blocks, header_cache, height_to_hash, summaries)
new_wp = await wpf_synced._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
wpf_synced.blockchain = BlockCache(sub_blocks, header_cache, height_to_hash, summaries)
new_wp = await wpf_synced._create_proof_of_weight(blocks[last_ses_height].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
valid, fork_point, _ = await wpf.validate_weight_proof(new_wp)
assert valid
assert fork_point != 0
@pytest.mark.asyncio
async def test_weight_proof_extend_multiple_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
last_ses_height = sorted(summaries.keys())[-1]
last_ses = summaries[last_ses_height]
before_last_ses_height = sorted(summaries.keys())[-2]
before_last_ses = summaries[before_last_ses_height]
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wpf_verify = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
for x in range(10, -1, -1):
wp = await wpf.get_proof_of_weight(blocks[before_last_ses_height - x].header_hash)
assert wp is not None
valid, fork_point, _ = await wpf_verify.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
summaries[last_ses_height] = last_ses
summaries[before_last_ses_height] = before_last_ses
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
new_wp = await wpf._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf.validate_weight_proof(new_wp)
assert valid
assert fork_point != 0
@pytest.mark.skip("used for debugging")
@pytest.mark.asyncio
async def test_weight_proof_from_database(self):
connection = await aiosqlite.connect("path to db")
block_store: BlockStore = await BlockStore.create(connection)
blocks = await block_store.get_block_records_in_range(0, 0xFFFFFFFF)
peak = len(blocks) - 1
peak_height = blocks[peak].height
headers = await block_store.get_header_blocks_in_range(0, peak_height)
sub_height_to_hash = {}
sub_epoch_summaries = {}
# peak_header = await block_store.get_full_blocks_at([peak_height])
if len(blocks) == 0:
return None, None
assert peak is not None
# Sets the other state variables (peak_height and height_to_hash)
curr: BlockRecord = blocks[peak]
while True:
sub_height_to_hash[curr.height] = curr.header_hash
if curr.sub_epoch_summary_included is not None:
sub_epoch_summaries[curr.height] = curr.sub_epoch_summary_included
if curr.height == 0:
break
curr = blocks[curr.prev_hash]
assert len(sub_height_to_hash) == peak_height + 1
block_cache = BlockCache(blocks, headers, sub_height_to_hash, sub_epoch_summaries)
wpf = WeightProofHandler(DEFAULT_CONSTANTS, block_cache)
wp = await wpf._create_proof_of_weight(sub_height_to_hash[peak_height - 50])
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
await connection.close()
assert valid
print(f"size of proof is {get_size(wp)}")
def get_size(obj, seen=None):
"""Recursively finds size of objects"""
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([get_size(v, seen) for v in obj.values()])
size += sum([get_size(k, seen) for k in obj.keys()])
elif hasattr(obj, "__dict__"):
size += get_size(obj.__dict__, seen)
elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
size += sum([get_size(i, seen) for i in obj])
return size
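# Note: get_size() walks dicts, __dict__ attributes and other iterables
# recursively and deduplicates objects by id(), so shared or self-referential
# structures are counted only once. For example, get_size({"a": [1, 2, 3]})
# accounts for the dict, the key "a", the list and each int exactly once; the
# exact byte total depends on the Python build, so no fixed number is assumed.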
|
the-stack_0_8954 | # -*- coding: utf-8 -*-
import logging
from abc import abstractmethod
import numpy as np
import tensorflow as tf
from jack.readers.multiple_choice.shared import AbstractSingleSupportMCModel
from jack.tfutil.attention import attention_softmax3d
from jack.tfutil.masking import mask_3d
logger = logging.getLogger(__name__)
class DecomposableAttentionModel(AbstractSingleSupportMCModel):
def forward_pass(self, shared_resources, embedded_question, embedded_support, num_classes, tensors):
        # build the decomposable attention model over the embedded question
        # and support sequences and return logits of shape [batch, num_classes]
model_kwargs = {
'sequence1': embedded_question,
'sequence1_length': tensors.question_length,
'sequence2': embedded_support,
'sequence2_length': tensors.support_length,
'representation_size': shared_resources.config['repr_dim'],
'dropout_keep_prob': 1.0 - shared_resources.config.get('dropout', 0),
'use_masking': True,
}
model = FeedForwardDAM(**model_kwargs)
logits = model()
return logits
class BaseDecomposableAttentionModel:
@abstractmethod
def _transform_input(self, sequence, reuse=False):
raise NotImplementedError
@abstractmethod
def _transform_attend(self, sequence, reuse=False):
raise NotImplementedError
@abstractmethod
def _transform_compare(self, sequence, reuse=False):
raise NotImplementedError
@abstractmethod
def _transform_aggregate(self, v1_v2, reuse=False):
raise NotImplementedError
def __init__(self, sequence1, sequence1_length, sequence2, sequence2_length,
nb_classes=3, reuse=False, use_masking=False, init_std_dev=0.01, *args, **kwargs):
self.init_std_dev = init_std_dev
self.nb_classes = nb_classes
self.sequence1 = sequence1
self.sequence1_length = sequence1_length
self.sequence2 = sequence2
self.sequence2_length = sequence2_length
self.reuse = reuse
embedding1_size = self.sequence1.get_shape()[-1].value
embedding2_size = self.sequence2.get_shape()[-1].value
assert embedding1_size == embedding2_size
# [batch_size, time_steps, embedding_size] -> [batch_size, time_steps, representation_size]
self.transformed_sequence1 = self._transform_input(self.sequence1, reuse=self.reuse)
# [batch_size, time_steps, embedding_size] -> [batch_size, time_steps, representation_size]
self.transformed_sequence2 = self._transform_input(self.sequence2, reuse=True)
self.transformed_sequence1_length = self.sequence1_length
self.transformed_sequence2_length = self.sequence2_length
logger.info('Building the Attend graph ..')
self.raw_attentions = None
self.attention_sentence1 = self.attention_sentence2 = None
# tensors with shape (batch_size, time_steps, num_units)
self.alpha, self.beta = self.attend(self.transformed_sequence1, self.transformed_sequence2,
sequence1_lengths=self.transformed_sequence1_length,
sequence2_lengths=self.transformed_sequence2_length,
use_masking=use_masking, reuse=self.reuse)
logger.info('Building the Compare graph ..')
# tensor with shape (batch_size, time_steps, num_units)
self.v1 = self.compare(self.transformed_sequence1, self.beta, reuse=self.reuse)
# tensor with shape (batch_size, time_steps, num_units)
self.v2 = self.compare(self.transformed_sequence2, self.alpha, reuse=True)
logger.info('Building the Aggregate graph ..')
self.logits = self.aggregate(self.v1, self.v2, self.nb_classes,
v1_lengths=self.transformed_sequence1_length,
v2_lengths=self.transformed_sequence2_length,
use_masking=use_masking, reuse=self.reuse)
def __call__(self):
return self.logits
def attend(self, sequence1, sequence2,
sequence1_lengths=None, sequence2_lengths=None, use_masking=False, reuse=False):
"""
Attend phase.
Args:
sequence1: tensor with shape (batch_size, time_steps, num_units)
sequence2: tensor with shape (batch_size, time_steps, num_units)
sequence1_lengths: time_steps in sequence1
sequence2_lengths: time_steps in sequence2
use_masking: use masking
reuse: reuse variables
Returns:
two tensors with shape (batch_size, time_steps, num_units)
"""
with tf.variable_scope('attend') as _:
# tensor with shape (batch_size, time_steps, num_units)
transformed_sequence1 = self._transform_attend(sequence1, reuse)
# tensor with shape (batch_size, time_steps, num_units)
transformed_sequence2 = self._transform_attend(sequence2, True)
# tensor with shape (batch_size, time_steps, time_steps)
self.raw_attentions = tf.matmul(transformed_sequence1, tf.transpose(transformed_sequence2, [0, 2, 1]))
masked_raw_attentions = self.raw_attentions
if use_masking:
masked_raw_attentions = mask_3d(sequences=masked_raw_attentions,
sequence_lengths=sequence2_lengths,
mask_value=- np.inf, dimension=2)
self.attention_sentence1 = attention_softmax3d(masked_raw_attentions)
# tensor with shape (batch_size, time_steps, time_steps)
attention_transposed = tf.transpose(self.raw_attentions, [0, 2, 1])
masked_attention_transposed = attention_transposed
if use_masking:
masked_attention_transposed = mask_3d(sequences=masked_attention_transposed,
sequence_lengths=sequence1_lengths,
mask_value=- np.inf, dimension=2)
self.attention_sentence2 = attention_softmax3d(masked_attention_transposed)
# tensors with shape (batch_size, time_steps, num_units)
alpha = tf.matmul(self.attention_sentence2, sequence1, name='alpha')
beta = tf.matmul(self.attention_sentence1, sequence2, name='beta')
return alpha, beta
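    # The Attend step above matches the decomposable-attention formulation
    # (Parikh et al., 2016) as far as this code shows: with projected tokens
    # F(a_i), F(b_j), it forms e_ij = F(a_i) . F(b_j) (raw_attentions), then
    #     beta_i  = sum_j softmax_j(e_i.) * b_j   (sequence2 aligned to token i)
    #     alpha_j = sum_i softmax_i(e_.j) * a_i   (sequence1 aligned to token j)
    # where a and b are the input-projected sequences passed in, and masking
    # keeps padded positions out of each softmax.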
def compare(self, sentence, soft_alignment, reuse=False):
"""
Compare phase.
Args:
sentence: tensor with shape (batch_size, time_steps, num_units)
soft_alignment: tensor with shape (batch_size, time_steps, num_units)
reuse: reuse variables
Returns:
tensor with shape (batch_size, time_steps, num_units)
"""
# tensor with shape (batch, time_steps, num_units)
sentence_and_alignment = tf.concat(axis=2, values=[sentence, soft_alignment])
transformed_sentence_and_alignment = self._transform_compare(sentence_and_alignment, reuse=reuse)
return transformed_sentence_and_alignment
def aggregate(self, v1, v2, num_classes,
v1_lengths=None, v2_lengths=None, use_masking=False, reuse=False):
"""
Aggregate phase.
Args:
v1: tensor with shape (batch_size, time_steps, num_units)
v2: tensor with shape (batch_size, time_steps, num_units)
num_classes: number of output units
v1_lengths: time_steps in v1
v2_lengths: time_steps in v2
use_masking: use masking
reuse: reuse variables
"""
with tf.variable_scope('aggregate', reuse=reuse) as _:
if use_masking:
v1 = mask_3d(sequences=v1, sequence_lengths=v1_lengths, mask_value=0, dimension=1)
v2 = mask_3d(sequences=v2, sequence_lengths=v2_lengths, mask_value=0, dimension=1)
v1_sum, v2_sum = tf.reduce_sum(v1, [1]), tf.reduce_sum(v2, [1])
v1_v2 = tf.concat(axis=1, values=[v1_sum, v2_sum])
transformed_v1_v2 = self._transform_aggregate(v1_v2, reuse=reuse)
logits = tf.contrib.layers.fully_connected(inputs=transformed_v1_v2,
num_outputs=num_classes,
weights_initializer=tf.random_normal_initializer(0.0, 0.01),
biases_initializer=tf.zeros_initializer(),
activation_fn=None)
return logits
class FeedForwardDAM(BaseDecomposableAttentionModel):
def __init__(self, representation_size=200, dropout_keep_prob=1.0, *args, **kwargs):
self.representation_size = representation_size
self.dropout_keep_prob = dropout_keep_prob
super().__init__(*args, **kwargs)
def _transform_input(self, sequence, reuse=False):
with tf.variable_scope('transform_embeddings', reuse=reuse) as _:
projection = tf.contrib.layers.fully_connected(inputs=sequence, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=None, activation_fn=None)
return projection
def _transform_attend(self, sequence, reuse=False):
with tf.variable_scope('transform_attend', reuse=reuse) as _:
projection = tf.nn.dropout(sequence, keep_prob=self.dropout_keep_prob)
projection = tf.contrib.layers.fully_connected(inputs=projection, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu)
projection = tf.nn.dropout(projection, keep_prob=self.dropout_keep_prob)
projection = tf.contrib.layers.fully_connected(inputs=projection, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu)
return projection
def _transform_compare(self, sequence, reuse=False):
with tf.variable_scope('transform_compare', reuse=reuse) as _:
projection = tf.nn.dropout(sequence, keep_prob=self.dropout_keep_prob)
projection = tf.contrib.layers.fully_connected(inputs=projection, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu)
projection = tf.nn.dropout(projection, keep_prob=self.dropout_keep_prob)
projection = tf.contrib.layers.fully_connected(inputs=projection, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu)
return projection
def _transform_aggregate(self, v1_v2, reuse=False):
with tf.variable_scope('transform_aggregate', reuse=reuse) as _:
projection = tf.nn.dropout(v1_v2, keep_prob=self.dropout_keep_prob)
projection = tf.contrib.layers.fully_connected(inputs=projection, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu)
projection = tf.nn.dropout(projection, keep_prob=self.dropout_keep_prob)
projection = tf.contrib.layers.fully_connected(inputs=projection, num_outputs=self.representation_size,
weights_initializer=tf.random_normal_initializer(0.0, self.init_std_dev),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu)
return projection
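# Usage sketch (illustrative only; the placeholder names, embedding size and
# keep probability below are assumptions, not taken from this repository):
#
#     question = tf.placeholder(tf.float32, [None, None, 300])
#     support = tf.placeholder(tf.float32, [None, None, 300])
#     question_len = tf.placeholder(tf.int32, [None])
#     support_len = tf.placeholder(tf.int32, [None])
#     model = FeedForwardDAM(representation_size=200, dropout_keep_prob=0.8,
#                            sequence1=question, sequence1_length=question_len,
#                            sequence2=support, sequence2_length=support_len,
#                            nb_classes=3, use_masking=True)
#     logits = model()  # shape (batch_size, nb_classes)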
|
the-stack_0_8955 | import os
import base64
import binascii
from collections import namedtuple
import hexdump
import intervaltree
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QBrush
from PyQt5.QtGui import QPixmap
from PyQt5.QtGui import QMouseEvent
from PyQt5.QtGui import QKeySequence
from PyQt5.QtGui import QFontDatabase
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QSize
from PyQt5.QtCore import QMimeData
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QModelIndex
from PyQt5.QtCore import QItemSelection
from PyQt5.QtCore import QItemSelectionModel
from PyQt5.QtCore import QAbstractTableModel
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QTableView
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QInputDialog
from PyQt5.QtWidgets import QItemDelegate
from PyQt5.QtWidgets import QAbstractItemView
from .hexview_auto import Ui_Form as HexViewBase
from .common import h
from .common import LoggingObject
from .tablecellstylemodels import row_start_index
from .tablecellstylemodels import row_end_index
from .tablecellstylemodels import row_number
from .tablecellstylemodels import ROLE_BORDER
from .tablecellstylemodels import ColorModel
from .tablecellstylemodels import BorderModel
NamedColor = namedtuple("NamedColor", ["name", "qcolor"])
QT_COLORS = (
NamedColor("red", Qt.red),
NamedColor("green", Qt.green),
NamedColor("blue", Qt.blue),
NamedColor("black", Qt.black),
NamedColor("dark red", Qt.darkRed),
NamedColor("dark green", Qt.darkGreen),
NamedColor("dark blue", Qt.darkBlue),
NamedColor("cyan", Qt.cyan),
NamedColor("magenta", Qt.magenta),
NamedColor("yellow", Qt.yellow),
NamedColor("gray", Qt.gray),
NamedColor("dark cyan", Qt.darkCyan),
NamedColor("dark magenta", Qt.darkMagenta),
NamedColor("dark yellow", Qt.darkYellow),
NamedColor("dark gray", Qt.darkGray),
NamedColor("light gray", Qt.lightGray),
)
def make_color_icon(color):
pixmap = QPixmap(10, 10)
pixmap.fill(color)
return QIcon(pixmap)
class HexItemDelegate(QItemDelegate):
def __init__(self, model, parent, *args):
super(HexItemDelegate, self).__init__(parent)
self._model = model
def paint(self, qpainter, option, qindex):
super(HexItemDelegate, self).paint(qpainter, option, qindex)
border = self._model.data(qindex, ROLE_BORDER)
if border is None:
return
qpainter.setPen(border.theme.color)
r = option.rect
if border.top:
qpainter.drawLine(r.topLeft(), r.topRight())
if border.bottom:
qpainter.drawLine(r.bottomLeft(), r.bottomRight())
if border.left:
qpainter.drawLine(r.topLeft(), r.bottomLeft())
if border.right:
qpainter.drawLine(r.topRight(), r.bottomRight())
class HexTableModel(QAbstractTableModel):
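    # Translation table for the char pane: bytes whose repr() is a single
    # printable character (plus backslash, which repr() escapes) map to
    # themselves; everything else is rendered as '.'.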
FILTER = ''.join([(len(repr(chr(x)))==3 or chr(x) == "\\") and chr(x) or '.' for x in range(256)])
def __init__(self, buf, parent=None, *args):
super(HexTableModel, self).__init__(parent, *args)
self._buf = buf
self._colors = ColorModel(self)
self._borders = BorderModel(self)
self._colors.rangeChanged.connect(self._handle_color_range_changed)
self._borders.rangeChanged.connect(self._handle_border_range_changed)
def getColorModel(self):
return self._colors
def setColorModel(self, color_model):
self._colors.rangeChanged.disconnect(self._handle_color_range_changed)
self._colors = color_model
self._colors.rangeChanged.connect(self._handle_color_range_changed)
# TODO: re-render all cells
def getBorderModel(self):
return self._borders
def setBorderModel(self, color_model):
self._borders.rangeChanged.disconnect(self._handle_border_range_changed)
self._borders = color_model
self._borders.rangeChanged.connect(self._handle_border_range_changed)
# TODO: re-render all cells
@staticmethod
def qindex2index(index):
""" from a QIndex (row/column coordinate system), get the buffer index of the byte """
r = index.row()
c = index.column()
if c > 0x10:
return (0x10 * r) + c - 0x11
else:
return (0x10 * r) + c
def index2qindexb(self, index):
""" from a buffer index, get the QIndex (row/column coordinate system) of the byte pane """
r = index // 0x10
c = index % 0x10
return self.index(r, c)
def index2qindexc(self, index):
""" from a buffer index, get the QIndex (row/column coordinate system) of the char pane """
r = (index // 0x10)
c = index % 0x10 + 0x11
return self.index(r, c)
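    # Worked example of the index mapping (hand-checked): buffer index 0x23
    # lives in row 2; it is shown at column 0x3 of the byte pane and column
    # 0x14 of the char pane (0x10 is the divider column), and qindex2index()
    # maps either of those QIndexes back to 0x23.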
def rowCount(self, parent):
if len(self._buf) % 0x10 != 0:
return (len(self._buf) // 0x10) + 1
else:
return len(self._buf) // 0x10
def columnCount(self, parent):
return 0x21
def data(self, index, role):
if not index.isValid():
return None
elif self.qindex2index(index) >= len(self._buf):
return None
col = index.column()
bindex = self.qindex2index(index)
if role == Qt.DisplayRole:
if col == 0x10:
return ""
c = self._buf[bindex]
if isinstance(c, str):
# python2.7 mmap is a str interface, not bytearray
c = ord(c)
if col > 0x10:
return chr(c).translate(HexTableModel.FILTER)
else:
return "%02x" % (c)
elif role == Qt.BackgroundRole:
# don't color the divider column
if col == 0x10:
return None
color = self._colors.get_color(bindex)
if color is not None:
return QBrush(color)
return None
elif role == ROLE_BORDER:
if col == 0x10:
return None
return self._borders.get_border(bindex)
else:
return None
@property
def data_length(self):
return len(self._buf)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
elif orientation == Qt.Horizontal:
if section < 0x10:
return "%01X" % (section)
else:
return ""
elif orientation == Qt.Vertical:
return "%04X" % (section * 0x10)
else:
return None
def _emit_data_changed(self, start_bindex, end_bindex):
for i in range(start_bindex, end_bindex):
# mark data changed to encourage re-rendering of cell
qib = self.index2qindexb(i)
qic = self.index2qindexc(i)
self.dataChanged.emit(qib, qib)
self.dataChanged.emit(qic, qic)
def _handle_color_range_changed(self, range):
self._emit_data_changed(range.begin, range.end + 1)
def _handle_border_range_changed(self, range):
self._emit_data_changed(range.begin, range.end + 1)
class HexItemSelectionModel(QItemSelectionModel):
selectionRangeChanged = pyqtSignal([int])
def __init__(self, model, view):
"""
:type view: HexTableView
"""
super(HexItemSelectionModel, self).__init__(model)
self._model = model
self._view = view
self._start_qindex = None
self._view.leftMousePressedIndex.connect(self._handle_mouse_pressed)
self._view.leftMouseMovedIndex.connect(self._handle_mouse_moved)
self._view.leftMouseReleasedIndex.connect(self._handle_mouse_released)
self.start = None
self.end = None
def _bselect(self, selection, start_bindex, end_bindex):
""" add the given buffer indices to the given QItemSelection, both byte and char panes """
selection.select(self._model.index2qindexb(start_bindex), self._model.index2qindexb(end_bindex))
selection.select(self._model.index2qindexc(start_bindex), self._model.index2qindexc(end_bindex))
def _do_select(self, start_bindex, end_bindex):
"""
select the given range by buffer indices
selects items like this:
..................
......xxxxxxxxxxxx
xxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxx
xxxxxxxxxxxx......
..................
*not* like this:
..................
......xxxxxx......
......xxxxxx......
......xxxxxx......
......xxxxxx......
..................
"""
self.select(QItemSelection(), QItemSelectionModel.Clear)
if start_bindex > end_bindex:
start_bindex, end_bindex = end_bindex, start_bindex
selection = QItemSelection()
if row_number(end_bindex) - row_number(start_bindex) == 0:
# all on one line
self._bselect(selection, start_bindex, end_bindex)
elif row_number(end_bindex) - row_number(start_bindex) == 1:
# two lines
self._bselect(selection, start_bindex, row_end_index(start_bindex))
self._bselect(selection, row_start_index(end_bindex), end_bindex)
else:
# many lines
self._bselect(selection, start_bindex, row_end_index(start_bindex))
self._bselect(selection, row_start_index(start_bindex) + 0x10, row_end_index(end_bindex) - 0x10)
self._bselect(selection, row_start_index(end_bindex), end_bindex)
self.select(selection, QItemSelectionModel.SelectCurrent)
self.start = start_bindex
self.end = end_bindex
self.selectionRangeChanged.emit(end_bindex)
def bselect(self, start_bindex, end_bindex):
""" the public interface to _do_select """
return self._do_select(start_bindex, end_bindex)
def handle_move_key(self, key):
if self._start_qindex == self._model.index2qindexc(self.start) or \
self._start_qindex == self._model.index2qindexb(self.start):
i = self.end
else:
i = self.start
if key == QKeySequence.MoveToEndOfDocument:
i = self._model.data_length - 1
elif key == QKeySequence.MoveToEndOfLine:
i = row_end_index(i)
elif key == QKeySequence.MoveToNextChar:
i += 1
elif key == QKeySequence.MoveToNextLine:
i += 0x10
elif key == QKeySequence.MoveToNextPage:
i += 0x40
elif key == QKeySequence.MoveToNextWord:
i += 1
elif key == QKeySequence.MoveToPreviousChar:
i -= 1
elif key == QKeySequence.MoveToPreviousLine:
i -= 0x10
elif key == QKeySequence.MoveToPreviousPage:
i -= 0x40
elif key == QKeySequence.MoveToPreviousWord:
i -= 1
elif key == QKeySequence.MoveToStartOfDocument:
i = 0x0
elif key == QKeySequence.MoveToStartOfLine:
i = row_start_index(i)
else:
raise RuntimeError("Unexpected movement key: %s" % (key))
# this behavior selects the smallest or largest cell in the
# same column as the out-of-bounds index
if i < 0:
i %= 0x10
if i > self._model.data_length:
i %= 0x10
i = self._model.data_length - 0x10 + i
self.bselect(i, i)
def handle_select_key(self, key):
i = None
j = None
if self._start_qindex == self._model.index2qindexc(self.start) or \
self._start_qindex == self._model.index2qindexb(self.start):
i = self.end
j = self.start
else:
i = self.start
j = self.end
if key == QKeySequence.SelectEndOfDocument:
i = self._model.data_length - 1
elif key == QKeySequence.SelectEndOfLine:
i = row_end_index(i)
elif key == QKeySequence.SelectNextChar:
i += 1
elif key == QKeySequence.SelectNextLine:
i += 0x10
elif key == QKeySequence.SelectNextPage:
i += 0x40
elif key == QKeySequence.SelectNextWord:
i += 1
elif key == QKeySequence.SelectPreviousChar:
i -= 1
elif key == QKeySequence.SelectPreviousLine:
i -= 0x10
elif key == QKeySequence.SelectPreviousPage:
i -= 0x40
elif key == QKeySequence.SelectPreviousWord:
i -= 1
elif key == QKeySequence.SelectStartOfDocument:
i = 0x0
elif key == QKeySequence.SelectStartOfLine:
i = row_start_index(i)
else:
raise RuntimeError("Unexpected select key: %s" % (key))
# this behavior selects the smallest or largest cell in the
# same column as the out-of-bounds index
if i < 0:
i %= 0x10
if i > self._model.data_length:
i %= 0x10
i = self._model.data_length - 0x10 + i
# need to explicitly reset start_qindex so that the current index
# doesn't get confused when coming from a selection of a single cell
# (in the check at the start of this function to decide which end of
# the selection was most recently active)
self._start_qindex = self._model.index2qindexc(j)
self.bselect(i, j)
def _update_selection(self, qindex1, qindex2):
""" select the given range by qmodel indices """
m = self.model()
self._do_select(m.qindex2index(qindex1), m.qindex2index(qindex2))
def _handle_mouse_pressed(self, qindex):
self._start_qindex = qindex
self._update_selection(qindex, qindex)
def _handle_mouse_moved(self, qindex):
self._update_selection(self._start_qindex, qindex)
def _handle_mouse_released(self, qindex):
self._update_selection(self._start_qindex, qindex)
self._start_qindex = None
class HexTableView(QTableView, LoggingObject):
""" table view that handles click events for better selection handling """
leftMousePressed = pyqtSignal([QMouseEvent])
leftMousePressedIndex = pyqtSignal([QModelIndex])
leftMouseMoved = pyqtSignal([QMouseEvent])
leftMouseMovedIndex = pyqtSignal([QModelIndex])
leftMouseReleased = pyqtSignal([QMouseEvent])
leftMouseReleasedIndex = pyqtSignal([QModelIndex])
moveKeyPressed = pyqtSignal([QKeySequence])
selectKeyPressed = pyqtSignal([QKeySequence])
def __init__(self, *args, **kwargs):
super(HexTableView, self).__init__(*args, **kwargs)
self.leftMousePressed.connect(self._handle_mouse_press)
self.leftMouseMoved.connect(self._handle_mouse_move)
self.leftMouseReleased.connect(self._handle_mouse_release)
self._press_start_index = None
self._press_current_index = None
self._press_end_index = None
self._is_tracking_mouse = False
def _reset_press_state(self):
self._press_start_index = None
self._press_current_index = None
self._press_end_index = None
def mousePressEvent(self, event):
super(HexTableView, self).mousePressEvent(event)
if event.buttons() & Qt.LeftButton:
self.leftMousePressed.emit(event)
def mouseMoveEvent(self, event):
super(HexTableView, self).mouseMoveEvent(event)
if event.buttons() & Qt.LeftButton:
self.leftMouseMoved.emit(event)
    def mouseReleaseEvent(self, event):
        super(HexTableView, self).mouseReleaseEvent(event)
        # on a release event the released button is reported by button(), not buttons()
        if event.button() == Qt.LeftButton:
            self.leftMouseReleased.emit(event)
def keyPressEvent(self, event):
move_keys = (
QKeySequence.MoveToEndOfDocument,
QKeySequence.MoveToEndOfLine,
QKeySequence.MoveToNextChar,
QKeySequence.MoveToNextLine,
QKeySequence.MoveToNextPage,
QKeySequence.MoveToNextWord,
QKeySequence.MoveToPreviousChar,
QKeySequence.MoveToPreviousLine,
QKeySequence.MoveToPreviousPage,
QKeySequence.MoveToPreviousWord,
QKeySequence.MoveToStartOfDocument,
QKeySequence.MoveToStartOfLine,
)
for move_key in move_keys:
if event.matches(move_key):
self.moveKeyPressed.emit(move_key)
return
t = event.text()
KeyMapping = namedtuple("KeyMapping", ["source", "destination"])
vim_move_mappings = (
KeyMapping("j", QKeySequence.MoveToNextLine),
KeyMapping("k", QKeySequence.MoveToPreviousLine),
KeyMapping("h", QKeySequence.MoveToPreviousChar),
KeyMapping("l", QKeySequence.MoveToNextChar),
KeyMapping("^", QKeySequence.MoveToStartOfLine),
KeyMapping("$", QKeySequence.MoveToEndOfLine),
)
for vim_mapping in vim_move_mappings:
if vim_mapping.source == t:
self.moveKeyPressed.emit(vim_mapping.destination)
return
select_keys = (
QKeySequence.SelectAll,
QKeySequence.SelectEndOfDocument,
QKeySequence.SelectEndOfLine,
QKeySequence.SelectNextChar,
QKeySequence.SelectNextLine,
QKeySequence.SelectNextPage,
QKeySequence.SelectNextWord,
QKeySequence.SelectPreviousChar,
QKeySequence.SelectPreviousLine,
QKeySequence.SelectPreviousPage,
QKeySequence.SelectPreviousWord,
QKeySequence.SelectStartOfDocument,
QKeySequence.SelectStartOfLine,
)
for select_key in select_keys:
if event.matches(select_key):
self.selectKeyPressed.emit(select_key)
return
t = event.text()
KeyMapping = namedtuple("KeyMapping", ["source", "destination"])
vim_select_mappings = (
KeyMapping("J", QKeySequence.SelectNextLine),
KeyMapping("K", QKeySequence.SelectPreviousLine),
KeyMapping("H", QKeySequence.SelectPreviousChar),
KeyMapping("L", QKeySequence.SelectNextChar),
)
for vim_mapping in vim_select_mappings:
if vim_mapping.source == t:
self.selectKeyPressed.emit(vim_mapping.destination)
return
def _handle_mouse_press(self, key_event):
self._reset_press_state()
self._press_start_index = self.indexAt(key_event.pos())
self._is_tracking_mouse = True
self.leftMousePressedIndex.emit(self._press_start_index)
def _handle_mouse_move(self, key_event):
if self._is_tracking_mouse:
i = self.indexAt(key_event.pos())
if i != self._press_current_index:
self._press_current_index = i
self.leftMouseMovedIndex.emit(i)
def _handle_mouse_release(self, key_event):
self._press_end_index = self.indexAt(key_event.pos())
self._is_tracking_mouse = False
self.leftMouseReleasedIndex.emit(self._press_end_index)
Origin = namedtuple("Origin", ["offset", "name"])
class HexViewWidget(QWidget, HexViewBase, LoggingObject):
originsChanged = pyqtSignal()
def __init__(self, buf, parent=None):
super(HexViewWidget, self).__init__()
self.setupUi(self)
self._buf = buf
self._model = HexTableModel(self._buf)
self._colored_regions = intervaltree.IntervalTree()
self._origins = []
# ripped from pyuic5 ui/hexview.ui
# at commit 6c9edffd32706097d7eba8814d306ea1d997b25a
# so we can add our custom HexTableView instance
self.view = HexTableView(self)
sizePolicy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.view.sizePolicy().hasHeightForWidth())
self.view.setSizePolicy(sizePolicy)
self.view.setMinimumSize(QSize(660, 0))
self.view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.view.setSelectionMode(QAbstractItemView.NoSelection)
self.view.setShowGrid(False)
self.view.setWordWrap(False)
self.view.setObjectName("view")
self.view.horizontalHeader().setDefaultSectionSize(25)
self.view.horizontalHeader().setMinimumSectionSize(25)
self.view.verticalHeader().setDefaultSectionSize(21)
self.mainLayout.insertWidget(0, self.view)
# end rip
# TODO: provide a HexViewWidget.setModel method, and don't build it ourselves
self.view.setModel(self._model)
for i in range(0x10):
self.view.setColumnWidth(i, 23)
self.view.setColumnWidth(0x10, 12)
for i in range(0x11, 0x22):
self.view.setColumnWidth(i, 11)
self._hsm = HexItemSelectionModel(self._model, self.view)
self.view.setSelectionModel(self._hsm)
self.view.setContextMenuPolicy(Qt.CustomContextMenu)
self.view.customContextMenuRequested.connect(self._handle_context_menu_requested)
self._hsm.selectionRangeChanged.connect(self._handle_selection_range_changed)
self.originsChanged.connect(self._handle_origins_changed)
self.view.moveKeyPressed.connect(self._hsm.handle_move_key)
self.view.selectKeyPressed.connect(self._hsm.handle_select_key)
f = QFontDatabase.systemFont(QFontDatabase.FixedFont)
self.view.setFont(f)
self.statusLabel.setFont(f)
self.view.setItemDelegate(HexItemDelegate(self._model, self))
self.statusLabel.setText("")
def getModel(self):
return self._model
def getColorModel(self):
""" this is a shortcut, to make it easy to add/remove colored ranges """
return self.getModel().getColorModel()
def getBorderModel(self):
""" this is a shortcut, to make it easy to add/remove bordered ranges """
return self.getModel().getBorderModel()
def getSelectionModel(self):
return self._hsm
def scrollTo(self, index):
qi = self._model.index2qindexb(index)
self.view.scrollTo(qi)
def _render_status_text(self):
txt = []
start = self._hsm.start
end = self._hsm.end
if start not in (None, -1) and end not in (None, -1):
txt.append("sel: [{:s}, {:s}]".format(hex(start), hex(end)))
txt.append("len: {:s}".format(hex(end - start + 1)))
for origin in self._origins:
txt.append("from '{:s}': {:s}".format(
origin.name, hex(start - origin.offset)))
self.statusLabel.setText(" ".join(txt))
def _handle_selection_range_changed(self, end_bindex):
self._render_status_text()
self.scrollTo(end_bindex)
def _handle_origins_changed(self):
self._render_status_text()
def get_context_menu(self, qpoint):
""" override this method to customize the context menu """
menu = QMenu(self)
index = self.view.indexAt(qpoint)
def add_action(menu, text, handler, icon=None):
a = None
if icon is None:
a = QAction(text, self)
else:
a = QAction(icon, text, self)
a.triggered.connect(handler)
menu.addAction(a)
add_action(menu, "Color selection", self._handle_color_selection)
# duplication here with vstructui
color_menu = menu.addMenu("Color selection...")
# need to escape the closure capture on the color loop variable below
# hint from: http://stackoverflow.com/a/6035865/87207
def make_color_selection_handler(color):
return lambda: self._handle_color_selection(color=color)
for color in QT_COLORS:
add_action(color_menu, "{:s}".format(color.name),
make_color_selection_handler(color.qcolor), make_color_icon(color.qcolor))
start = self._hsm.start
end = self._hsm.end
cm = self.getColorModel()
if (start == end and cm.is_index_colored(start)) or cm.is_region_colored(start, end):
def make_remove_color_handler(r):
return lambda: self._handle_remove_color_range(r)
remove_color_menu = menu.addMenu("Remove color...")
for cr in cm.get_region_colors(start, end):
pixmap = QPixmap(10, 10)
pixmap.fill(cr.color)
icon = QIcon(pixmap)
add_action(remove_color_menu,
"Remove color [{:s}, {:s}], len: {:s}".format(h(cr.begin), h(cr.end), h(cr.end - cr.begin)),
make_remove_color_handler(cr), make_color_icon(cr.color))
menu.addSeparator() # -----------------------------------------------------------------
add_action(menu, "Copy selection (binary)", self._handle_copy_binary)
copy_menu = menu.addMenu("Copy...")
add_action(copy_menu, "Copy selection (binary)", self._handle_copy_binary)
add_action(copy_menu, "Copy selection (text)", self._handle_copy_text)
add_action(copy_menu, "Copy selection (hex)", self._handle_copy_hex)
add_action(copy_menu, "Copy selection (hexdump)", self._handle_copy_hexdump)
add_action(copy_menu, "Copy selection (base64)", self._handle_copy_base64)
menu.addSeparator() # -----------------------------------------------------------------
add_action(menu, "Add origin", lambda: self._handle_add_origin(index))
return menu
def _handle_context_menu_requested(self, qpoint):
self.get_context_menu(qpoint).exec_(self.view.mapToGlobal(qpoint))
def _handle_color_selection(self, color=None):
        # qt seems to set non-existent keyword args to False, so we manually reset to None
if not color:
color = None
s = self._hsm.start
e = self._hsm.end + 1
        color_range = self.getColorModel().color_region(s, e, color=color)
        self._hsm.bselect(-1, -1)
        # seems to be a bit of duplication here and in the ColorModel?
        self._colored_regions.addi(s, e, color_range)
    def _handle_remove_color_range(self, color_range):
        self.getColorModel().clear_range(color_range)
    @property
    def _selected_data(self):
        start = self._hsm.start
        end = self._hsm.end
        # `end` is an inclusive buffer index (see _render_status_text), so include the last byte
        return self._buf[start:end + 1]
def _handle_copy_binary(self):
mime = QMimeData()
# mime type suggested here: http://stackoverflow.com/a/6783972/87207
mime.setData("application/octet-stream", self._selected_data)
QApplication.clipboard().setMimeData(mime)
def _handle_copy_text(self):
mime = QMimeData()
mime.setText(self._selected_data)
QApplication.clipboard().setMimeData(mime)
def _handle_copy_hex(self):
mime = QMimeData()
mime.setText(binascii.b2a_hex(self._selected_data))
QApplication.clipboard().setMimeData(mime)
def _handle_copy_hexdump(self):
mime = QMimeData()
t = hexdump.hexdump(self._selected_data, result="return")
mime.setText(t)
QApplication.clipboard().setMimeData(mime)
def _handle_copy_base64(self):
mime = QMimeData()
mime.setText(base64.b64encode(self._selected_data))
QApplication.clipboard().setMimeData(mime)
def add_origin(self, origin):
self._origins.append(origin)
self.originsChanged.emit()
def remove_origin(self, origin):
self._origins.remove(origin)
self.originsChanged.emit()
def _handle_add_origin(self, qindex):
index = self.getModel().qindex2index(qindex)
name, ok = QInputDialog.getText(self, "Add origin...", "Origin name:")
if ok and name:
self.add_origin(Origin(index, name))
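# A minimal usage sketch (not part of the original module). It assumes the PyQt5
# imports at the top of this file also provide QApplication; the command-line file
# handling below is illustrative only.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    with open(sys.argv[1], "rb") as f:  # any binary file to inspect
        buf = f.read()
    w = HexViewWidget(buf)
    w.add_origin(Origin(0x0, "file start"))  # exercise the origin/status-line feature
    w.show()
    sys.exit(app.exec_())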
|
the-stack_0_8958 | import turtle
import time
import random
delay = 0.1
score = 0
high_score = 0
wn = turtle.Screen()
wn.title("Snake")
wn.bgcolor("green")
wn.setup(width=600, height=600)
wn.tracer(0)
head = turtle.Turtle()
head.speed(0)
head.shape("square")
head.color("black")
head.penup()
head.goto(0,0)
head.direction = "stop"
food = turtle.Turtle()
food.speed(0)
food.shape("circle")
food.color("red")
food.penup()
food.goto(0,100)
segments = []
pen = turtle.Turtle()
pen.speed(0)
pen.shape("square")
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Score: 0 High Score: 0", align="center", font=("Courier", 24, "normal"))
def go_up():
if head.direction != "down":
head.direction = "up"
def go_down():
if head.direction != "up":
head.direction = "down"
def go_left():
if head.direction != "right":
head.direction = "left"
def go_right():
if head.direction != "left":
head.direction = "right"
def move():
if head.direction == "up":
y = head.ycor()
head.sety(y + 20)
if head.direction == "down":
y = head.ycor()
head.sety(y - 20)
if head.direction == "left":
x = head.xcor()
head.setx(x - 20)
if head.direction == "right":
x = head.xcor()
head.setx(x + 20)
wn.listen()
wn.onkeypress(go_up, "w")
wn.onkeypress(go_down, "s")
wn.onkeypress(go_left, "a")
wn.onkeypress(go_right, "d")
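# Main game loop: redraw the screen, handle wall and self collisions, grow the snake
# and bump the score when it reaches the food, and move the body segments forward.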
while True:
wn.update()
if head.xcor()>290 or head.xcor()<-290 or head.ycor()>290 or head.ycor()<-290:
time.sleep(1)
head.goto(0,0)
head.direction = "stop"
for segment in segments:
segment.goto(1000, 1000)
segments.clear()
score = 0
delay = 0.1
pen.clear()
pen.write("Score: {} High Score: {}".format(score, high_score), align="center", font=("Courier", 24, "normal"))
if head.distance(food) < 20:
x = random.randint(-290, 290)
y = random.randint(-290, 290)
food.goto(x,y)
new_segment = turtle.Turtle()
new_segment.speed(0)
new_segment.shape("square")
new_segment.color("grey")
new_segment.penup()
segments.append(new_segment)
delay -= 0.001
score += 10
if score > high_score:
high_score = score
pen.clear()
pen.write("Score: {} High Score: {}".format(score, high_score), align="center", font=("Courier", 24, "normal"))
for index in range(len(segments)-1, 0, -1):
x = segments[index-1].xcor()
y = segments[index-1].ycor()
segments[index].goto(x, y)
if len(segments) > 0:
x = head.xcor()
y = head.ycor()
segments[0].goto(x,y)
move()
for segment in segments:
if segment.distance(head) < 20:
time.sleep(1)
head.goto(0,0)
head.direction = "stop"
for segment in segments:
segment.goto(1000, 1000)
segments.clear()
score = 0
delay = 0.1
pen.clear()
pen.write("Score: {} High Score: {}".format(score, high_score), align="center", font=("Courier", 24, "normal"))
time.sleep(delay)
wn.mainloop()
|
the-stack_0_8959 | """
inorder: [LEFT]root[RIGHT]
postorder: [LEFT][RIGHT]root
First thing we know is the value of root, which is the last element of `postorder`.
Find the index of the root in `inorder`. So find out the interval of [LEFT] and [RIGHT] in `inorder`.
The length of the [LEFT] and [RIGHT] in `inorder` are the same with the length of the [LEFT] and [RIGHT] in `postorder`.
"""
class Solution(object):
def buildTree(self, inorder, postorder):
if not inorder or not postorder: return None
root = TreeNode(postorder[-1])
if len(inorder)==1: return root
r = inorder.index(root.val)
leftInOrder = inorder[:r]
leftPostOrder = postorder[:r]
rightInOrder = inorder[r+1:]
rightPostOrder = postorder[r:len(postorder)-1]
root.left = self.buildTree(leftInOrder, leftPostOrder)
root.right = self.buildTree(rightInOrder, rightPostOrder)
return root
"""
Time: O(N log N) for a balanced tree (O(N^2) in the worst, skewed case), because for each
node we scan its subarray to locate the root and slice out the children. To be precise,
for a balanced tree:
O(N) for constructing root.
O(N/2) for constructing root.left
O(N/2) for constructing root.right
O(N/4) for constructing root.left.left
O(N/4) for constructing root.left.right
O(N/4) for constructing root.right.left
O(N/4) for constructing root.right.right
...
To improve this, we can use a hash table to look up the root's index in `inorder`, as in the version below.
Space: O(N log N) of total allocations for a balanced tree (up to O(N^2) when skewed).
For each node, we construct new inorder/postorder arrays for its children.
We can avoid these copies by passing index ranges (pointers) instead.
"""
"""
Improved version.
Time: O(N).
Space: O(N). For `index`.
"""
class Solution(object):
def buildTree(self, inorder, postorder):
def helper(i, j, k, l):
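            # i..j is the half-open slice of `inorder` and k..l the half-open slice
            # of `postorder` covering the subtree currently being built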
if j-i<=0: return None
if l-k<=0: return None
root = TreeNode(postorder[l-1])
if j-i==1: return root
r = index[root.val]
root.left = helper(i, r, k, k+r-i)
root.right = helper(r+1, j, k+r-i, l-1)
return root
index = {} #the index of inorder
for i, n in enumerate(inorder): index[n] = i
return helper(0, len(inorder), 0, len(postorder)) |
the-stack_0_8960 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, PHYTEC Messtechnik GmbH
# Author: Stefan Müller-Klieser <[email protected]>
import sys
import argparse
import os
import shutil
from phylib import *
class BSP_SiteConfLoader(BoardSupportPackage):
"""Extends the BoardSupportPackage class with functionalty to
manage a site.conf. This file is Host or user specific and defines
settings very specific to the location where the bsp is getting built
"""
def __init__(self):
super(BSP_SiteConfLoader, self).__init__()
def copy_site_conf(self, arg=None):
if arg is None:
arg = self.probe_for_siteconf()
if arg is None:
print('No site.conf found on host.')
return False
target = os.path.join(self.build_dir, 'conf/site.conf')
print("site.conf setup: Copying " + arg + " to " + target)
shutil.copyfile(arg, target)
return True
def probe_for_siteconf(self):
locations = ["~/.site.conf",
"/home/share/tools/yocto/site.conf",
"/etc/yocto/site.conf"]
for l in locations:
if os.path.isfile(os.path.expanduser(l)):
return os.path.expanduser(l)
return None
##############
# Executable #
##############
def main():
"""This script starts the site.conf mechanism and copies the choosen site.conf
in your build/conf directory
"""
parser = argparse.ArgumentParser(description='copy a site.conf into your conf dir')
parser.add_argument('-f', dest='filename', help='set the site.conf file location manually')
args = parser.parse_args()
bsp = BSP_SiteConfLoader()
if not bsp.copy_site_conf(args.filename):
# An error has happened. Report it back to calling program.
sys.exit(1)
if __name__ == "__main__":
main()
|
the-stack_0_8961 | import pytest
from apps.gdpr.utils import account_info_handler
pytestmark = pytest.mark.django_db
def test_account_info_handler(user):
needed_data = {
"email": user.email,
"username": user.username,
"first_name": user.first_name,
"last_name": user.last_name,
"privacy_policy": user.privacy_policy,
"warning_sent_email": user.warning_sent_email,
"account_info_link": user.account_info_link,
"last_account_info_created": None,
"is_staff": user.is_staff,
"is_active": user.is_active,
"date_joined": user.date_joined.strftime("%d/%m/%Y %H:%m:%S"),
"last_login": None,
"last_password_change_date": user.last_password_change_date.strftime(
"%d/%m/%Y %H:%m:%S"
),
}
assert account_info_handler(user) == needed_data
|
the-stack_0_8963 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
from .._internal.client_credential_base import ClientCredentialBase
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any
class ClientSecretCredential(ClientCredentialBase):
"""Authenticates as a service principal using a client secret.
:param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID.
:param str client_id: the service principal's client ID
:param str client_secret: one of the service principal's client secrets
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
:keyword ~azure.identity.RegionalAuthority regional_authority: a :class:`~azure.identity.RegionalAuthority` to
which the credential will authenticate. This argument should be used only by applications deployed to Azure
VMs.
"""
def __init__(self, tenant_id, client_id, client_secret, **kwargs):
# type: (str, str, str, **Any) -> None
if not client_id:
raise ValueError("client_id should be the id of an Azure Active Directory application")
if not client_secret:
raise ValueError("secret should be an Azure Active Directory application's client secret")
if not tenant_id:
raise ValueError(
"tenant_id should be an Azure Active Directory tenant's id (also called its 'directory id')"
)
super(ClientSecretCredential, self).__init__(
client_id=client_id, client_credential=client_secret, tenant_id=tenant_id, **kwargs
)
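# Usage sketch (illustrative, not part of the SDK source): construct the credential
# and request a token, or pass it to any Azure SDK client that accepts a credential.
# The ARM scope string below is only an example.
#
#   from azure.identity import ClientSecretCredential
#   credential = ClientSecretCredential(
#       tenant_id="<tenant-id>",
#       client_id="<client-id>",
#       client_secret="<client-secret>",
#   )
#   token = credential.get_token("https://management.azure.com/.default")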
|
the-stack_0_8964 | import os
MILVUS_TB = "Tables"
MILVUS_TBF = "TableFiles"
METRIC_DIC = {
1: "L2",
2: "IP",
3: "HAMMING",
4: "JACCARD",
5: "TANIMOTO",
6: "SUBSTRUCTURE",
7: "SUPERSTRUCTURE"
}
H2M_YAML = {
'milvus-version': '0.10.5',
'data_path': ['/home/data/data1.hdf5', '/home/data/fdata2.hdf5'],
'dest_host': '127.0.0.1',
'dest_port': 19530,
'mode': 'append',
'dest_collection_name': 'test02',
'dest_partition_name': 'partition_01',
'collection_parameter': {'dimension': 128, 'index_file_size': 1024, 'metric_type': 'IP'},
}
WORK_PATH = os.getenv("MILVUSDM_PATH", (os.path.join(os.environ['HOME'], 'milvusdm')))
IDS_PATH = WORK_PATH + os.sep + 'ids'
LOGS_NUM = os.getenv("logs_num", 0)
|
the-stack_0_8966 | from functions_recorder import load_csv, plot_inputs_vr, plot_inputs_miniscope
import tkFileDialog
from paths import sync_path
from Tkinter import Tk
def get_tk_file(initial_path):
root = Tk()
root.withdraw()
return tkFileDialog.askopenfilenames(initialdir=initial_path, filetypes=(("csv files", "*.csv"),))[0]
# select the sync file to visualize
file_path = get_tk_file(sync_path)
# load the data in the file
sync_data = load_csv(file_path)
# determine whether it's a miniscope or VR file and plot accordingly
if 'syncVR' in file_path:
plot_inputs_vr(sync_data)
else:
plot_inputs_miniscope(sync_data)
# root.destroy()
|
the-stack_0_8967 | import sys
import os
import time
import subprocess
import codecs
# Change these to match your own environment
# Do not make watchfolder = outputfolder
path_to_watch = "\path\of\watchfolder"
path_to_send = "\path\of\outputfolder"
script_to_run = "\path\of\script"
def __main__():
# Create a dictionary of all the files in the watchfolder (a.k.a. path_to_watch)
before = dict([(f, None) for f in os.listdir(path_to_watch)])
while True:
# How many seconds to wait between folder checks - be careful about making this less than 2
time.sleep(5)
# Create a dictionary of all the files in the watchfolder
after = dict([(f, None) for f in os.listdir(path_to_watch)])
# Compare the two lists to find new files
added = [f for f in after if f not in before]
if added:
# print "Added: ", ", ".join(added)
for f in added:
# Create a new deadline job for each new file
CreateAndSubmitJobs(f)
# Here you can add any code to move/delete/etc. the file you just made a job out of
before = after
def CreateAndSubmitJobs(newFile):
"""
Creates a Draft job, using a file named newFile.
"""
# These values are all rough defaults, you may need to change them to match your farm
# Creating the job file programmatically
# http://docs.thinkboxsoftware.com/products/deadline/7.0/1_User%20Manual/manual/manual-submission.html#job-info-file
# This is where your temp files will be placed. You may want to change
# this, as this is assuming a default Windows 10 install of deadline
temp_path = os.path.join(GetCurrentUserHomeDirectory(), "temp")
jobInfoFilename = os.path.join(temp_path,
"draft_job_info.job") # This can be named whatever you wish
writer = open(jobInfoFilename, 'w')
try:
writer.write("Plugin=Draft\n")
writer.write("Name=WatchfolderJob-" + newFile + "\n")
writer.write("Comment=Created automatically by watchfolder.py\n")
# If you've got a specific machine you want to test this locally on,
# set this to that machine
# writer.write("Whitelist=mobile-010\n")
writer.write("OutputDirectory0=%s\n" % path_to_send)
finally:
writer.close()
# Create plugin info file programmatically
# http://docs.thinkboxsoftware.com/products/deadline/7.0/1_User%20Manual/manual/manual-submission.html#plug-in-info-file
# This can be named whatever you wish
pluginInfoFilename = os.path.join(temp_path, "draft_plugin_info.job")
writer = open(pluginInfoFilename, 'w')
try:
# Lots of these are required values, and I've left them blank. They can be
# populated if you choose
writer.write("scriptFile=%s\n" % script_to_run)
writer.write("ScriptArg0=username=\"\"\n")
writer.write("ScriptArg1=entity=\"\"\n")
writer.write("ScriptArg2=version=\"\"\n")
writer.write("ScriptArg3=frameList=\n")
writer.write("ScriptArg4=outFolder=%s\n" % path_to_send)
writer.write("ScriptArg5=outFile=%s\n" % os.path.join(path_to_send, newFile))
writer.write("ScriptArg6=inFile=%s\n" % os.path.join(path_to_watch, newFile))
finally:
writer.close()
# Setup the command line arguments.
SubmitJobs(jobInfoFilename, pluginInfoFilename)
def SubmitJobs(file1, file2):
"""
Wrapper for CallDeadlineCommand to make creating jobs simpler
"""
print(CallDeadlineCommand([file1, file2]))
def GetCurrentUserHomeDirectory():
output = CallDeadlineCommand(["-GetCurrentUserHomeDirectory"])
return output.replace("\r", "").replace("\n", "").replace("\\", os.sep)
def GetRepositoryRoot():
output = CallDeadlineCommand(['-root'])
return output.replace("\r", "").replace("\n", "").replace("\\", os.sep)
def CallDeadlineCommand(args):
"""
Calls deadlinecommand with arguments as passed args with 'deadlinecommand' as the first argument
"""
# On OSX, we look for the DEADLINE_PATH file. On other platforms, we use
# the environment variable.
if os.path.exists("/Users/Shared/Thinkbox/DEADLINE_PATH"):
with open("/Users/Shared/Thinkbox/DEADLINE_PATH") as f:
deadlineBin = f.read().strip()
deadlineCommand = "%s/deadlinecommand" % deadlineBin
else:
try:
deadlineBin = os.environ['DEADLINE_PATH']
except KeyError:
return ""
if os.name == 'nt':
deadlineCommand = "%s\\deadlinecommand.exe" % deadlineBin
else:
deadlineCommand = "%s/deadlinecommand" % deadlineBin
# insert deadlineCommand as the first argument
args.insert(0, deadlineCommand)
    # Specifying PIPE for all handles to work around a Python bug on Windows.
    # The unused handles are then closed immediately afterwards.
proc = subprocess.Popen(
args,
cwd=deadlineBin,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=None)
proc.stdin.close()
proc.stderr.close()
output = proc.stdout.read()
output = output.decode("utf_8")
return output
if __name__ == "__main__":
__main__()
|
the-stack_0_8968 | # Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import os.path
import shutil
import subprocess
import tempfile
import pytest
import yaml
from ludwig.constants import TRAINER
from tests.integration_tests.utils import category_feature, generate_data, sequence_feature
def _run_commands(commands, **ludwig_kwargs):
for arg_name, value in ludwig_kwargs.items():
commands += ["--" + arg_name, value]
cmdline = " ".join(commands)
print(cmdline)
completed_process = subprocess.run(cmdline, shell=True, stdout=subprocess.PIPE, env=os.environ.copy())
assert completed_process.returncode == 0
return completed_process
def _run_ludwig(command, **ludwig_kwargs):
commands = ["ludwig", command]
return _run_commands(commands, **ludwig_kwargs)
def _run_ludwig_horovod(command, **ludwig_kwargs):
commands = ["horovodrun", "-np", "2", "ludwig", command]
return _run_commands(commands, **ludwig_kwargs)
def _prepare_data(csv_filename, config_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=2, reduce_input="sum")]
# Generate test data
dataset_filename = generate_data(input_features, output_features, csv_filename)
# generate config file
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
TRAINER: {"epochs": 2},
}
with open(config_filename, "w") as f:
yaml.dump(config, f)
return dataset_filename
def _prepare_hyperopt_data(csv_filename, config_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=2, reduce_input="sum")]
# Generate test data
dataset_filename = generate_data(input_features, output_features, csv_filename)
# generate config file
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 4},
TRAINER: {"epochs": 2},
"hyperopt": {
"parameters": {
"trainer.learning_rate": {
"type": "float",
"low": 0.0001,
"high": 0.01,
"space": "log",
"steps": 3,
}
},
"goal": "minimize",
"output_feature": output_features[0]["name"],
"validation_metrics": "loss",
"executor": {"type": "serial"},
"sampler": {"type": "random", "num_samples": 2},
},
}
with open(config_filename, "w") as f:
yaml.dump(config, f)
return dataset_filename
@pytest.mark.distributed
def test_train_cli_dataset(csv_filename):
"""Test training using `ludwig train --dataset`."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
@pytest.mark.distributed
def test_train_cli_training_set(csv_filename):
"""Test training using `ludwig train --training_set`."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
validation_filename = shutil.copyfile(dataset_filename, os.path.join(tmpdir, "validation.csv"))
test_filename = shutil.copyfile(dataset_filename, os.path.join(tmpdir, "test.csv"))
_run_ludwig(
"train",
training_set=dataset_filename,
validation_set=validation_filename,
test_set=test_filename,
config=config_filename,
output_directory=tmpdir,
)
@pytest.mark.distributed
def test_train_cli_horovod(csv_filename):
"""Test training using `horovodrun -np 2 ludwig train --dataset`."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig_horovod(
"train",
dataset=dataset_filename,
config=config_filename,
output_directory=tmpdir,
experiment_name="horovod_experiment",
)
# Check that `model_load_path` works correctly
_run_ludwig_horovod(
"train",
dataset=dataset_filename,
config=config_filename,
output_directory=tmpdir,
model_load_path=os.path.join(tmpdir, "horovod_experiment_run", "model"),
)
@pytest.mark.skip(reason="Issue #1451: Use torchscript.")
@pytest.mark.distributed
def test_export_savedmodel_cli(csv_filename):
"""Test exporting Ludwig model to Tensorflows savedmodel format."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
_run_ludwig(
"export_savedmodel",
model=os.path.join(tmpdir, "experiment_run", "model"),
output_path=os.path.join(tmpdir, "savedmodel"),
)
@pytest.mark.skip(reason="Issue #1451: Use torchscript.")
@pytest.mark.distributed
def test_export_neuropod_cli(csv_filename):
"""Test exporting Ludwig model to neuropod format."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
_run_ludwig(
"export_neuropod",
model=os.path.join(tmpdir, "experiment_run", "model"),
output_path=os.path.join(tmpdir, "neuropod"),
)
@pytest.mark.distributed
def test_experiment_cli(csv_filename):
"""Test experiment cli."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("experiment", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
@pytest.mark.distributed
def test_predict_cli(csv_filename):
"""Test predict cli."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
_run_ludwig(
"predict",
dataset=dataset_filename,
model=os.path.join(tmpdir, "experiment_run", "model"),
output_directory=os.path.join(tmpdir, "predictions"),
)
@pytest.mark.distributed
def test_evaluate_cli(csv_filename):
"""Test evaluate cli."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
_run_ludwig(
"evaluate",
dataset=dataset_filename,
model=os.path.join(tmpdir, "experiment_run", "model"),
output_directory=os.path.join(tmpdir, "predictions"),
)
@pytest.mark.distributed
def test_hyperopt_cli(csv_filename):
"""Test hyperopt cli."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_hyperopt_data(csv_filename, config_filename)
_run_ludwig("hyperopt", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
@pytest.mark.distributed
def test_visualize_cli(csv_filename):
"""Test Ludwig 'visualize' cli."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
_run_ludwig(
"visualize",
visualization="learning_curves",
model_names="run",
training_statistics=os.path.join(tmpdir, "experiment_run", "training_statistics.json"),
output_directory=os.path.join(tmpdir, "visualizations"),
)
@pytest.mark.distributed
def test_collect_summary_activations_weights_cli(csv_filename):
"""Test collect_summary cli."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("train", dataset=dataset_filename, config=config_filename, output_directory=tmpdir)
completed_process = _run_ludwig("collect_summary", model=os.path.join(tmpdir, "experiment_run", "model"))
stdout = completed_process.stdout.decode("utf-8")
assert "Modules" in stdout
assert "Parameters" in stdout
@pytest.mark.distributed
def test_synthesize_dataset_cli(csv_filename):
"""Test synthesize_data cli."""
with tempfile.TemporaryDirectory() as tmpdir:
# test depends on default setting of --dataset_size
# if this parameter is specified, _run_ludwig fails when
# attempting to build the cli parameter structure
_run_ludwig(
"synthesize_dataset",
output_path=os.path.join(tmpdir, csv_filename),
features="'[ \
{name: text, type: text}, \
{name: category, type: category}, \
{name: number, type: number}, \
{name: binary, type: binary}, \
{name: set, type: set}, \
{name: bag, type: bag}, \
{name: sequence, type: sequence}, \
{name: timeseries, type: timeseries}, \
{name: date, type: date}, \
{name: h3, type: h3}, \
{name: vector, type: vector}, \
{name: audio, type: audio}, \
{name: image, type: image} \
]'",
)
@pytest.mark.distributed
def test_preprocess_cli(csv_filename):
"""Test preprocess `ludwig preprocess."""
with tempfile.TemporaryDirectory() as tmpdir:
config_filename = os.path.join(tmpdir, "config.yaml")
dataset_filename = _prepare_data(csv_filename, config_filename)
_run_ludwig("preprocess", dataset=dataset_filename, preprocessing_config=config_filename)
|
the-stack_0_8970 | """
File: anagram.py
Name: Jason Huang
----------------------------------
This program recursively finds all the anagram(s)
for the word input by the user and terminates when the
input string matches the EXIT constant defined
at line 19
If you correctly implement this program, you should see the
number of anagrams for each word listed below:
* arm -> 3 anagrams
* contains -> 5 anagrams
* stop -> 6 anagrams
* tesla -> 10 anagrams
* spear -> 12 anagrams
"""
# Constants
FILE = 'dictionary.txt' # This is the filename of an English dictionary
EXIT = '-1' # Controls when to stop the loop
result_list = []
dictionary = []
def main():
    # Repeatedly prompt for a word and print every anagram of it found in the dictionary
global result_list
while True:
result_list = []
print(f'Welcome to stanCode \"Anagram Generator\" (or {EXIT} to quit)')
        s = input('Find anagrams for:')
if s == EXIT:
break
else:
read_dictionary()
find_anagrams(s)
def read_dictionary():
    # Load every word from the dictionary file into the global dictionary list
with open(FILE, 'r') as f:
for line in f:
line = line.strip()
dictionary.append(line)
def find_anagrams(s):
"""
    :param s: the word for which the user wants to find anagrams in the dictionary
    :return: None; the anagrams found are collected in result_list and printed
"""
word = []
find_anagrams_helper(s, word)
print(f'{len(result_list)} anagrams: {result_list}')
def find_anagrams_helper(s, word):
"""
    Recursive helper for find_anagrams(s).
    :param s: the word for which the user wants to find anagrams in the dictionary
    :param word: list collecting the indices of the letters of s chosen so far
    :return: None; matching dictionary words are appended to result_list
"""
if len(word) == len(s):
result = ''
for index in word:
result += s[index]
if result in dictionary:
if result not in result_list:
print('Searching...')
print(f'Found: \'{result}\' in dictionary..')
result_list.append(result)
else:
for i in range(len(s)):
if i not in word:
# choose
word.append(i)
# explore
find_anagrams_helper(s, word)
# un-choose
word.pop()
def has_prefix(sub_s):
"""
    Check whether any word in the dictionary starts with the given prefix.
    :param sub_s: the prefix string built from the currently chosen letter indices
    :return: bool, True if at least one dictionary word starts with sub_s, otherwise False
"""
read_dictionary()
bool_list = []
for word in dictionary:
if word.startswith(sub_s):
bool_list.append(1)
else:
bool_list.append(0)
if 1 in bool_list:
return True
return False
if __name__ == '__main__':
main()
|
the-stack_0_8971 | import operator
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesAnalytics:
def test_matmul(self):
# matmul test is for GH #10259
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
b = DataFrame(
np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
).T
# Series @ DataFrame -> Series
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
# DataFrame @ Series -> Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
# Series @ Series -> scalar
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
# GH 21530
# vector (1D np.array) @ Series (__rmatmul__)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
# GH 21530
# vector (1D list) @ Series (__rmatmul__)
result = operator.matmul(a.values.tolist(), a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
# GH 21530
# matrix (2D np.array) @ Series (__rmatmul__)
result = operator.matmul(b.T.values, a)
expected = np.dot(b.T.values, a.values)
tm.assert_almost_equal(result, expected)
# GH 21530
# matrix (2D nested lists) @ Series (__rmatmul__)
result = operator.matmul(b.T.values.tolist(), a)
expected = np.dot(b.T.values, a.values)
tm.assert_almost_equal(result, expected)
# mixed dtype DataFrame @ Series
a["p"] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
# different dtypes DataFrame @ Series
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
def test_ptp(self):
# GH21614
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
assert np.ptp(ser) == np.ptp(arr)
def test_repeat(self):
s = Series(np.random.randn(3), index=["a", "b", "c"])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
tm.assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep))
tm.assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name="x")
expected = Series(s.values.repeat(2), name="x", index=s.index.values.repeat(2))
tm.assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(s, 2, axis=0)
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing is True
s = Series(pd.date_range("20130101", periods=10))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(list(reversed(s.tolist())))
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
|
the-stack_0_8972 | #!/usr/bin/env python
# This example demonstrates the use of multiline 2D text using
# vtkTextMappers. It shows several justifications as well as
# single-line and multiple-line text inputs.
import vtk
font_size = 14
# Create the text mappers and the associated Actor2Ds.
# The font and text properties (except justification) are the same for
# each single line mapper. Let's create a common text property object
singleLineTextProp = vtk.vtkTextProperty()
singleLineTextProp.SetFontSize(font_size)
singleLineTextProp.SetFontFamilyToArial()
singleLineTextProp.BoldOff()
singleLineTextProp.ItalicOff()
singleLineTextProp.ShadowOff()
# The font and text properties (except justification) are the same for
# each multi line mapper. Let's create a common text property object
multiLineTextProp = vtk.vtkTextProperty()
multiLineTextProp.ShallowCopy(singleLineTextProp)
multiLineTextProp.BoldOn()
multiLineTextProp.ItalicOn()
multiLineTextProp.ShadowOn()
multiLineTextProp.SetLineSpacing(0.8)
# The text is on a single line and bottom-justified.
singleLineTextB = vtk.vtkTextMapper()
singleLineTextB.SetInput("Single line (bottom)")
tprop = singleLineTextB.GetTextProperty()
tprop.ShallowCopy(singleLineTextProp)
tprop.SetVerticalJustificationToBottom()
tprop.SetColor(1, 0, 0)
singleLineTextActorB = vtk.vtkActor2D()
singleLineTextActorB.SetMapper(singleLineTextB)
singleLineTextActorB.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
singleLineTextActorB.GetPositionCoordinate().SetValue(0.05, 0.85)
# The text is on a single line and center-justified (vertical
# justification).
singleLineTextC = vtk.vtkTextMapper()
singleLineTextC.SetInput("Single line (centered)")
tprop = singleLineTextC.GetTextProperty()
tprop.ShallowCopy(singleLineTextProp)
tprop.SetVerticalJustificationToCentered()
tprop.SetColor(0, 1, 0)
singleLineTextActorC = vtk.vtkActor2D()
singleLineTextActorC.SetMapper(singleLineTextC)
singleLineTextActorC.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
singleLineTextActorC.GetPositionCoordinate().SetValue(0.05, 0.75)
# The text is on a single line and top-justified.
singleLineTextT = vtk.vtkTextMapper()
singleLineTextT.SetInput("Single line (top)")
tprop = singleLineTextT.GetTextProperty()
tprop.ShallowCopy(singleLineTextProp)
tprop.SetVerticalJustificationToTop()
tprop.SetColor(0, 0, 1)
singleLineTextActorT = vtk.vtkActor2D()
singleLineTextActorT.SetMapper(singleLineTextT)
singleLineTextActorT.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
singleLineTextActorT.GetPositionCoordinate().SetValue(0.05, 0.65)
# The text is on multiple lines and left- and top-justified.
textMapperL = vtk.vtkTextMapper()
textMapperL.SetInput("This is\nmulti-line\ntext output\n(left-top)")
tprop = textMapperL.GetTextProperty()
tprop.ShallowCopy(multiLineTextProp)
tprop.SetJustificationToLeft()
tprop.SetVerticalJustificationToTop()
tprop.SetColor(1, 0, 0)
textActorL = vtk.vtkActor2D()
textActorL.SetMapper(textMapperL)
textActorL.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActorL.GetPositionCoordinate().SetValue(0.05, 0.5)
# The text is on multiple lines and center-justified (both horizontal and
# vertical).
textMapperC = vtk.vtkTextMapper()
textMapperC.SetInput("This is\nmulti-line\ntext output\n(centered)")
tprop = textMapperC.GetTextProperty()
tprop.ShallowCopy(multiLineTextProp)
tprop.SetJustificationToCentered()
tprop.SetVerticalJustificationToCentered()
tprop.SetColor(0, 1, 0)
textActorC = vtk.vtkActor2D()
textActorC.SetMapper(textMapperC)
textActorC.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActorC.GetPositionCoordinate().SetValue(0.5, 0.5)
# The text is on multiple lines and right- and bottom-justified.
textMapperR = vtk.vtkTextMapper()
textMapperR.SetInput("This is\nmulti-line\ntext output\n(right-bottom)")
tprop = textMapperR.GetTextProperty()
tprop.ShallowCopy(multiLineTextProp)
tprop.SetJustificationToRight()
tprop.SetVerticalJustificationToBottom()
tprop.SetColor(0, 0, 1)
textActorR = vtk.vtkActor2D()
textActorR.SetMapper(textMapperR)
textActorR.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActorR.GetPositionCoordinate().SetValue(0.95, 0.5)
# Draw the grid to demonstrate the placement of the text.
# Set up the necessary points.
Pts = vtk.vtkPoints()
Pts.InsertNextPoint(0.05, 0.0, 0.0)
Pts.InsertNextPoint(0.05, 1.0, 0.0)
Pts.InsertNextPoint(0.5, 0.0, 0.0)
Pts.InsertNextPoint(0.5, 1.0, 0.0)
Pts.InsertNextPoint(0.95, 0.0, 0.0)
Pts.InsertNextPoint(0.95, 1.0, 0.0)
Pts.InsertNextPoint(0.0, 0.5, 0.0)
Pts.InsertNextPoint(1.0, 0.5, 0.0)
Pts.InsertNextPoint(0.00, 0.85, 0.0)
Pts.InsertNextPoint(0.50, 0.85, 0.0)
Pts.InsertNextPoint(0.00, 0.75, 0.0)
Pts.InsertNextPoint(0.50, 0.75, 0.0)
Pts.InsertNextPoint(0.00, 0.65, 0.0)
Pts.InsertNextPoint(0.50, 0.65, 0.0)
# Set up the lines that use these points.
Lines = vtk.vtkCellArray()
Lines.InsertNextCell(2)
Lines.InsertCellPoint(0)
Lines.InsertCellPoint(1)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(2)
Lines.InsertCellPoint(3)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(4)
Lines.InsertCellPoint(5)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(6)
Lines.InsertCellPoint(7)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(8)
Lines.InsertCellPoint(9)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(10)
Lines.InsertCellPoint(11)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(12)
Lines.InsertCellPoint(13)
# Create a grid that uses these points and lines.
Grid = vtk.vtkPolyData()
Grid.SetPoints(Pts)
Grid.SetLines(Lines)
# Set up the coordinate system.
normCoords = vtk.vtkCoordinate()
normCoords.SetCoordinateSystemToNormalizedViewport()
# Set up the mapper and actor (2D) for the grid.
mapper = vtk.vtkPolyDataMapper2D()
mapper.SetInputData(Grid)
mapper.SetTransformCoordinate(normCoords)
gridActor = vtk.vtkActor2D()
gridActor.SetMapper(mapper)
gridActor.GetProperty().SetColor(0.1, 0.1, 0.1)
# Create the Renderer, RenderWindow, and RenderWindowInteractor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer; set the background and size; zoom in
# closer to the image; render
ren.AddActor2D(textActorL)
ren.AddActor2D(textActorC)
ren.AddActor2D(textActorR)
ren.AddActor2D(singleLineTextActorB)
ren.AddActor2D(singleLineTextActorC)
ren.AddActor2D(singleLineTextActorT)
ren.AddActor2D(gridActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(500, 300)
ren.GetActiveCamera().Zoom(1.5)
iren.Initialize()
renWin.Render()
iren.Start()
|
the-stack_0_8974 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 10:09:24 2021
@author: jbt5jf
TESTING SCRIPT for the neural network
"""
import matplotlib.pyplot as plt
import numpy as np
import imageio
from skimage.transform import resize
import tqdm
import cv2
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import *
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import numpy as np
from videos import sepVideos
import os
from Dataset import Dataset
# Check if the model is actually saving correctly
def testModel(model, path = "./mouse_heart/"):
input_folder = path
test = Dataset('.')
videos = [f for f in os.listdir(input_folder) if os.path.isfile(input_folder+f) and f[-3:]=='avi']
#for video in videos:
video = videos[3]
print(video)
if not os.path.exists(input_folder+video): os.makedirs(input_folder+video)
print('Spliting', video, '...')
x = sepVideos(video, save=False, resize=(128,128))
print(x.shape)
    segnet = tf.keras.models.load_model(model)
for i in range(test.shape[0]):
img, mask = test[i]
pred = segnet.predict(img.reshape(128,128,1)[tf.newaxis,...])
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(img)
ax2.imshow(pred.reshape(128,128))
plt.show()
break
#Try the network on the video
def testVideo(model, path = "./mouse_heart/"):
input_folder = path
test = Dataset('.')
videos = [f for f in os.listdir(input_folder) if os.path.isfile(input_folder+f) and f[-3:]=='avi']
for video in videos:
print(video)
if not os.path.exists(input_folder+video): os.makedirs(input_folder+video)
print('Spliting', video, '...')
x = sepVideos(video, save=False, resize=(128,128))
#print(x.shape)
segnet = tf.keras.models.load_model(model)
pred = segnet.predict(x.reshape(-1,128,128,1)).reshape(-1,128,128)
""" DEBUG STUFF
pred = segnet.predict(img.reshape(128,128,1)[tf.newaxis,...])
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(img)
ax2.imshow(pred.reshape(128,128))
"""
size = 128, 128*2 # Height, Width
fps = 10
print(pred.shape)
out = cv2.VideoWriter(f"{video.split('.')[0]}_segmented.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps, (size[1], size[0]), False)
for i in range(pred.shape[0]):
test = np.concatenate([x[i], pred[i]*255], axis=1).astype('uint8')
out.write(test)
out.release()
break
if __name__ == "__main__":
keras.backend.clear_session()
model = '2021-10-26_12-18-12model'+'.h5'
testModel(model)
testVideo(model)
|
the-stack_0_8975 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
https://tools.ietf.org/html/rfc7231#section-7.1.4
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.encoding import iri_to_uri
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
from django.utils.log import log_response
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.get('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict(dictitem(el) for el in cc)
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join(dictvalue(el) for el in cc.items())
response['Cache-Control'] = cc
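# Example (illustrative): for a response without a Cache-Control header,
# patch_cache_control(response, max_age=3600, public=True) yields
# "Cache-Control: max-age=3600, public"; boolean True values emit just the
# directive name, and underscores in keyword names become hyphens.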
def get_max_age(response):
"""
Return the max-age from the response Cache-Control header as an integer,
or None if it wasn't found or wasn't an integer.
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control']))
try:
return int(cc['max-age'])
except (ValueError, TypeError, KeyError):
pass
def set_response_etag(response):
if not response.streaming:
response['ETag'] = quote_etag(hashlib.md5(response.content).hexdigest())
return response
def _precondition_failed(request):
response = HttpResponse(status=412)
log_response(
'Precondition Failed: %s', request.path,
response=response,
request=request,
)
return response
def _not_modified(request, response=None):
new_response = HttpResponseNotModified()
if response:
# Preserve the headers required by Section 4.1 of RFC 7232, as well as
# Last-Modified.
for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'):
if header in response:
new_response[header] = response[header]
# Preserve cookies as per the cookie specification: "If a proxy server
# receives a response which contains a Set-cookie header, it should
# propagate the Set-cookie header to the client, regardless of whether
        # the response was 304 (Not Modified) or 200 (OK)."
# https://curl.haxx.se/rfc/cookie_spec.html
new_response.cookies = response.cookies
return new_response
def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Only return conditional responses on successful requests.
if response and not (200 <= response.status_code < 300):
return response
# Get HTTP request headers.
if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if_unmodified_since = if_unmodified_since and parse_http_date_safe(if_unmodified_since)
if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
# Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
if if_match_etags and not _if_match_passes(etag, if_match_etags):
return _precondition_failed(request)
# Step 2: Test the If-Unmodified-Since precondition.
if (not if_match_etags and if_unmodified_since and
not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
return _precondition_failed(request)
# Step 3: Test the If-None-Match precondition.
if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
else:
return _precondition_failed(request)
# Step 4: Test the If-Modified-Since precondition.
if (not if_none_match_etags and if_modified_since and
not _if_modified_since_passes(last_modified, if_modified_since)):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
# Step 5: Test the If-Range precondition (not supported).
# Step 6: Return original response since there isn't a conditional response.
return response
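# Illustrative sketch (editor's addition): a typical call site for
# get_conditional_response() inside a view -- when it returns None there is no
# applicable conditional response and the full response must be built.
def _example_conditional_view(request):
    etag = quote_etag('abc123')  # normally derived from the response content
    conditional = get_conditional_response(request, etag=etag)
    if conditional is not None:
        return conditional  # 304 or 412, depending on the request headers
    response = HttpResponse('full body')
    response['ETag'] = etag
    return response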
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith('W/'):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in section 3.4 of
RFC 7232.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip('W/')
etags = (etag.strip('W/') for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
"""
Add HTTP caching headers to the given HttpResponse: Expires and
Cache-Control.
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Add headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
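# Illustrative sketch (editor's addition): add_never_cache_headers() stamps a
# response so browsers and shared caches will not store it.
def _example_never_cache():
    response = HttpResponse()
    add_never_cache_headers(response)
    # Cache-Control becomes 'max-age=0, no-cache, no-store, must-revalidate'
    # and Expires is set to the current time, i.e. immediately stale.
    return response['Cache-Control']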
def patch_vary_headers(response, newheaders):
"""
Add (or update) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = {header.lower() for header in vary_headers}
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
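# Illustrative sketch (editor's addition): patch_vary_headers() merges new
# header names case-insensitively while preserving the existing order.
def _example_patch_vary_headers():
    response = HttpResponse()
    response['Vary'] = 'Cookie'
    patch_vary_headers(response, ['accept-language', 'COOKIE'])
    return response['Vary']  # 'Cookie, accept-language'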
def has_vary_header(response, header_query):
"""
Check to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = {header.lower() for header in vary_headers}
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, add the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
cache_key += '.%s' % get_current_timezone_name()
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Return a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header)
if value is not None:
ctx.update(value.encode())
url = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, url.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Return a cache key for the header cache."""
url = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, url.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Return a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there isn't a headerlist stored, return None, indicating that the page
needs to be rebuilt.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learn what headers to take into account for some request URL from the
response object. Store those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
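# Illustrative sketch (editor's addition): the pairing used by Django's cache
# middleware -- learn_cache_key() in the response phase, get_cache_key() on a
# later request. The timeout and cache alias shown here are arbitrary examples.
def _example_cache_roundtrip(request, response):
    cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    # Response phase: record which Vary headers matter and cache the page.
    page_key = learn_cache_key(request, response, cache_timeout=300)
    cache.set(page_key, response, 300)
    # Request phase: rebuild the key from the stored header list, if any.
    key = get_cache_key(request, method=request.method)
    return cache.get(key) if key is not None else None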
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
the-stack_0_8977 | from django.core.management.base import BaseCommand, CommandError
from ark.transactions import TxBroadcaster
class Command(BaseCommand):
help = 'start/stop a TxBroadcaster'
def add_arguments(self, parser):
parser.add_argument('uid', nargs=1, type=int)
parser.add_argument('network', nargs=1, type=str)
def handle(self, *args, **options):
self.stdout.write('creating TxBroadcaster: {uid}, network: {network}'.format(
uid=options['uid'][0],
network=options['network'][0]))
caster = TxBroadcaster(uid=options['uid'][0], network=options['network'][0])
self.stdout.write('created successfully')
self.stdout.write('starting TxBroadcaster: {}'.format(options['uid'][0]))
caster.run()
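# Illustrative usage note (editor's addition): this is invoked like any other
# Django management command, with the broadcaster uid and network name as
# positional arguments, e.g.
#     python manage.py <command_name> 1 <network>
# where <command_name> is whatever module name this file is saved under.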
|
the-stack_0_8983 | """Logging utilities."""
import asyncio
from asyncio.events import AbstractEventLoop
from functools import partial, wraps
import inspect
import logging
import threading
import traceback
from typing import Any, Callable, Coroutine, Optional
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text: str) -> None:
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record: logging.LogRecord) -> bool:
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, "*******")
return True
# pylint: disable=invalid-name
class AsyncHandler:
"""Logging handler wrapper to add an async layer."""
def __init__(self, loop: AbstractEventLoop, handler: logging.Handler) -> None:
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue: asyncio.Queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self) -> None:
"""Wrap close to handler."""
self.emit(None)
async def async_close(self, blocking: bool = False) -> None:
"""Close the handler.
When blocking=True, will wait till closed.
"""
await self._queue.put(None)
if blocking:
while self._thread.is_alive():
await asyncio.sleep(0)
def emit(self, record: Optional[logging.LogRecord]) -> None:
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self) -> str:
"""Return the string names."""
return str(self.handler)
def _process(self) -> None:
"""Process log in a thread."""
try:
while True:
record = asyncio.run_coroutine_threadsafe(
self._queue.get(), self.loop
).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
except asyncio.CancelledError:
self.handler.close()
def createLock(self) -> None:
"""Ignore lock stuff."""
pass
def acquire(self) -> None:
"""Ignore lock stuff."""
pass
def release(self) -> None:
"""Ignore lock stuff."""
pass
@property
def level(self) -> int:
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self) -> Optional[logging.Formatter]:
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self) -> str:
"""Wrap property set_name to handler."""
return self.handler.get_name() # type: ignore
@name.setter
def name(self, name: str) -> None:
"""Wrap property get_name to handler."""
self.handler.set_name(name) # type: ignore
def catch_log_exception(
func: Callable[..., Any], format_err: Callable[..., Any], *args: Any
) -> Callable[[], None]:
"""Decorate a callback to catch and log exceptions."""
def log_exception(*args: Any) -> None:
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/home-assistant/issues/24982
module_name = __name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
# Check for partials to properly determine if coroutine function
check_func = func
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
await func(*args)
except Exception: # pylint: disable=broad-except
log_exception(*args)
wrapper_func = async_wrapper
else:
@wraps(func)
def wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
func(*args)
except Exception: # pylint: disable=broad-except
log_exception(*args)
wrapper_func = wrapper
return wrapper_func
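# Illustrative sketch (editor's addition): wrapping a plain callback so that an
# exception raised inside it is logged with a formatted message instead of
# propagating to the caller.
def _example_catch_log_exception() -> None:
    def handle_event(event: str) -> None:
        raise ValueError("boom")

    safe_handler = catch_log_exception(
        handle_event, lambda event: f"Error while handling event {event}"
    )
    safe_handler("my_event")  # logs the error and traceback, does not raise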
def catch_log_coro_exception(
target: Coroutine[Any, Any, Any], format_err: Callable[..., Any], *args: Any
) -> Coroutine[Any, Any, Any]:
"""Decorate a coroutine to catch and log exceptions."""
async def coro_wrapper(*args: Any) -> Any:
"""Catch and log exception."""
try:
return await target
except Exception: # pylint: disable=broad-except
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/home-assistant/issues/24982
module_name = __name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
return None
return coro_wrapper()
def async_create_catching_coro(target: Coroutine) -> Coroutine:
"""Wrap a coroutine to catch and log exceptions.
The exception will be logged together with a stacktrace of where the
coroutine was wrapped.
target: target coroutine.
"""
trace = traceback.extract_stack()
wrapped_target = catch_log_coro_exception(
target,
lambda *args: "Exception in {} called from\n {}".format(
target.__name__, # type: ignore
"".join(traceback.format_list(trace[:-1])),
),
)
return wrapped_target
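# Illustrative sketch (editor's addition): awaiting a coroutine whose
# exceptions should be logged together with the call site that wrapped it.
async def _example_async_create_catching_coro() -> None:
    async def flaky_task() -> None:
        raise RuntimeError("task failed")

    # The RuntimeError is logged (with the wrapping stack trace) and swallowed.
    await async_create_catching_coro(flaky_task())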
|
the-stack_0_8984 | from netaddr import IPAddress
__all__ = [
'to_server_dict',
'to_dns_zone_dict',
'to_dns_record_dict'
]
def to_server_dict(server):
public_ips = [ip['addr'] for ip in server.addresses['public']]
private_ips = [ip['addr'] for ip in server.addresses['private']]
# Pick out first public IPv4 and IPv6 address
public_ipv4 = None
public_ipv6 = None
for ip in public_ips:
try:
ip_obj = IPAddress(ip)
except Exception:
continue
if not ip_obj.is_private():
if ip_obj.version == 4:
public_ipv4 = ip
elif ip_obj.version == 6:
public_ipv6 = ip
result = {
'id': server.id,
'name': server.name,
'status': server.status,
'image_id': server.image['id'],
'flavor_id': server.flavor['id'],
'public_ips': public_ips,
'private_ips': private_ips,
'public_ipv4': public_ipv4,
'public_ipv6': public_ipv6,
'key_name': server.key_name,
'metadata': server.metadata
}
return result
def to_dns_zone_dict(zone):
result = {
'id': zone.id,
'name': zone.name,
'email_address': zone.emailAddress,
'ttl': zone.ttl
}
return result
def to_dns_record_dict(record):
result = {
'id': record.id,
'name': record.name,
'type': record.type,
'data': record.data,
'ttl': record.ttl
}
return result
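# Illustrative sketch (editor's addition): the converters only read attributes,
# so any object exposing the expected fields can be flattened; the class below
# is a stand-in for a real DNS zone object.
def _example_to_dns_zone_dict():
    class _Zone:
        id = 'zone-1'
        name = 'example.com'
        emailAddress = 'hostmaster@example.com'
        ttl = 300

    return to_dns_zone_dict(_Zone())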
|
the-stack_0_8987 | import IPython
import numpy as np
import pandas as pd
def display(*dfs, head: bool = True):
"""Display the dataframes in _dfs_"""
for df in dfs:
IPython.display.display(df.head() if head else df)
def reduce_mem_usage(df: pd.DataFrame, verbose: bool = False) -> pd.DataFrame:
"""Efficiently manage the memory usage of _df_"""
if verbose:
start_mem = df.memory_usage().sum() / 1024 ** 2
print("~> Memory usage of dataframe is {:.3f} MG".format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int" or np.all(np.mod(df[col], 1) == 0):
# Booleans mapped to integers
if list(df[col].unique()) == [1, 0]:
df[col] = df[col].astype(bool)
elif c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.uint8).min and c_max < np.iinfo(np.uint8).max:
df[col] = df[col].astype(np.uint8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.uint16).min and c_max < np.iinfo(np.uint16).max
):
df[col] = df[col].astype(np.uint16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.uint32).min and c_max < np.iinfo(np.uint32).max
):
df[col] = df[col].astype(np.uint32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
elif (
c_min > np.iinfo(np.uint64).min and c_max < np.iinfo(np.uint64).max
):
df[col] = df[col].astype(np.uint64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
pass
if verbose:
end_mem = df.memory_usage().sum() / 1024 ** 2
print("~> Memory usage after optimization is: {:.3f} MG".format(end_mem))
print("~> Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
print("---" * 20)
return df
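# Illustrative sketch (editor's addition): typical usage on a small dataframe;
# the 0/1 column collapses to bool and the small float column to float16.
def _example_reduce_mem_usage() -> pd.DataFrame:
    df = pd.DataFrame({"flag": [1, 0, 0, 1], "price": [1.5, 2.5, 3.5, 4.5]})
    return reduce_mem_usage(df, verbose=True)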
def extract_num(ser: pd.Series) -> pd.Series:
"""Extract the numerical value from a string"""
return ser.str.extract(r"(\d+)").astype(np.int16)
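# Illustrative sketch (editor's addition): extract_num() keeps only the first
# run of digits found in each string.
def _example_extract_num():
    ser = pd.Series(["item_42", "item_7"])
    return extract_num(ser)  # values 42 and 7, stored as int16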
|