the-stack_0_13450
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton
from PyQt5.QtGui import QIcon
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setGeometry(100, 200, 300, 400)
self.setWindowTitle("PyQt")
self.setWindowIcon(QIcon("icon/graph.png"))
        btn = QPushButton("Button 1", self)
btn.move(10, 10)
        btn2 = QPushButton("Button 2", self)
btn2.move(10, 40)
app = QApplication(sys.argv)
window = MyWindow()
window.show()
app.exec_()
the-stack_0_13451
from unittest import TestCase, main
from expects import expect, equal
from twin_sister.injection.dependency_context import DependencyContext
class TestSetEnv(TestCase):
def test_can_set_arbitrary_var(self):
key = 'some_key'
value = 'some_value'
context = DependencyContext(supply_env=True)
context.set_env(**{key: value})
expect(context.os.environ[key]).to(equal(value))
def test_can_set_multiple_vars(self):
k1 = 'doe'
v1 = 'a deer, a female deer'
k2 = 'ray'
v2 = 'a drop of golden sun'
context = DependencyContext(supply_env=True)
context.set_env(**{k1: v1, k2: v2})
expect(context.os.environ[k1]).to(equal(v1))
expect(context.os.environ[k2]).to(equal(v2))
def test_can_replace_existing_var(self):
key = 'quokka'
old = 'old value'
new = 'new value'
context = DependencyContext(supply_env=True)
context.set_env(**{key: old})
context.set_env(**{key: new})
expect(context.os.environ[key]).to(equal(new))
def test_does_not_affect_unspecified_var(self):
existing_key = 'dog_milk'
existing_value = 'Lasts longer than any other milk'
context = DependencyContext(supply_env=True)
context.os.environ[existing_key] = existing_value
context.set_env(goat_milk='and little lambs eat ivy')
expect(context.os.environ[existing_key]).to(
equal(existing_value))
def test_converts_number_to_string(self):
context = DependencyContext(supply_env=True)
context.set_env(n=13)
expect(context.os.environ['n']).to(equal(str(13)))
def test_converts_bool_to_string(self):
context = DependencyContext(supply_env=True)
context.set_env(n=False)
expect(context.os.environ['n']).to(equal(str(False)))
def test_converts_arbitrary_rubbish_to_string(self):
context = DependencyContext(supply_env=True)
rubbish = {'menu': ['spam', 'eggs', 'sausage', 'biggles']}
context.set_env(rubbish=rubbish)
expect(context.os.environ['rubbish']).to(
equal(str(rubbish)))
def test_complains_if_env_not_supplied(self):
context = DependencyContext()
try:
context.set_env(things='0')
assert False, 'No exception was raised'
except RuntimeError:
pass
if '__main__' == __name__:
main()
the-stack_0_13455
import os
import sys
from config import cfg
import argparse
import torch
from torch.backends import cudnn
import torchvision.transforms as T
from PIL import Image
sys.path.append('.')
from utils.logger import setup_logger
from model import make_model
import numpy as np
import cv2
from utils.metrics import cosine_similarity
def visualizer(test_img, camid, top_k = 10, img_size=[128,128]):
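    # Note (added comment): this helper reads `query_img`, `indices`, and
    # `img_path` from module scope; they are assigned in the `__main__` block
    # below, so the function only works when called from this script.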
figure = np.asarray(query_img.resize((img_size[1],img_size[0])))
for k in range(top_k):
name = str(indices[0][k]).zfill(6)
img = np.asarray(Image.open(img_path[indices[0][k]]).resize((img_size[1],img_size[0])))
figure = np.hstack((figure, img))
title=name
figure = cv2.cvtColor(figure,cv2.COLOR_BGR2RGB)
if not os.path.exists(cfg.OUTPUT_DIR+ "/results/"):
print('create a new folder named results in {}'.format(cfg.OUTPUT_DIR))
os.makedirs(cfg.OUTPUT_DIR+ "/results")
cv2.imwrite(cfg.OUTPUT_DIR+ "/results/{}-cam{}.png".format(test_img,camid),figure)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ReID Baseline Training")
parser.add_argument(
"--config_file", default="./configs/Market1501.yaml", help="path to config file", type=str
)
args = parser.parse_args()
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.freeze()
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
cudnn.benchmark = True
model = make_model(cfg, 255)
model.load_param(cfg.TEST.TEST_WEIGHT)
device = 'cuda'
model = model.to(device)
transform = T.Compose([
T.Resize(cfg.DATA.INPUT_SIZE),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
logger = setup_logger('{}.test'.format(cfg.PROJECT_NAME), cfg.OUTPUT_DIR, if_train=False)
model.eval()
for test_img in os.listdir(cfg.TEST.QUERY_DIR):
logger.info('Finding ID {} ...'.format(test_img))
gallery_feats = torch.load(cfg.OUTPUT_DIR + '/gfeats.pth')
img_path = np.load(cfg.OUTPUT_DIR +'/imgpath.npy')
print(gallery_feats.shape, len(img_path))
query_img = Image.open(cfg.TEST.QUERY_DIR + test_img)
input = torch.unsqueeze(transform(query_img), 0)
input = input.to(device)
with torch.no_grad():
query_feat = model(input)
dist_mat = cosine_similarity(query_feat, gallery_feats)
indices = np.argsort(dist_mat, axis=1)
        visualizer(test_img, camid='mixed', top_k=10, img_size=cfg.DATA.INPUT_SIZE)
the-stack_0_13458
"""Create LM input function for TPUEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
# pylint: disable=g-import-not-at-top
try:
from google3.experimental.users.zihangd.pretrain.data_utils import type_cast
from google3.experimental.users.zihangd.pretrain.data_utils import sparse_to_dense
except ImportError as e:
from data_utils import type_cast
from data_utils import sparse_to_dense
# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS
def lm_process(dataset, seq_len, use_bfloat16):
  """Turn a dataset of doc tfrecords into a dataset of chunked sequences."""
# Flatten the original dataset into a continuous stream and then chunk the
# continuous stream into segments of fixed length `seq_len`
dataset = dataset.unbatch().repeat()
# Each window has one more element so that we can split inputs & target.
# Meanwhile, we only shift `seq_len` positions.
# Example:
# tf.data.Dataset.range(7).window(size=3, shift=2) produces
# { {0, 1, 2}, {2, 3, 4}, {4, 5, 6}}
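  # A further illustrative sketch (not from the original code): with seq_len=3,
  # the window {0, 1, 2, 3} is later split by `split_inp_and_tgt` into
  # inputs=[0, 1, 2] and target=[1, 2, 3].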
window_size = seq_len + 1
dataset = dataset.window(size=window_size, shift=seq_len)
def window_to_tensor(example):
"""Converts a dataset of (nested) windows to one of (nested) tensors."""
new_example = {}
for k, v in example.items():
      # Here, v is a "window", i.e. a finite-sized dataset that contains
      # "window_size" tensors of shape [].
      # Hence, `v.batch(window_size)` returns a new dataset `u` that contains
      # "one single" tensor of shape [window_size].
      # Then, `get_single_element` simply gets "the single tensor" out of `u`.
u = v.batch(window_size)
element = tf.data.experimental.get_single_element(u)
new_example[k] = element
return new_example
dataset = dataset.map(window_to_tensor)
def split_inp_and_tgt(example):
"""Split inputs and target from the windowed seq and set shape & type."""
inputs = example.pop("inputs")
for k in example.keys():
example[k] = example[k][:seq_len]
example["inputs"] = inputs[:seq_len]
example["target"] = inputs[1:seq_len+1]
for k in example.keys():
example[k].set_shape((seq_len))
# type cast for example
type_cast(example, use_bfloat16)
return example
dataset = dataset.map(split_inp_and_tgt)
return dataset
def get_record_parser(offline_pos):
"""Config tfrecord parser."""
def parser(record):
"""function used to parse tfrecord."""
record_spec = {
"inputs": tf.VarLenFeature(tf.int64),
"type_id": tf.FixedLenFeature([1], tf.int64),
}
if offline_pos:
record_spec["pos_seq"] = tf.VarLenFeature(tf.int64)
# retrieve serialized example
example = tf.parse_single_example(
serialized=record,
features=record_spec)
inputs = example["inputs"]
inp_len = tf.shape(inputs)[0]
# expand type id to full length
example["type_id"] = tf.broadcast_to(example["type_id"], [inp_len])
if not offline_pos:
# generate position sequence online
example["pos_seq"] = tf.range(inp_len)
# convert all sparse example to dense
example = sparse_to_dense(example)
return example
return parser
def parse_record(dataset,
parser,
is_training,
num_threads=64,
file_shuffle_size=None,
record_shuffle_size=None):
"""Parse tfrecords in a dataset."""
if is_training:
# file-level shuffle
if file_shuffle_size and file_shuffle_size > 1:
tf.logging.info("File level shuffle with size %d", file_shuffle_size)
dataset = dataset.shuffle(file_shuffle_size)
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(8, file_shuffle_size)
tf.logging.info("Interleave %d files", cycle_length)
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=True,
cycle_length=cycle_length))
if record_shuffle_size and record_shuffle_size > 1:
tf.logging.info("Record level shuffle with size %d",
record_shuffle_size)
dataset = dataset.shuffle(buffer_size=record_shuffle_size)
dataset = dataset.map(parser, num_parallel_calls=num_threads)
dataset = dataset.cache().repeat()
else:
dataset = tf.data.TFRecordDataset(dataset)
dataset = dataset.map(parser)
return dataset
def sent_lm_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=4096,
sequence_shuffle_size=2048):
"""Get sentence level LM dataset."""
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(offline_pos=False),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = lm_process(dataset, seq_len, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
    tf.logging.info("Sequence level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def semidoc_lm_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256,
sequence_shuffle_size=2048):
# pylint: disable=g-doc-args
"""Get semi-doc level LM dataset.
Notes:
- Each sequence comes from the same document (except for boundary cases).
This is different from the standard sent-level LM dataset.
- No consecutivity is ensured across batches, which is different from the
standard doc-level LM dataset.
- Effectively, semi-doc dataset maintains short range (seq_len) dependency,
which is more random than doc-level and less random than sent-level.
Returns:
a tf.data.Dataset
"""
# pylint: enable=g-doc-args
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(offline_pos=True),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = lm_process(dataset, seq_len, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
    tf.logging.info("Sequence level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def doc_lm_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256):
"""Get document level LM dataset."""
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Create dataset from file_paths
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
if len(file_paths) // bsz_per_core >= 2:
    ##### Enough input files, so do file-level sharding
tf.logging.info("Shard first")
# Split the dataset into `bsz_per_core` disjoint shards
shards = [dataset.shard(bsz_per_core, i) for i in range(bsz_per_core)]
# Parse records
file_shuffle_size = (len(file_paths) + bsz_per_core - 1) // bsz_per_core
parse_shard = functools.partial(
parse_record,
parser=get_record_parser(offline_pos=True),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=file_shuffle_size,
record_shuffle_size=record_shuffle_size)
shards = [parse_shard(dataset=shard) for shard in shards]
else:
##### Not enough input files, so do record-level sharding
tf.logging.info("Parse first")
# Parse records
dataset = parse_record(dataset,
parser=get_record_parser(offline_pos=True),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_names),
record_shuffle_size=record_shuffle_size)
# Split the dataset into `bsz_per_core` disjoint shards
shards = [dataset.shard(bsz_per_core, i) for i in range(bsz_per_core)]
# process each shard
process_shard = functools.partial(
lm_process, seq_len=seq_len, use_bfloat16=use_bfloat16)
shards = [process_shard(dataset=shard) for shard in shards]
# merge shards into a single batched dataset
def batch_zipped_dataset(*features):
"""Stack a list of homogeneous inputs from a zipped dataset into one."""
new_feature = {}
for key in features[0].keys():
tensor_list = [f[key] for f in features]
new_feature[key] = tf.stack(tensor_list, axis=0) # [sum bsz, length]
return new_feature
dataset = tf.data.Dataset.zip(tuple(shards))
dataset = dataset.map(batch_zipped_dataset)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def get_input_fn(
doc_dir,
semi_dir,
sent_dir,
split,
uncased,
seq_len,
bsz_per_host,
num_hosts=1,
num_core_per_host=1,
use_bfloat16=False,
**kwargs):
"""Create Estimator input function."""
def dir_to_paths(data_dir, data_type):
"""Get data file paths in the given dir."""
file_paths = []
if data_dir:
tf.logging.info("=" * 120)
case_str = "uncased." if uncased else ""
glob_base = "data.{}.{}.{}tfrecord*".format(split, data_type, case_str)
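      # e.g. "data.train.doc.uncased.tfrecord*" when split="train",
      # data_type="doc", and uncased=True (illustrative expansion of the pattern).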
for idx, dir_path in enumerate(data_dir.split(",")):
glob = os.path.join(dir_path, glob_base)
cur_file_paths = sorted(tf.io.gfile.glob(glob))
file_paths += cur_file_paths
tf.logging.info("[%d] Data glob: %s", idx, glob)
tf.logging.info("[%d] Num of file path: %d", idx, len(cur_file_paths))
tf.logging.info("[%s] Total number of file path: %d", data_type,
len(file_paths))
return file_paths
doc_files = dir_to_paths(doc_dir, "doc")
semi_files = dir_to_paths(semi_dir, "doc")
sent_files = dir_to_paths(sent_dir, "sent")
file_list = [doc_files, semi_files, sent_files]
func_list = [doc_lm_dataset, semidoc_lm_dataset, sent_lm_dataset]
def input_fn(params):
"""Construct input function for TPUEstimator."""
assert params["batch_size"] * num_core_per_host == bsz_per_host
datasets = []
for files, func in zip(file_list, func_list):
if files:
cur_dataset = func(
params=params,
num_hosts=num_hosts,
num_core_per_host=num_core_per_host,
is_training=split == "train",
file_names=files,
seq_len=seq_len,
use_bfloat16=use_bfloat16,
**kwargs)
datasets.append(cur_dataset)
if len(datasets) > 1:
dataset = tf.data.experimental.sample_from_datasets(datasets)
elif len(datasets) == 1:
dataset = datasets[0]
return dataset
return input_fn
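# Usage sketch (illustrative only; the directory, sequence length, and batch
# sizes below are assumptions, not values taken from this module):
#   input_fn = get_input_fn(doc_dir=None, semi_dir=None, sent_dir="/data/sent",
#                           split="train", uncased=True, seq_len=512,
#                           bsz_per_host=32, num_hosts=1, num_core_per_host=1)
#   dataset = input_fn({"batch_size": 32})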
the-stack_0_13461
import discord
import os
import datetime
from keep_alive import keep_alive
from discord.ext import commands
my_secret = os.environ['Token']
# client = discord.Client()
# test
bot = commands.Bot(command_prefix='!')
@bot.command(pass_context=True,
help="Update the role of user if you have Admin role eg: !updaterole 'xyz#0000' 'Admin'.",
brief="-Update the role of user."
)
@commands.has_role("Admin")
async def updaterole(ctx, user: discord.Member, role, help="This is role"):
member = user
print(member)
var = discord.utils.get(ctx.guild.roles, name = role)
print(var)
await member.add_roles(var)
    await ctx.send(f'User `{member}` has been assigned the role {role}')
@bot.command(aliases=['make_role'], help="-Update the role of user if you have Admin role eg: !make_role 'XYZ' ",
brief="-Update the role of user."
)
@commands.has_permissions(manage_roles=True) # Check if the user executing the command can manage roles
async def create_role(ctx, name):
guild = ctx.guild
await guild.create_role(name=name)
await ctx.send(f'Role `{name}` has been created')
@bot.command(name="Poro",help="-Type Poro with prefix of '!' to comunicate with me . ")
async def x(ctx):
emoji = '\N{THUMBS UP SIGN}'
# member = ctx.author
    await ctx.send(f"Hello {ctx.author}, I am Poro and I like you {emoji}.")
# guild = ctx.guild
# await guild.create_role(name="role name")
@bot.event
async def on_ready():
    print(f"{bot.user.name} has connected with you!")
@bot.command(name="create_channel",help="-to create channel eg:!create_channel 'XYZ'")
@commands.has_role("Admin")
async def create_Channel(xx,channel_name):
guild=xx.guild
existing_channel=discord.utils.get(guild.channels, name=channel_name)
if not existing_channel:
print(f"created new channel:{channel_name}")
await guild.create_text_channel(channel_name)
        await xx.send(f"I have created a channel named {channel_name}; created by {xx.author} on {datetime.datetime.now()}")
# @client.event
# async def on_ready():
# print(f'{client.user.name} has connected to Discord!')
# @client.event
# async def on_member_join(member):
# await member.create_dm()
# await member.dm_channel.send(
# f'Hi {member.name}, welcome to my Discord server!'
# )
# # client.run(my_secret)
# @client.event
# async def on_message(message):
# if message.author== client.user:
# return
# if message.content.startswith("$hello"):
# await message.channel.send(f"Hello! {message.author}")
keep_alive()
bot.run(my_secret)
the-stack_0_13463
# Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
import mock
import pytest
def test_heartbeat_inactive_manager_active_rpc(caplog):
caplog.set_level(logging.DEBUG)
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
manager.is_active = False
manager.heartbeat.return_value = True # because of active rpc
heartbeater_ = heartbeater.Heartbeater(manager)
make_sleep_mark_event_as_done(heartbeater_)
heartbeater_.heartbeat()
assert "Sent heartbeat" in caplog.text
assert "exiting" in caplog.text
def test_heartbeat_inactive_manager_inactive_rpc(caplog):
caplog.set_level(logging.DEBUG)
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
manager.is_active = False
manager.heartbeat.return_value = False # because of inactive rpc
heartbeater_ = heartbeater.Heartbeater(manager)
make_sleep_mark_event_as_done(heartbeater_)
heartbeater_.heartbeat()
assert "Sent heartbeat" not in caplog.text
assert "exiting" in caplog.text
def test_heartbeat_stopped(caplog):
caplog.set_level(logging.DEBUG)
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
heartbeater_.stop()
heartbeater_.heartbeat()
assert "Sent heartbeat" not in caplog.text
assert "exiting" in caplog.text
def make_sleep_mark_event_as_done(heartbeater):
# Make sleep actually trigger the done event so that heartbeat()
# exits at the end of the first run.
def trigger_done(timeout):
assert timeout
heartbeater._stop_event.set()
heartbeater._stop_event.wait = trigger_done
def test_heartbeat_once():
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
make_sleep_mark_event_as_done(heartbeater_)
heartbeater_.heartbeat()
manager.heartbeat.assert_called_once()
@mock.patch("threading.Thread", autospec=True)
def test_start(thread):
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
heartbeater_.start()
thread.assert_called_once_with(
name=heartbeater._HEARTBEAT_WORKER_NAME, target=heartbeater_.heartbeat
)
thread.return_value.start.assert_called_once()
assert heartbeater_._thread is not None
@mock.patch("threading.Thread", autospec=True)
def test_start_already_started(thread):
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
heartbeater_._thread = mock.sentinel.thread
with pytest.raises(ValueError):
heartbeater_.start()
thread.assert_not_called()
def test_stop():
manager = mock.create_autospec(
streaming_pull_manager.StreamingPullManager, instance=True
)
heartbeater_ = heartbeater.Heartbeater(manager)
thread = mock.create_autospec(threading.Thread, instance=True)
heartbeater_._thread = thread
heartbeater_.stop()
assert heartbeater_._stop_event.is_set()
thread.join.assert_called_once()
assert heartbeater_._thread is None
def test_stop_no_join():
heartbeater_ = heartbeater.Heartbeater(mock.sentinel.manager)
heartbeater_.stop()
the-stack_0_13464
# Copyright (c) 2019 Toyota Research Institute. All rights reserved.
"""
This module provides objects related to the discovery of
new crystal structures using structural domains.
"""
import pandas as pd
import os
from datetime import datetime
from monty.serialization import dumpfn
from camd.domain import StructureDomain, heuristic_setup
from camd.agent.stability import AgentStabilityAdaBoost
from camd.agent.base import RandomAgent
from camd.experiment.base import ATFSampler
from camd.campaigns.base import Campaign
from camd import CAMD_TEST_FILES, CAMD_S3_BUCKET, __version__
from camd.utils.data import load_dataframe, s3_sync
from camd.analysis import StabilityAnalyzer
from camd.experiment.dft import OqmdDFTonMC1
from sklearn.neural_network import MLPRegressor
import pickle
class ProtoDFTCampaign(Campaign):
"""
Subclass of Campaign which implements custom methods
and factories for constructing prototype-generation
stability campaigns for materials discovery with DFT
experiments
"""
@classmethod
def from_chemsys(cls, chemsys, prefix="proto-dft-2/runs"):
"""
Class factory method for constructing campaign from
chemsys.
Args:
chemsys (str): chemical system for the campaign
prefix (str): prefix for s3
Returns:
(ProtoDFTCampaign): Standard proto-dft campaign from
the chemical system
"""
s3_prefix = "{}/{}".format(prefix, chemsys)
# Initialize s3
dumpfn({"started": datetime.now().isoformat(),
"version": __version__}, "start.json")
s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')
# Get structure domain
element_list = chemsys.split('-')
max_coeff, charge_balanced = heuristic_setup(element_list)
domain = StructureDomain.from_bounds(
element_list, charge_balanced=charge_balanced,
n_max_atoms=20, **{'grid': range(1, max_coeff)})
candidate_data = domain.candidates()
# Dump structure/candidate data
with open('candidate_data.pickle', 'wb') as f:
pickle.dump(candidate_data, f)
s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')
# Set up agents and loop parameters
agent = AgentStabilityAdaBoost(
model=MLPRegressor(hidden_layer_sizes=(84, 50)),
n_query=10,
hull_distance=0.2,
exploit_fraction=1.0,
uncertainty=True,
alpha=0.5,
diversify=True,
n_estimators=20
)
analyzer = StabilityAnalyzer(hull_distance=0.2)
experiment = OqmdDFTonMC1(timeout=30000)
seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")
# Construct and start loop
return cls(
candidate_data=candidate_data, agent=agent, experiment=experiment,
analyzer=analyzer, seed_data=seed_data,
heuristic_stopper=5, s3_prefix=s3_prefix
)
def autorun(self):
"""
Method for running this campaign automatically
Returns:
None
"""
n_max_iter = n_max_iter_heuristics(
len(self.candidate_data), 10)
self.auto_loop(
n_iterations=n_max_iter, monitor=True,
initialize=True, save_iterations=True
)
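# Usage sketch (illustrative; the chemical system below is an arbitrary example):
#   campaign = ProtoDFTCampaign.from_chemsys("Fe-O")
#   campaign.autorun()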
class CloudATFCampaign(Campaign):
"""
Simple subclass for cloud-based ATF, mostly for testing
"""
@classmethod
def from_chemsys(cls, chemsys):
"""
Args:
chemsys:
Returns:
"""
s3_prefix = "oqmd-atf/runs/{}".format(chemsys)
df = pd.read_csv(os.path.join(CAMD_TEST_FILES, 'test_df.csv'))
n_seed = 200 # Starting sample size
n_query = 10 # This many new candidates are "calculated with DFT" (i.e. requested from Oracle -- DFT)
agent = RandomAgent(n_query=n_query)
analyzer = StabilityAnalyzer(hull_distance=0.05)
experiment = ATFSampler(dataframe=df)
candidate_data = df
return cls(candidate_data, agent, experiment, analyzer,
create_seed=n_seed, s3_prefix=s3_prefix)
def autorun(self):
"""
Runs campaign with standard parameters
Returns:
None
"""
self.auto_loop(initialize=True, n_iterations=3)
return True
def n_max_iter_heuristics(n_data, n_query, low_bound=5, up_bound=20):
"""
Helper method to define maximum number of iterations for
    a given campaign. This is based on the empirical evidence that,
    in various systems, >90% of stable materials are identified
when 25% of candidates are tested. We also enforce upper
and lower bounds of 20 and 5 to avoid edge cases with too
many or too few calculations to run.
Args:
n_data (int): number of data points in candidate space
n_query (int): number of queries allowed in each iteration
low_bound (int): lower bound allowed for n_max_iter
        up_bound (int): upper bound allowed for n_max_iter
Returns:
maximum number of iterations as integer
"""
_target = round(n_data * 0.25/n_query)
if _target < low_bound:
return low_bound
else:
return min(_target, up_bound)
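# Worked example (illustrative, not part of the original module): with
# n_data=1000 candidates and n_query=10 per iteration,
# round(1000 * 0.25 / 10) = 25, which is then clipped to the upper bound of 20.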
the-stack_0_13466
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "prometheus", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-prometheus',
version=ABOUT["__version__"],
description='The prometheus check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent prometheus check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='New BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.prometheus'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
the-stack_0_13467
# -*- coding: utf-8 -*-
import logging, urllib, time
from django.utils.translation import gettext as _
from django.utils.timezone import now
from crontab_monitor.models import SelectOption, single_entry_point
def single_entry_point_of_crontab(*args, **kw):
lg = logging.getLogger('django-crontab-monitor')
kw['executed_from'] = kw.get('executed_from', 'crontab')
single_entry_point(*args, **kw)
message = 'Done from single_entry_point_of_crontab'
lg.info(message)
def check_outside_web(alert_log, *args, web_urls='https://www.google.com/|https://www.ho600.com/', **kw):
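    # `web_urls` is a single '|'-separated string (split below); the URLs are
    # checked in order and the loop stops at the first non-LOG status.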
lg = logging.getLogger('django-crontab-monitor')
lg.debug("alert_log id: {}".format(alert_log.id))
lg.debug("web_urls: {}".format(web_urls))
web_urls = web_urls.split('|')
title = _('No alarm, just logging')
status = SelectOption.objects.get(swarm='alert-log-status', value='LOG')
mail_body = "Executed from {}\n".format(kw.get('executed_from', '__none__'))
mail_body += "args: {}\n".format(args)
mail_body += "kw: {}\n".format(kw)
t0 = time.time()
for url in web_urls:
lg.debug("url: {}".format(url))
try:
res = urllib.request.urlopen(url)
except Exception as e:
status = SelectOption.objects.get(swarm='alert-log-status', value='ALARM')
title = _('Alarm on {url}').format(url=url)
mail_body += 'Exception: {}\n'.format(e)
else:
if res.status == 200:
t1 = time.time()
mail_body += 'Duration of {}: {} seconds\n'.format(url, t1-t0)
t0 = t1
else:
title = _('Alarm on {url}').format(url=url)
status = SelectOption.objects.get(swarm='alert-log-status', value='ALARM')
mail_body += '{} Error: {}\n'.format(res.status, res.read())
if status.value != 'LOG':
break
for receiver in alert_log.inspection.get_receive_notification_users():
alert_log.receivers.add(receiver)
alert_log.title = title
alert_log.mail_body = mail_body
alert_log.status = status
alert_log.executed_end_time = now()
alert_log.save()
lg.info("title: {}".format(alert_log.title))
    lg.info("status: {}".format(alert_log.status))
the-stack_0_13469
"""Test the arraymodule.
Roger E. Masse
"""
import unittest
from test import support
from test.support import _2G
import weakref
import pickle
import operator
import struct
import sys
import warnings
import array
# from array import _array_reconstructor as array_reconstructor # XXX: RUSTPYTHON
# sizeof_wchar = array.array('u').itemsize # XXX: RUSTPYTHON
class ArraySubclass(array.array):
pass
class ArraySubclassWithKwargs(array.array):
def __init__(self, typecode, newarg=None):
array.array.__init__(self)
# TODO: RUSTPYTHON
# We do not support typecode 'u' for unicode yet
# typecodes = 'ubBhHiIlLfdqQ'
typecodes = 'bBhHiIlLfdqQ'
class MiscTest(unittest.TestCase):
def test_bad_constructor(self):
self.assertRaises(TypeError, array.array)
self.assertRaises(TypeError, array.array, spam=42)
self.assertRaises(TypeError, array.array, 'xx')
self.assertRaises(ValueError, array.array, 'x')
def test_empty(self):
# Exercise code for handling zero-length arrays
a = array.array('B')
a[:] = a
self.assertEqual(len(a), 0)
self.assertEqual(len(a + a), 0)
self.assertEqual(len(a * 3), 0)
a += a
self.assertEqual(len(a), 0)
# Machine format codes.
#
# Search for "enum machine_format_code" in Modules/arraymodule.c to get the
# authoritative values.
UNKNOWN_FORMAT = -1
UNSIGNED_INT8 = 0
SIGNED_INT8 = 1
UNSIGNED_INT16_LE = 2
UNSIGNED_INT16_BE = 3
SIGNED_INT16_LE = 4
SIGNED_INT16_BE = 5
UNSIGNED_INT32_LE = 6
UNSIGNED_INT32_BE = 7
SIGNED_INT32_LE = 8
SIGNED_INT32_BE = 9
UNSIGNED_INT64_LE = 10
UNSIGNED_INT64_BE = 11
SIGNED_INT64_LE = 12
SIGNED_INT64_BE = 13
IEEE_754_FLOAT_LE = 14
IEEE_754_FLOAT_BE = 15
IEEE_754_DOUBLE_LE = 16
IEEE_754_DOUBLE_BE = 17
UTF16_LE = 18
UTF16_BE = 19
UTF32_LE = 20
UTF32_BE = 21
class ArrayReconstructorTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_error(self):
self.assertRaises(TypeError, array_reconstructor,
"", "b", 0, b"")
self.assertRaises(TypeError, array_reconstructor,
str, "b", 0, b"")
self.assertRaises(TypeError, array_reconstructor,
array.array, "b", '', b"")
self.assertRaises(TypeError, array_reconstructor,
array.array, "b", 0, "")
self.assertRaises(ValueError, array_reconstructor,
array.array, "?", 0, b"")
self.assertRaises(ValueError, array_reconstructor,
array.array, "b", UNKNOWN_FORMAT, b"")
self.assertRaises(ValueError, array_reconstructor,
array.array, "b", 22, b"")
self.assertRaises(ValueError, array_reconstructor,
array.array, "d", 16, b"a")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_numbers(self):
testcases = (
(['B', 'H', 'I', 'L'], UNSIGNED_INT8, '=BBBB',
[0x80, 0x7f, 0, 0xff]),
(['b', 'h', 'i', 'l'], SIGNED_INT8, '=bbb',
[-0x80, 0x7f, 0]),
(['H', 'I', 'L'], UNSIGNED_INT16_LE, '<HHHH',
[0x8000, 0x7fff, 0, 0xffff]),
(['H', 'I', 'L'], UNSIGNED_INT16_BE, '>HHHH',
[0x8000, 0x7fff, 0, 0xffff]),
(['h', 'i', 'l'], SIGNED_INT16_LE, '<hhh',
[-0x8000, 0x7fff, 0]),
(['h', 'i', 'l'], SIGNED_INT16_BE, '>hhh',
[-0x8000, 0x7fff, 0]),
(['I', 'L'], UNSIGNED_INT32_LE, '<IIII',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['I', 'L'], UNSIGNED_INT32_BE, '>IIII',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['i', 'l'], SIGNED_INT32_LE, '<iii',
[-1<<31, (1<<31)-1, 0]),
(['i', 'l'], SIGNED_INT32_BE, '>iii',
[-1<<31, (1<<31)-1, 0]),
(['L'], UNSIGNED_INT64_LE, '<QQQQ',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['L'], UNSIGNED_INT64_BE, '>QQQQ',
[1<<31, (1<<31)-1, 0, (1<<32)-1]),
(['l'], SIGNED_INT64_LE, '<qqq',
[-1<<31, (1<<31)-1, 0]),
(['l'], SIGNED_INT64_BE, '>qqq',
[-1<<31, (1<<31)-1, 0]),
# The following tests for INT64 will raise an OverflowError
# when run on a 32-bit machine. The tests are simply skipped
# in that case.
(['L'], UNSIGNED_INT64_LE, '<QQQQ',
[1<<63, (1<<63)-1, 0, (1<<64)-1]),
(['L'], UNSIGNED_INT64_BE, '>QQQQ',
[1<<63, (1<<63)-1, 0, (1<<64)-1]),
(['l'], SIGNED_INT64_LE, '<qqq',
[-1<<63, (1<<63)-1, 0]),
(['l'], SIGNED_INT64_BE, '>qqq',
[-1<<63, (1<<63)-1, 0]),
(['f'], IEEE_754_FLOAT_LE, '<ffff',
[16711938.0, float('inf'), float('-inf'), -0.0]),
(['f'], IEEE_754_FLOAT_BE, '>ffff',
[16711938.0, float('inf'), float('-inf'), -0.0]),
(['d'], IEEE_754_DOUBLE_LE, '<dddd',
[9006104071832581.0, float('inf'), float('-inf'), -0.0]),
(['d'], IEEE_754_DOUBLE_BE, '>dddd',
[9006104071832581.0, float('inf'), float('-inf'), -0.0])
)
for testcase in testcases:
valid_typecodes, mformat_code, struct_fmt, values = testcase
arraystr = struct.pack(struct_fmt, *values)
for typecode in valid_typecodes:
try:
a = array.array(typecode, values)
except OverflowError:
continue # Skip this test case.
b = array_reconstructor(
array.array, typecode, mformat_code, arraystr)
self.assertEqual(a, b,
msg="{0!r} != {1!r}; testcase={2!r}".format(a, b, testcase))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_unicode(self):
teststr = "Bonne Journ\xe9e \U0002030a\U00020347"
testcases = (
(UTF16_LE, "UTF-16-LE"),
(UTF16_BE, "UTF-16-BE"),
(UTF32_LE, "UTF-32-LE"),
(UTF32_BE, "UTF-32-BE")
)
for testcase in testcases:
mformat_code, encoding = testcase
a = array.array('u', teststr)
b = array_reconstructor(
array.array, 'u', mformat_code, teststr.encode(encoding))
self.assertEqual(a, b,
msg="{0!r} != {1!r}; testcase={2!r}".format(a, b, testcase))
class BaseTest:
# Required class attributes (provided by subclasses
# typecode: the typecode to test
# example: an initializer usable in the constructor for this type
# smallerexample: the same length as example, but smaller
# biggerexample: the same length as example, but bigger
# outside: An entry that is not in example
# minitemsize: the minimum guaranteed itemsize
def assertEntryEqual(self, entry1, entry2):
self.assertEqual(entry1, entry2)
def badtypecode(self):
# Return a typecode that is different from our own
return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
def test_constructor(self):
a = array.array(self.typecode)
self.assertEqual(a.typecode, self.typecode)
self.assertGreaterEqual(a.itemsize, self.minitemsize)
self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
a = array.array(self.typecode)
a.append(self.example[0])
self.assertEqual(len(a), 1)
a = array.array(self.typecode, self.example)
self.assertEqual(len(a), len(self.example))
def test_buffer_info(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assertIsInstance(bi, tuple)
self.assertEqual(len(bi), 2)
self.assertIsInstance(bi[0], int)
self.assertIsInstance(bi[1], int)
self.assertEqual(bi[1], len(a))
def test_byteswap(self):
if self.typecode == 'u':
example = '\U00100100'
else:
example = self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
b.byteswap()
self.assertEqual(a, b)
def test_copy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.copy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_deepcopy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.deepcopy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reduce_ex(self):
a = array.array(self.typecode, self.example)
for protocol in range(3):
self.assertIs(a.__reduce_ex__(protocol)[0], array.array)
for protocol in range(3, pickle.HIGHEST_PROTOCOL + 1):
self.assertIs(a.__reduce_ex__(protocol)[0], array_reconstructor)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle(self):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode, self.example)
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode, self.example)
a.x = 10
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle_for_empty_array(self):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode)
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode)
a.x = 10
b = pickle.loads(pickle.dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_iterator_pickle(self):
orig = array.array(self.typecode, self.example)
data = list(orig)
data2 = data[::-1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = iter(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data + data2)
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[1:] + data2)
# empty iterator
for i in range(1, len(data)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data2)
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a.fromlist(data2)
self.assertEqual(list(it), [])
def test_exhausted_iterator(self):
a = array.array(self.typecode, self.example)
self.assertEqual(list(a), list(self.example))
exhit = iter(a)
empit = iter(a)
for x in exhit: # exhaust the iterator
next(empit) # not exhausted
a.append(self.outside)
self.assertEqual(list(exhit), [])
self.assertEqual(list(empit), [self.outside])
self.assertEqual(list(a), list(self.example) + [self.outside])
def test_insert(self):
a = array.array(self.typecode, self.example)
a.insert(0, self.example[0])
self.assertEqual(len(a), 1+len(self.example))
self.assertEqual(a[0], a[1])
self.assertRaises(TypeError, a.insert)
self.assertRaises(TypeError, a.insert, None)
self.assertRaises(TypeError, a.insert, 0, None)
a = array.array(self.typecode, self.example)
a.insert(-1, self.example[0])
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:-1] + self.example[:1] + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a.insert(-1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a.insert(1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[:1])
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofromfile(self):
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.tofile)
support.unlink(support.TESTFN)
f = open(support.TESTFN, 'wb')
try:
a.tofile(f)
f.close()
b = array.array(self.typecode)
f = open(support.TESTFN, 'rb')
self.assertRaises(TypeError, b.fromfile)
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
self.assertRaises(EOFError, b.fromfile, f, len(self.example)+1)
self.assertEqual(a, b)
f.close()
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fromfile_ioerror(self):
# Issue #5395: Check if fromfile raises a proper OSError
# instead of EOFError.
a = array.array(self.typecode)
f = open(support.TESTFN, 'wb')
try:
self.assertRaises(OSError, a.fromfile, f, len(self.example))
finally:
f.close()
support.unlink(support.TESTFN)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_filewrite(self):
a = array.array(self.typecode, 2*self.example)
f = open(support.TESTFN, 'wb')
try:
f.write(a)
f.close()
b = array.array(self.typecode)
f = open(support.TESTFN, 'rb')
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
f.close()
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_tofromlist(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tolist, 42)
self.assertRaises(TypeError, b.fromlist)
self.assertRaises(TypeError, b.fromlist, 42)
self.assertRaises(TypeError, b.fromlist, [None])
b.fromlist(a.tolist())
self.assertEqual(a, b)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofromstring(self):
# Warnings not raised when arguments are incorrect as Argument Clinic
# handles that before the warning can be raised.
nb_warnings = 2
with warnings.catch_warnings(record=True) as r:
warnings.filterwarnings("always",
message=r"(to|from)string\(\) is deprecated",
category=DeprecationWarning)
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tostring, 42)
self.assertRaises(TypeError, b.fromstring)
self.assertRaises(TypeError, b.fromstring, 42)
b.fromstring(a.tostring())
self.assertEqual(a, b)
if a.itemsize>1:
self.assertRaises(ValueError, b.fromstring, "x")
nb_warnings += 1
self.assertEqual(len(r), nb_warnings)
def test_tofrombytes(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tobytes, 42)
self.assertRaises(TypeError, b.frombytes)
self.assertRaises(TypeError, b.frombytes, 42)
b.frombytes(a.tobytes())
c = array.array(self.typecode, bytearray(a.tobytes()))
self.assertEqual(a, b)
self.assertEqual(a, c)
if a.itemsize>1:
self.assertRaises(ValueError, b.frombytes, b"x")
def test_fromarray(self):
a = array.array(self.typecode, self.example)
b = array.array(self.typecode, a)
self.assertEqual(a, b)
def test_repr(self):
a = array.array(self.typecode, 2*self.example)
self.assertEqual(a, eval(repr(a), {"array": array.array}))
a = array.array(self.typecode)
self.assertEqual(repr(a), "array('%s')" % self.typecode)
def test_str(self):
a = array.array(self.typecode, 2*self.example)
str(a)
def test_cmp(self):
a = array.array(self.typecode, self.example)
self.assertIs(a == 42, False)
self.assertIs(a != 42, True)
self.assertIs(a == a, True)
self.assertIs(a != a, False)
self.assertIs(a < a, False)
self.assertIs(a <= a, True)
self.assertIs(a > a, False)
self.assertIs(a >= a, True)
al = array.array(self.typecode, self.smallerexample)
ab = array.array(self.typecode, self.biggerexample)
self.assertIs(a == 2*a, False)
self.assertIs(a != 2*a, True)
self.assertIs(a < 2*a, True)
self.assertIs(a <= 2*a, True)
self.assertIs(a > 2*a, False)
self.assertIs(a >= 2*a, False)
self.assertIs(a == al, False)
self.assertIs(a != al, True)
self.assertIs(a < al, False)
self.assertIs(a <= al, False)
self.assertIs(a > al, True)
self.assertIs(a >= al, True)
self.assertIs(a == ab, False)
self.assertIs(a != ab, True)
self.assertIs(a < ab, True)
self.assertIs(a <= ab, True)
self.assertIs(a > ab, False)
self.assertIs(a >= ab, False)
def test_add(self):
a = array.array(self.typecode, self.example) \
+ array.array(self.typecode, self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[::-1])
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__add__, "bad")
def test_iadd(self):
a = array.array(self.typecode, self.example[::-1])
b = a
a += array.array(self.typecode, 2*self.example)
self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1]+2*self.example)
)
a = array.array(self.typecode, self.example)
a += a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__iadd__, "bad")
def test_mul(self):
a = 5*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a = array.array(self.typecode, self.example)*5
self.assertEqual(
a,
array.array(self.typecode, self.example*5)
)
a = 0*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = (-1)*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = 5 * array.array(self.typecode, self.example[:1])
self.assertEqual(
a,
array.array(self.typecode, [a[0]] * 5)
)
self.assertRaises(TypeError, a.__mul__, "bad")
def test_imul(self):
a = array.array(self.typecode, self.example)
b = a
a *= 5
self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a *= 0
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= 1000
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= -1
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a = array.array(self.typecode, self.example)
a *= -1
self.assertEqual(a, array.array(self.typecode))
self.assertRaises(TypeError, a.__imul__, "bad")
def test_getitem(self):
a = array.array(self.typecode, self.example)
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
self.assertEntryEqual(a[-len(self.example)], self.example[0])
self.assertRaises(TypeError, a.__getitem__)
self.assertRaises(IndexError, a.__getitem__, len(self.example))
self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
def test_setitem(self):
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[len(self.example)-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-len(self.example)] = a[-1]
self.assertEntryEqual(a[0], a[-1])
self.assertRaises(TypeError, a.__setitem__)
self.assertRaises(TypeError, a.__setitem__, None)
self.assertRaises(TypeError, a.__setitem__, 0, None)
self.assertRaises(
IndexError,
a.__setitem__,
len(self.example), self.example[0]
)
self.assertRaises(
IndexError,
a.__setitem__,
-len(self.example)-1, self.example[0]
)
def test_delitem(self):
a = array.array(self.typecode, self.example)
del a[0]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
a = array.array(self.typecode, self.example)
del a[-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[len(self.example)-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[-len(self.example)]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
self.assertRaises(TypeError, a.__delitem__)
self.assertRaises(TypeError, a.__delitem__, None)
self.assertRaises(IndexError, a.__delitem__, len(self.example))
self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
def test_getslice(self):
a = array.array(self.typecode, self.example)
self.assertEqual(a[:], a)
self.assertEqual(
a[1:],
array.array(self.typecode, self.example[1:])
)
self.assertEqual(
a[:1],
array.array(self.typecode, self.example[:1])
)
self.assertEqual(
a[:-1],
array.array(self.typecode, self.example[:-1])
)
self.assertEqual(
a[-1:],
array.array(self.typecode, self.example[-1:])
)
self.assertEqual(
a[-1:-1],
array.array(self.typecode)
)
self.assertEqual(
a[2:1],
array.array(self.typecode)
)
self.assertEqual(
a[1000:],
array.array(self.typecode)
)
self.assertEqual(a[-1000:], a)
self.assertEqual(a[:1000], a)
self.assertEqual(
a[:-1000],
array.array(self.typecode)
)
self.assertEqual(a[-1000:1000], a)
self.assertEqual(
a[2000:1000],
array.array(self.typecode)
)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing
# (Assumes list conversion works correctly, too)
a = array.array(self.typecode, self.example)
indices = (0, None, 1, 3, 19, 100, sys.maxsize, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
self.assertEqual(list(a[start:stop:step]),
list(a)[start:stop:step])
def test_setslice(self):
a = array.array(self.typecode, self.example)
a[:1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[:-1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[-1:])
)
a = array.array(self.typecode, self.example)
a[-1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:-1] = a
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:1] + self.example + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a[1000:] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[-1000:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:1000] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:-1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[1:0] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[2000:1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 100, sys.maxsize, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
a = array.array(self.typecode, self.example)
L = list(a)
# Make sure we have a slice of exactly the right length,
# but with (hopefully) different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
a[start:stop:step] = array.array(self.typecode, data)
self.assertEqual(a, array.array(self.typecode, L))
del L[start:stop:step]
del a[start:stop:step]
self.assertEqual(a, array.array(self.typecode, L))
def test_index(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.index)
for x in example:
self.assertEqual(a.index(x), example.index(x))
self.assertRaises(ValueError, a.index, None)
self.assertRaises(ValueError, a.index, self.outside)
def test_count(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.count)
for x in example:
self.assertEqual(a.count(x), example.count(x))
self.assertEqual(a.count(self.outside), 0)
self.assertEqual(a.count(None), 0)
def test_remove(self):
for x in self.example:
example = 2*self.example
a = array.array(self.typecode, example)
pos = example.index(x)
example2 = example[:pos] + example[pos+1:]
a.remove(x)
self.assertEqual(a, array.array(self.typecode, example2))
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.remove, self.outside)
self.assertRaises(ValueError, a.remove, None)
def test_pop(self):
a = array.array(self.typecode)
self.assertRaises(IndexError, a.pop)
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.pop, 42, 42)
self.assertRaises(TypeError, a.pop, None)
self.assertRaises(IndexError, a.pop, len(a))
self.assertRaises(IndexError, a.pop, -len(a)-1)
self.assertEntryEqual(a.pop(0), self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:]+self.example)
)
self.assertEntryEqual(a.pop(1), self.example[2])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
)
self.assertEntryEqual(a.pop(0), self.example[1])
self.assertEntryEqual(a.pop(), self.example[-1])
self.assertEqual(
a,
array.array(self.typecode, self.example[3:]+self.example[:-1])
)
def test_reverse(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.reverse, 42)
a.reverse()
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1])
)
def test_extend(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.extend)
a.extend(array.array(self.typecode, self.example[::-1]))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
a = array.array(self.typecode, self.example)
a.extend(a)
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.extend, b)
a = array.array(self.typecode, self.example)
a.extend(self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
def test_constructor_with_iterable_argument(self):
a = array.array(self.typecode, iter(self.example))
b = array.array(self.typecode, self.example)
self.assertEqual(a, b)
# non-iterable argument
self.assertRaises(TypeError, array.array, self.typecode, 10)
# pass through errors raised in __iter__
class A:
def __iter__(self):
raise UnicodeError
self.assertRaises(UnicodeError, array.array, self.typecode, A())
# pass through errors raised in next()
def B():
raise UnicodeError
yield None
self.assertRaises(UnicodeError, array.array, self.typecode, B())
def test_coveritertraverse(self):
try:
import gc
except ImportError:
self.skipTest('gc module not available')
a = array.array(self.typecode)
l = [iter(a)]
l.append(l)
gc.collect()
def test_buffer(self):
a = array.array(self.typecode, self.example)
m = memoryview(a)
expected = m.tobytes()
self.assertEqual(a.tobytes(), expected)
self.assertEqual(a.tobytes()[0], expected[0])
# Resizing is forbidden when there are buffer exports.
# For issue 4509, we also check after each error that
# the array was not modified.
self.assertRaises(BufferError, a.append, a[0])
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.extend, a[0:1])
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.remove, a[0])
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.pop, 0)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.fromlist, a.tolist())
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, a.frombytes, a.tobytes())
self.assertEqual(m.tobytes(), expected)
if self.typecode == 'u':
self.assertRaises(BufferError, a.fromunicode, a.tounicode())
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.imul, a, 2)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.imul, a, 0)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.setitem, a, slice(0, 0), a)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.delitem, a, 0)
self.assertEqual(m.tobytes(), expected)
self.assertRaises(BufferError, operator.delitem, a, slice(0, 1))
self.assertEqual(m.tobytes(), expected)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_weakref(self):
s = array.array(self.typecode, self.example)
p = weakref.proxy(s)
self.assertEqual(p.tobytes(), s.tobytes())
s = None
self.assertRaises(ReferenceError, len, p)
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def test_bug_782369(self):
for i in range(10):
b = array.array('B', range(64))
rc = sys.getrefcount(10)
for i in range(10):
b = array.array('B', range(64))
self.assertEqual(rc, sys.getrefcount(10))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
ArraySubclassWithKwargs('b', newarg=1)
def test_create_from_bytes(self):
# XXX This test probably needs to be moved in a subclass or
# generalized to use self.typecode.
a = array.array('H', b"1234")
self.assertEqual(len(a) * a.itemsize, 4)
@support.cpython_only
def test_sizeof_with_buffer(self):
a = array.array(self.typecode, self.example)
basesize = support.calcvobjsize('Pn2Pi')
buffer_size = a.buffer_info()[1] * a.itemsize
support.check_sizeof(self, a, basesize + buffer_size)
@support.cpython_only
def test_sizeof_without_buffer(self):
a = array.array(self.typecode)
basesize = support.calcvobjsize('Pn2Pi')
support.check_sizeof(self, a, basesize)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_initialize_with_unicode(self):
if self.typecode != 'u':
with self.assertRaises(TypeError) as cm:
a = array.array(self.typecode, 'foo')
self.assertIn("cannot use a str", str(cm.exception))
with self.assertRaises(TypeError) as cm:
a = array.array(self.typecode, array.array('u', 'foo'))
self.assertIn("cannot use a unicode array", str(cm.exception))
else:
a = array.array(self.typecode, "foo")
a = array.array(self.typecode, array.array('u', 'foo'))
@support.cpython_only
def test_obsolete_write_lock(self):
from _testcapi import getbuffer_with_null_view
a = array.array('B', b"")
self.assertRaises(BufferError, getbuffer_with_null_view, a)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, array.array,
(self.typecode,))
support.check_free_after_iterating(self, reversed, array.array,
(self.typecode,))
class StringTest(BaseTest):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setitem(self):
super().test_setitem()
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setitem__, 0, self.example[:2])
class UnicodeTest(StringTest, unittest.TestCase):
typecode = 'u'
example = '\x01\u263a\x00\ufeff'
smallerexample = '\x01\u263a\x00\ufefe'
biggerexample = '\x01\u263a\x01\ufeff'
outside = str('\x33')
minitemsize = 2
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_add(self):
super().test_add()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_buffer(self):
super().test_buffer()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_buffer_info(self):
super().test_buffer_info()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_byteswap(self):
super().test_byteswap()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cmp(self):
super().test_cmp()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
super().test_constructor()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor_with_iterable_argument(self):
super().test_constructor_with_iterable_argument()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_copy(self):
super().test_copy()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_count(self):
super().test_count()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_coveritertraverse(self):
super().test_coveritertraverse()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_deepcopy(self):
super().test_deepcopy()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_delitem(self):
super().test_delitem()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exhausted_iterator(self):
super().test_exhausted_iterator()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_extend(self):
super().test_extend()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_extended_getslice(self):
super().test_extended_getslice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_extended_set_del_slice(self):
super().test_extended_set_del_slice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fromarray(self):
super().test_fromarray()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_getitem(self):
super().test_getitem()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_getslice(self):
super().test_getslice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_iadd(self):
super().test_iadd()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_imul(self):
super().test_imul()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_index(self):
super().test_index()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_insert(self):
super().test_insert()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_len(self):
super().test_len()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mul(self):
super().test_mul()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pop(self):
super().test_pop()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_remove(self):
super().test_remove()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr(self):
super().test_repr()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reverse(self):
super().test_reverse()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setslice(self):
super().test_setslice()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_str(self):
super().test_str()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofrombytes(self):
super().test_tofrombytes()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tofromlist(self):
super().test_tofromlist()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_unicode(self):
self.assertRaises(TypeError, array.array, 'b', 'foo')
a = array.array('u', '\xa0\xc2\u1234')
a.fromunicode(' ')
a.fromunicode('')
a.fromunicode('')
a.fromunicode('\x11abc\xff\u1234')
s = a.tounicode()
self.assertEqual(s, '\xa0\xc2\u1234 \x11abc\xff\u1234')
self.assertEqual(a.itemsize, sizeof_wchar)
s = '\x00="\'a\\b\x80\xff\u0000\u0001\u1234'
a = array.array('u', s)
self.assertEqual(
repr(a),
"array('u', '\\x00=\"\\'a\\\\b\\x80\xff\\x00\\x01\u1234')")
self.assertRaises(TypeError, a.fromunicode)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue17223(self):
# this used to crash
if sizeof_wchar == 4:
# U+FFFFFFFF is an invalid code point in Unicode 6.0
invalid_str = b'\xff\xff\xff\xff'
else:
# PyUnicode_FromUnicode() cannot fail with 16-bit wchar_t
self.skipTest("specific to 32-bit wchar_t")
a = array.array('u', invalid_str)
self.assertRaises(ValueError, a.tounicode)
self.assertRaises(ValueError, str, a)
class NumberTest(BaseTest):
def test_extslice(self):
a = array.array(self.typecode, range(5))
self.assertEqual(a[::], a)
self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100:100:2], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))
def test_delslice(self):
a = array.array(self.typecode, range(5))
del a[::2]
self.assertEqual(a, array.array(self.typecode, [1,3]))
a = array.array(self.typecode, range(5))
del a[1::2]
self.assertEqual(a, array.array(self.typecode, [0,2,4]))
a = array.array(self.typecode, range(5))
del a[1::-2]
self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
a = array.array(self.typecode, range(10))
del a[::1000]
self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
# test issue7788
a = array.array(self.typecode, range(10))
del a[9::1<<333]
def test_assignment(self):
a = array.array(self.typecode, range(10))
a[::2] = array.array(self.typecode, [42]*5)
self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
a = array.array(self.typecode, range(10))
a[::-4] = array.array(self.typecode, [10]*3)
self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = array.array(self.typecode, range(4))
a[::-1] = a
self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
a = array.array(self.typecode, range(10))
b = a[:]
c = a[:]
ins = array.array(self.typecode, range(2))
a[2:3] = ins
b[slice(2,3)] = ins
c[2:3:] = ins
def test_iterationcontains(self):
a = array.array(self.typecode, range(10))
self.assertEqual(list(a), list(range(10)))
b = array.array(self.typecode, [20])
self.assertEqual(a[-1] in a, True)
self.assertEqual(b[0] not in a, True)
def check_overflow(self, lower, upper):
# method to be used by subclasses
# should not overflow assigning lower limit
a = array.array(self.typecode, [lower])
a[0] = lower
# should overflow assigning less than lower limit
self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
# should not overflow assigning upper limit
a = array.array(self.typecode, [upper])
a[0] = upper
# should overflow assigning more than upper limit
self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclassing(self):
typecode = self.typecode
class ExaggeratingArray(array.array):
__slots__ = ['offset']
def __new__(cls, typecode, data, offset):
return array.array.__new__(cls, typecode, data)
def __init__(self, typecode, data, offset):
self.offset = offset
def __getitem__(self, i):
return array.array.__getitem__(self, i) + self.offset
a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
self.assertEntryEqual(a[0], 7)
self.assertRaises(AttributeError, setattr, a, "color", "blue")
def test_frombytearray(self):
a = array.array('b', range(10))
b = array.array(self.typecode, a)
self.assertEqual(a, b)
class IntegerNumberTest(NumberTest):
def test_type_error(self):
a = array.array(self.typecode)
a.append(42)
with self.assertRaises(TypeError):
a.append(42.0)
with self.assertRaises(TypeError):
a[0] = 42.0
class Intable:
def __init__(self, num):
self._num = num
def __index__(self):
return self._num
def __int__(self):
return self._num
def __sub__(self, other):
return Intable(int(self) - int(other))
def __add__(self, other):
return Intable(int(self) + int(other))
class SignedNumberTest(IntegerNumberTest):
example = [-1, 0, 1, 42, 0x7f]
smallerexample = [-1, 0, 1, 42, 0x7e]
biggerexample = [-1, 0, 1, 43, 0x7f]
outside = 23
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_overflow(self):
a = array.array(self.typecode)
lower = -1 * int(pow(2, a.itemsize * 8 - 1))
upper = int(pow(2, a.itemsize * 8 - 1)) - 1
self.check_overflow(lower, upper)
self.check_overflow(Intable(lower), Intable(upper))
class UnsignedNumberTest(IntegerNumberTest):
example = [0, 1, 17, 23, 42, 0xff]
smallerexample = [0, 1, 17, 23, 42, 0xfe]
biggerexample = [0, 1, 17, 23, 43, 0xff]
outside = 0xaa
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_overflow(self):
a = array.array(self.typecode)
lower = 0
upper = int(pow(2, a.itemsize * 8)) - 1
self.check_overflow(lower, upper)
self.check_overflow(Intable(lower), Intable(upper))
def test_bytes_extend(self):
s = bytes(self.example)
a = array.array(self.typecode, self.example)
a.extend(s)
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example)
)
a = array.array(self.typecode, self.example)
a.extend(bytearray(reversed(s)))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
class ByteTest(SignedNumberTest, unittest.TestCase):
typecode = 'b'
minitemsize = 1
class UnsignedByteTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'B'
minitemsize = 1
class ShortTest(SignedNumberTest, unittest.TestCase):
typecode = 'h'
minitemsize = 2
class UnsignedShortTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'H'
minitemsize = 2
class IntTest(SignedNumberTest, unittest.TestCase):
typecode = 'i'
minitemsize = 2
class UnsignedIntTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'I'
minitemsize = 2
class LongTest(SignedNumberTest, unittest.TestCase):
typecode = 'l'
minitemsize = 4
class UnsignedLongTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'L'
minitemsize = 4
class LongLongTest(SignedNumberTest, unittest.TestCase):
typecode = 'q'
minitemsize = 8
class UnsignedLongLongTest(UnsignedNumberTest, unittest.TestCase):
typecode = 'Q'
minitemsize = 8
class FPTest(NumberTest):
example = [-42.0, 0, 42, 1e5, -1e10]
smallerexample = [-42.0, 0, 42, 1e5, -2e10]
biggerexample = [-42.0, 0, 42, 1e5, 1e10]
outside = 23
def assertEntryEqual(self, entry1, entry2):
self.assertAlmostEqual(entry1, entry2)
def test_nan(self):
a = array.array(self.typecode, [float('nan')])
b = array.array(self.typecode, [float('nan')])
self.assertIs(a != b, True)
self.assertIs(a == b, False)
self.assertIs(a > b, False)
self.assertIs(a >= b, False)
self.assertIs(a < b, False)
self.assertIs(a <= b, False)
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
                # On alphas treating the byte swapped bit patterns as
# floats/doubles results in floating point exceptions
# => compare the 8bit string values instead
self.assertNotEqual(a.tobytes(), b.tobytes())
b.byteswap()
self.assertEqual(a, b)
class FloatTest(FPTest, unittest.TestCase):
typecode = 'f'
minitemsize = 4
class DoubleTest(FPTest, unittest.TestCase):
typecode = 'd'
minitemsize = 8
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'capacity overflow'")
def test_alloc_overflow(self):
from sys import maxsize
a = array.array('d', [-1]*65536)
try:
a *= maxsize//65536 + 1
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
b = array.array('d', [ 2.71828183, 3.14159265, -1])
try:
b * (maxsize//3 + 1)
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
class LargeArrayTest(unittest.TestCase):
typecode = 'b'
def example(self, size):
# We assess a base memuse of <=2.125 for constructing this array
base = array.array(self.typecode, [0, 1, 2, 3, 4, 5, 6, 7]) * (size // 8)
base += array.array(self.typecode, [99]*(size % 8) + [8, 9, 10, 11])
return base
@support.bigmemtest(_2G, memuse=2.125)
def test_example_data(self, size):
example = self.example(size)
self.assertEqual(len(example), size+4)
@support.bigmemtest(_2G, memuse=2.125)
def test_access(self, size):
example = self.example(size)
self.assertEqual(example[0], 0)
self.assertEqual(example[-(size+4)], 0)
self.assertEqual(example[size], 8)
self.assertEqual(example[-4], 8)
self.assertEqual(example[size+3], 11)
self.assertEqual(example[-1], 11)
@support.bigmemtest(_2G, memuse=2.125+1)
def test_slice(self, size):
example = self.example(size)
self.assertEqual(list(example[:4]), [0, 1, 2, 3])
self.assertEqual(list(example[-4:]), [8, 9, 10, 11])
part = example[1:-1]
self.assertEqual(len(part), size+2)
self.assertEqual(part[0], 1)
self.assertEqual(part[-1], 10)
del part
part = example[::2]
self.assertEqual(len(part), (size+5)//2)
self.assertEqual(list(part[:4]), [0, 2, 4, 6])
if size % 2:
self.assertEqual(list(part[-2:]), [9, 11])
else:
self.assertEqual(list(part[-2:]), [8, 10])
@support.bigmemtest(_2G, memuse=2.125)
def test_count(self, size):
example = self.example(size)
self.assertEqual(example.count(0), size//8)
self.assertEqual(example.count(11), 1)
@support.bigmemtest(_2G, memuse=2.125)
def test_append(self, size):
example = self.example(size)
example.append(12)
self.assertEqual(example[-1], 12)
@support.bigmemtest(_2G, memuse=2.125)
def test_extend(self, size):
example = self.example(size)
example.extend(iter([12, 13, 14, 15]))
self.assertEqual(len(example), size+8)
self.assertEqual(list(example[-8:]), [8, 9, 10, 11, 12, 13, 14, 15])
@support.bigmemtest(_2G, memuse=2.125)
def test_frombytes(self, size):
example = self.example(size)
example.frombytes(b'abcd')
self.assertEqual(len(example), size+8)
self.assertEqual(list(example[-8:]), [8, 9, 10, 11] + list(b'abcd'))
@support.bigmemtest(_2G, memuse=2.125)
def test_fromlist(self, size):
example = self.example(size)
example.fromlist([12, 13, 14, 15])
self.assertEqual(len(example), size+8)
self.assertEqual(list(example[-8:]), [8, 9, 10, 11, 12, 13, 14, 15])
@support.bigmemtest(_2G, memuse=2.125)
def test_index(self, size):
example = self.example(size)
self.assertEqual(example.index(0), 0)
self.assertEqual(example.index(1), 1)
self.assertEqual(example.index(7), 7)
self.assertEqual(example.index(11), size+3)
@support.bigmemtest(_2G, memuse=2.125)
def test_insert(self, size):
example = self.example(size)
example.insert(0, 12)
example.insert(10, 13)
example.insert(size+1, 14)
self.assertEqual(len(example), size+7)
self.assertEqual(example[0], 12)
self.assertEqual(example[10], 13)
self.assertEqual(example[size+1], 14)
@support.bigmemtest(_2G, memuse=2.125)
def test_pop(self, size):
example = self.example(size)
self.assertEqual(example.pop(0), 0)
self.assertEqual(example[0], 1)
self.assertEqual(example.pop(size+1), 10)
self.assertEqual(example[size+1], 11)
self.assertEqual(example.pop(1), 2)
self.assertEqual(example[1], 3)
self.assertEqual(len(example), size+1)
self.assertEqual(example.pop(), 11)
self.assertEqual(len(example), size)
@support.bigmemtest(_2G, memuse=2.125)
def test_remove(self, size):
example = self.example(size)
example.remove(0)
self.assertEqual(len(example), size+3)
self.assertEqual(example[0], 1)
example.remove(10)
self.assertEqual(len(example), size+2)
self.assertEqual(example[size], 9)
self.assertEqual(example[size+1], 11)
@support.bigmemtest(_2G, memuse=2.125)
def test_reverse(self, size):
example = self.example(size)
example.reverse()
self.assertEqual(len(example), size+4)
self.assertEqual(example[0], 11)
self.assertEqual(example[3], 8)
self.assertEqual(example[-1], 0)
example.reverse()
self.assertEqual(len(example), size+4)
self.assertEqual(list(example[:4]), [0, 1, 2, 3])
self.assertEqual(list(example[-4:]), [8, 9, 10, 11])
# list takes about 9 bytes per element
@support.bigmemtest(_2G, memuse=2.125+9)
def test_tolist(self, size):
example = self.example(size)
ls = example.tolist()
self.assertEqual(len(ls), len(example))
self.assertEqual(ls[:8], list(example[:8]))
self.assertEqual(ls[-8:], list(example[-8:]))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_13474 | """
Utilities for input/output operations.
It is important to have DFS as a global variable in this module to take advantage of singletons
Info: https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
All pages can import this file and retrieve data by:
> from data_loader import DFS
> df_xx = DFS[xx] # xx is the name of the dataframe
"""
import io
import dropbox
import pandas as pd
import oyaml as yaml
import constants as c
import utilities as u
DBX = dropbox.Dropbox(u.get_secret(c.io.VAR_DROPBOX_TOKEN))
DFS = {}
YML = {}
def get_config():
""" retrives config yaml as ordered dict """
_, res = DBX.files_download(c.io.FILE_CONFIG)
return yaml.load(io.BytesIO(res.content), Loader=yaml.SafeLoader)
def get_money_lover_filename():
""" gets the name of the money lover excel file """
names = []
# Explore all files and save all that are valid
for x in DBX.files_list_folder(c.io.PATH_MONEY_LOVER).entries:
try:
            # Try to parse the date; if it parses, this is a money lover file
pd.to_datetime(x.name.split(".")[0])
names.append(x.name)
except (TypeError, ValueError):
pass
return max(names)
def get_df_transactions():
"""
    Retrieves the df with transactions. It will read the newest money lover excel file
Returns:
raw dataframe with transactions
"""
_, res = DBX.files_download(c.io.FILE_TRANSACTIONS)
return pd.read_excel(io.BytesIO(res.content), index_col=0)
def get_data_without_transactions():
"""
    Retrieves all dataframes from data.xlsx file
Returns:
dict with raw dataframes from data.xlsx file
"""
_, res = DBX.files_download(c.io.FILE_DATA)
dfs = {x: pd.read_excel(io.BytesIO(res.content), sheet_name=x) for x in c.dfs.ALL_FROM_DATA}
return dfs
def sync():
""" Retrives all dataframes and update DFS global var """
DFS.update(get_data_without_transactions())
DFS[c.dfs.TRANS] = get_df_transactions()
YML.update(get_config())
# Do one sync when it is imported!
sync()
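
# Illustrative usage sketch (assumed, not part of the original module): how a
# page module is expected to consume the cached singletons. Pages only read
# DFS/YML; the helper name below is made up for demonstration.
def _example_page_usage():
    """Show the intended read-only access pattern for DFS and YML."""
    df_trans = DFS[c.dfs.TRANS]   # transactions dataframe cached at import time
    config = dict(YML)            # parsed config.yaml (ordered dict)
    sync()                        # force a fresh download from Dropbox when needed
    return df_trans.head(), config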
|
the-stack_0_13475 | import sys
import hashlib
if len(sys.argv) <= 1:
print("Provide string argument")
exit(-1)
else:
s = sys.argv[1]
result = ""
for i in s:
result = result + hashlib.sha256(i.encode("utf-8")).hexdigest() + "\n"
print(result)
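# Illustrative equivalent (assumed, not part of the original script): the same
# output built with a generator expression instead of repeated concatenation.
def hash_each_char(text):
    return "".join(hashlib.sha256(ch.encode("utf-8")).hexdigest() + "\n" for ch in text)
# Example run (script name assumed): python hash_chars.py ab
# prints one 64-hex-digit line per input character.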
|
the-stack_0_13477 | import configparser
import json
def write_config_to_file(config_dict, ini_fpath):
""" Writes a configuration to an ini file.
:param config_dict: (Dict) config to write
:param ini_fpath: (str) fpath to ini file
:return: (str) ini_file written to
"""
config = configparser.ConfigParser()
config["DEFAULT"] = {key: json.dumps(value) for key, value in config_dict.items()}
with open(ini_fpath, "w") as ini:
config.write(ini)
return ini_fpath
def read_config_from_file(ini_fpath):
"""
Reads a config file
:param ini_fpath:
:return: a dictionary of config parameters
"""
config = configparser.ConfigParser()
config.read(ini_fpath)
result = {}
for key in config["DEFAULT"]:
result[key] = json.loads(config["DEFAULT"][key])
return result
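
# Illustrative round-trip sketch (assumed, not part of the original module):
# the keys and the "example.ini" path below are made up for demonstration.
if __name__ == "__main__":
    original = {"learning_rate": 0.01, "layers": [64, 32], "use_gpu": True}
    path = write_config_to_file(original, "example.ini")
    restored = read_config_from_file(path)
    # json encoding/decoding preserves numbers, lists and booleans
    assert restored == original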
|
the-stack_0_13478 | """show_l2vpn.py
show l2vpn parser class
"""
import re
from netaddr import EUI
from ipaddress import ip_address
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any
from genie.libs.parser.base import *
class ShowL2vpnMacLearning(MetaParser):
"""Parser for show l2vpn mac-learning <mac_type> all location <location>"""
# TODO schema
def __init__(self, mac_type='mac', location='local', **kwargs):
self.location = location
self.mac_type = mac_type
super().__init__(**kwargs)
cli_command = 'show l2vpn mac-learning {mac_type} all location {location}'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command.format(
mac_type=self.mac_type,
location=self.location))
else:
out = output
result = {
'entries': [],
}
for line in out.splitlines():
line = line.rstrip()
# Topo ID Producer Next Hop(s) Mac Address IP Address
# ------- -------- ----------- -------------- ----------
# 1 0/0/CPU0 BE1.7 7777.7777.0002
# 0 0/0/CPU0 BV1 fc00.0001.0006 192.0.3.3
m = re.match(r'^(?P<topo_id>\d+)'
r' +(?P<producer>\S+)'
r' +(?:none|(?P<next_hop>\S+))'
r' +(?P<mac>[A-Za-z0-9]+\.[A-Za-z0-9]+\.[A-Za-z0-9]+)'
r'(?: +(?P<ip_address>\d+\.\d+\.\d+\.\d+|[A-Za-z0-9:]+))?$', line)
if m:
entry = {
'topo_id': eval(m.group('topo_id')),
'producer': m.group('producer'),
'next_hop': m.group('next_hop'),
'mac': EUI(m.group('mac')),
'ip_address': m.group('ip_address') \
and ip_address(m.group('ip_address')),
}
result['entries'].append(entry)
continue
return result
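
# Illustrative helper (assumed, not part of the original parser module): parse a
# saved show-command output offline instead of executing it on a device.
def _example_mac_learning_offline(device):
    sample_output = (
        "Topo ID  Producer    Next Hop(s)  Mac Address     IP Address\n"
        "-------  --------    -----------  --------------  ----------\n"
        "1        0/0/CPU0    BE1.7        7777.7777.0002\n"
    )
    parser = ShowL2vpnMacLearning(device=device)
    # passing output= skips device.execute() and parses the canned text
    return parser.cli(output=sample_output)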
class ShowL2vpnForwardingBridgeDomainMacAddress(MetaParser):
"""Parser for:
show l2vpn forwarding bridge-domain mac-address location <location>
show l2vpn forwarding bridge-domain <bridge_domain> mac-address location <location>
"""
# TODO schema
def __init__(self,location=None,bridge_domain=None,**kwargs) :
assert location is not None
self.location = location
self.bridge_domain = bridge_domain
super().__init__(**kwargs)
cli_command = ['show l2vpn forwarding bridge-domain mac-address location {location}', \
'show l2vpn forwarding bridge-domain {bridge_domain} mac-address location {location}']
def cli(self,output=None):
if output is None:
if self.bridge_domain is None:
cmd = self.cli_command[0].format(location=self.location)
else:
cmd = self.cli_command[1].format(bridge_domain=self.bridge_domain,location=self.location)
out = self.device.execute(cmd)
else:
out = output
result = {
'entries' : []
}
## Sample Output
# To Resynchronize MAC table from the Network Processors, use the command...
# l2vpn resynchronize forwarding mac-address-table location <r/s/i>
#
# Mac Address Type Learned from/Filtered on LC learned Resync Age/Last Change Mapped to
# -------------- ------- --------------------------- ---------- ---------------------- --------------
# 0021.0001.0001 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0003 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0004 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0005 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0001 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0002 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0003 EVPN BD id: 0 N/A N/A N/A
# 1234.0001.0004 EVPN BD id: 0 N/A N/A N/A
# 0021.0001.0002 dynamic (40.40.40.40, 10007) N/A 14 Mar 12:46:04 N/A
# 1234.0001.0005 static (40.40.40.40, 10007) N/A N/A N/A
# 0021.0002.0005 dynamic BE1.2 N/A 14 Mar 12:46:04 N/A
# 1234.0002.0004 static BE1.2 N/A N/A N/A
title_found = False
header_processed = False
field_indice = []
def _retrieve_fields(line,field_indice):
res = []
for idx,(start,end) in enumerate(field_indice):
if idx == len(field_indice) - 1:
res.append(line[start:].strip())
else:
res.append(line[start:end].strip())
return res
lines = out.splitlines()
for idx,line in enumerate(lines):
if idx == len(lines) - 1:
break
line = line.rstrip()
if not header_processed:
# 1. check proper title header exist
if re.match(r"^Mac Address\s+Type\s+Learned from/Filtered on\s+LC learned\s+Resync Age/Last Change\s+Mapped to",line):
title_found = True
continue
# 2. get dash header line
if title_found and re.match(r"^(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)",line):
match = re.match(r"^(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)( +)(-+)",line)
start = 0
for field in match.groups():
if '-' in field:
end = start + len(field)
field_indice.append((start,end))
start = end
else:
start += len(field)
end += len(field)
header_processed = True
continue
else:
mac,mac_type,learned_from,lc_learned,resync_age,mapped_to = _retrieve_fields(line,field_indice)
result['entries'].append({
'mac' : mac,
'mac_type' : mac_type,
'learned_from' : learned_from,
'lc_learned' : lc_learned,
'resync_age' : resync_age,
'mapped_to' : mapped_to,
})
return result
class ShowL2vpnForwardingProtectionMainInterface(MetaParser):
"""Parser for show l2vpn forwarding protection main-interface location <location>"""
# TODO schema
def __init__(self,location=None,**kwargs):
assert location is not None
self.location = location
super().__init__(**kwargs)
cli_command = 'show l2vpn forwarding protection main-interface location {location}'
def cli(self,output=None):
if output is None:
out = self.device.execute(self.cli_command.format(location=self.location))
else:
out = output
result = {
'entries' : []
}
## Sample Output
# Main Interface ID Instance State
# -------------------------------- ---------- ------------
# VFI:ves-vfi-1 0 FORWARDING
# VFI:ves-vfi-1 1 BLOCKED
# VFI:ves-vfi-2 0 FORWARDING
# VFI:ves-vfi-2 1 FORWARDING
# VFI:ves-vfi-3 0 FORWARDING
# VFI:ves-vfi-3 1 BLOCKED
# VFI:ves-vfi-4 0 FORWARDING
# VFI:ves-vfi-4 1 FORWARDING
# PW:40.40.40.40,10001 0 FORWARDING
# PW:40.40.40.40,10001 1 BLOCKED
# PW:40.40.40.40,10007 0 FORWARDING
# PW:40.40.40.40,10007 1 FORWARDING
# PW:40.40.40.40,10011 0 FORWARDING
# PW:40.40.40.40,10011 1 FORWARDING
# PW:40.40.40.40,10017 0 FORWARDING
title_found = False
header_processed = False
field_indice = []
def _retrieve_fields(line,field_indice):
res = []
for idx,(start,end) in enumerate(field_indice):
if idx == len(field_indice) - 1:
res.append(line[start:].strip())
else:
res.append(line[start:end].strip())
return res
lines = out.splitlines()
for idx,line in enumerate(lines):
if idx == len(lines) - 1:
break
line = line.rstrip()
if not header_processed:
# 1. check proper title header exist
if re.match(r"^Main Interface ID\s+Instance\s+State",line):
title_found = True
continue
# 2. get dash header line
if title_found and re.match(r"^(-+)( +)(-+)( +)(-+)",line):
match = re.match(r"^(-+)( +)(-+)( +)(-+)",line)
start = 0
for field in match.groups():
if '-' in field:
end = start + len(field)
field_indice.append((start,end))
start = end
else:
start += len(field)
end += len(field)
header_processed = True
continue
else:
interface,instance_id,state = _retrieve_fields(line,field_indice)
result['entries'].append({
'interface' : interface,
'instance_id' : instance_id,
'state' : state,
})
return result
# vim: ft=python ts=8 sw=4 et
|
the-stack_0_13479 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: radial_resample.py
#
# Tests: mesh - 2D rectilinear, single domain,
# 3D rectilinear, single domain
# 3D unstructured, multiple domain
# plots - pseudocolor
#
# Defect ID: 1827
#
# Programmer: Kevin Griffin
# Date: Tue Jun 3 11:00:41 EST 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
# 2D, Rectilinear
ds = silo_data_path("rect2d.silo")
OpenDatabase(ds)
# clean-up 1's
AddPlot("Mesh", "quadmesh2d", 1, 1)
AddOperator("RadialResample")
RadialResampleAttrs = RadialResampleAttributes()
RadialResampleAttrs.isFast = 0
RadialResampleAttrs.minTheta = -45
RadialResampleAttrs.maxTheta = 90
RadialResampleAttrs.deltaTheta = 5
RadialResampleAttrs.radius = 0.5
RadialResampleAttrs.deltaRadius = 0.05
RadialResampleAttrs.center = (0.5, 0.5, 0.5)
RadialResampleAttrs.is3D = 0
SetOperatorOptions(RadialResampleAttrs)
AddPlot("Pseudocolor", "t", 1, 1)
DrawPlots()
Test("ops_radialresampleop_rect2d")
DeleteAllPlots()
CloseDatabase(ds)
#3D, Rectilinear
ds = silo_data_path("rect3d.silo")
OpenDatabase(ds)
AddPlot("Mesh", "quadmesh3d", 1, 1)
AddOperator("RadialResample")
RadialResampleAtts = RadialResampleAttributes()
RadialResampleAtts.isFast = 0
RadialResampleAtts.minTheta = -90
RadialResampleAtts.maxTheta = 90
RadialResampleAtts.deltaTheta = 5
RadialResampleAtts.radius = 0.5
RadialResampleAtts.deltaRadius = 0.05
RadialResampleAtts.center = (0.5, 0.5, 0.5)
RadialResampleAtts.is3D = 1
RadialResampleAtts.minAzimuth = 0
RadialResampleAtts.maxAzimuth = 360
RadialResampleAtts.deltaAzimuth = 5
SetOperatorOptions(RadialResampleAtts, 1)
AddPlot("Pseudocolor", "w", 1, 1)
DrawPlots()
Test("ops_radialresampleop_rect3d")
DeleteAllPlots()
CloseDatabase(ds)
#2D, Rectilinear, Multiple Domains
ds = silo_data_path("multi_rect2d.silo")
OpenDatabase(ds)
AddPlot("Mesh", "mesh1", 1, 1)
AddOperator("RadialResample", 1)
RadialResampleAtts = RadialResampleAttributes()
RadialResampleAtts.isFast = 0
RadialResampleAtts.minTheta = 0
RadialResampleAtts.maxTheta = 360
RadialResampleAtts.deltaTheta = 5
RadialResampleAtts.radius = 1
RadialResampleAtts.deltaRadius = 0.05
RadialResampleAtts.center = (0.3, 0, 0)
RadialResampleAtts.is3D = 0
RadialResampleAtts.minAzimuth = 0
RadialResampleAtts.maxAzimuth = 180
RadialResampleAtts.deltaAzimuth = 5
SetOperatorOptions(RadialResampleAtts, 1)
AddPlot("Pseudocolor", "vec_magnitude", 1, 1)
DrawPlots()
Test("ops_radialresampleop_multi_rect2d")
DeleteAllPlots()
CloseDatabase(ds)
# 3D, Rectilinear, Multiple Domains
ds = silo_data_path("multi_rect3d.silo")
OpenDatabase(ds)
AddPlot("Mesh", "mesh1", 1, 1)
AddOperator("RadialResample", 1)
RadialResampleAtts = RadialResampleAttributes()
RadialResampleAtts.isFast = 0
RadialResampleAtts.minTheta = -90
RadialResampleAtts.maxTheta = 90
RadialResampleAtts.deltaTheta = 5
RadialResampleAtts.radius = 0.5
RadialResampleAtts.deltaRadius = 0.05
RadialResampleAtts.center = (0.5, 0.5, 0.5)
RadialResampleAtts.is3D = 1
RadialResampleAtts.minAzimuth = 0
RadialResampleAtts.maxAzimuth = 360
RadialResampleAtts.deltaAzimuth = 5
SetOperatorOptions(RadialResampleAtts)
AddPlot("Pseudocolor", "w")
DrawPlots()
Test("ops_radialresampleop_multi_rect3d")
DeleteAllPlots()
CloseDatabase(ds)
Exit()
|
the-stack_0_13481 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Optional, Any
import torch
from pytorch_lightning.accelerators.accelerator import Accelerator, ReduceOp
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.distributed.dist import LightningDistributed
class GPUAccelerator(Accelerator):
amp_backend: AMPType
def __init__(self, trainer, cluster_environment=None):
"""
Runs training using a single GPU
Example::
# default
trainer = Trainer(accelerator=GPUAccelerator())
"""
super().__init__(trainer, cluster_environment)
self.dist = LightningDistributed()
self.nickname = None
def setup(self, model):
# call setup
self.trainer.call_setup_hook(model)
torch.cuda.set_device(self.trainer.root_gpu)
model.cuda(self.trainer.root_gpu)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.setup_optimizers(model)
# 16-bit
model = self.trainer.precision_connector.connect(model)
self.trainer.model = model
def train(self):
model = self.trainer.model
# set up training routine
self.trainer.train_loop.setup_training(model)
# train or test
results = self.train_or_test()
return results
def training_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.__training_step(args)
else:
output = self.__training_step(args)
return output
def __training_step(self, args):
batch = args[0]
batch = self.to_device(batch)
args[0] = batch
output = self.trainer.model.training_step(*args)
return output
def validation_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.__validation_step(args)
else:
output = self.__validation_step(args)
return output
def __validation_step(self, args):
batch = args[0]
batch = self.to_device(batch)
args[0] = batch
output = self.trainer.model.validation_step(*args)
return output
def test_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.__test_step(args)
else:
output = self.__test_step(args)
return output
def __test_step(self, args):
batch = args[0]
batch = self.to_device(batch)
args[0] = batch
output = self.trainer.model.test_step(*args)
return output
def to_device(self, batch):
gpu_id = 0
if isinstance(self.trainer.data_parallel_device_ids, list):
gpu_id = self.trainer.data_parallel_device_ids[0]
# Don't copy the batch since there is a single gpu that the batch could
# be referenced from and if there are multiple optimizers the batch will
# wind up copying it to the same device repeatedly.
return self.batch_to_device(batch, gpu_id)
def sync_tensor(self,
tensor: Union[torch.Tensor],
group: Optional[Any] = None,
reduce_op: Optional[Union[ReduceOp, str]] = None) -> torch.Tensor:
return tensor
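
# Illustrative sketch (assumed, not part of the original file): in normal use the
# Trainer constructs this accelerator itself, so the manual wiring below only
# documents the call order and is not a supported public API.
def _example_manual_gpu_run(trainer, model):
    accelerator = GPUAccelerator(trainer)  # cluster_environment defaults to None
    accelerator.setup(model)               # moves model to trainer.root_gpu, builds optimizers
    return accelerator.train()             # delegates to the train-or-test routine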
|
the-stack_0_13483 | # Store public open-data parking lot records in the DB (14,417 entries in total)
import json
from math import fsum
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.get_database('parking_lot')
def get_data():
with open('static/seoul_park_lot.json', encoding='UTF8') as json_file:
result = json.load(json_file)
datas = result["DATA"]
count = 0
for data in datas:
            # Define the fields we need (name, address, paid/free, free-at-night, base fee, base time, extra fee, weekday start/end times, latitude, longitude)
park_id = count
name = data['parking_name']
tel = data['tel']
address = data['addr']
free = data['pay_nm']
night_free = data['night_free_open']
basic_cost = data['rates']
basic_time = data['time_rate']
add_cost = data['add_rates']
wbt = data['weekday_begin_time']
wet = data['weekday_end_time']
            # Reformat the time string, e.g. 1200 -> 12:00
weekday_begin_time = wbt[:2] + ":" + wbt[2:]
weekday_end_time = wet[:2] + ":" + wet[2:]
lat = data['lat']
lng = data['lng']
doc = {
"park_id": park_id,
"Name": name,
"Tel": tel,
"Address": address,
"Free": free,
"Night free": night_free,
"Basic_cost": basic_cost,
"Basic_time": basic_time,
"Add cost": add_cost,
"Weekday begin time": weekday_begin_time,
"Weekday end time": weekday_end_time,
"location": {
"type": 'Point',
"coordinates": [lng, lat] # [경도,위도] 순서
},
}
count += 1
            # Insert the document
db.park_info.insert_one(doc)
print(db.park_info.count())
# Merge records with duplicate names into one, storing the mean lat/lng (compressed to 921 entries)
def remove_dup_name():
db_list = list(db.park_info.find({}, {'_id': False}))
count = db.park_info.count()
names = []
lngs = []
lats = []
    # Extract only name, lng, lat and store them in lists
for i in db_list:
names.append(i["Name"])
lngs.append(i["location"]["coordinates"][0])
lats.append(i["location"]["coordinates"][1])
nll = [[''] * 3 for i in range(len(names))] # nll means name, lng, lat
for i in range(0, count):
nll[i][0] = names[i]
nll[i][1] = lngs[i]
nll[i][2] = lats[i]
# for i in range(2309, 2326):
# print(nll[i][0], nll[i][1], nll[i][2])
temp = 0
for i in range(0, count):
tmp_lng = [nll[i][1]]
tmp_lat = [nll[i][2]]
for j in range(i + 1, count):
if nll[i][0] != nll[j][0]:
continue
elif nll[i][0] == '':
continue
else:
temp += 1
                # print(nll[j][0], 'is a duplicate; deleting entry', j)
tmp_lng.append(nll[j][1])
tmp_lat.append(nll[j][2])
                # Reset duplicate-name entries to '' except the first occurrence
nll[j][0] = ''
nll[j][1] = 0
nll[j][2] = 0
mean_lng = round(fsum(tmp_lng) / len(tmp_lng), 8)
mean_lat = round(fsum(tmp_lat) / len(tmp_lat), 8)
nll[i][1] = mean_lng
nll[i][2] = mean_lat
tmp_count = 0
for i in range(0, count):
if nll[i][0] == '':
tmp_count += 1
            # print('deleted entry', i)
continue
print(i, nll[i][0], nll[i][1], nll[i][2])
    print('Deleting', tmp_count, 'entries in total')
    print(db.park_info.count())
for i in range(0, count):
if nll[i][0] == '':
db.park_info.delete_one({'park_id': i})
            # print('deleted entry', i)
print(temp, count, db.park_info.count())
while True:
if (db.park_info.count() == 0):
get_data()
else:
remove_dup_name()
break
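
# Illustrative query sketch (assumed, not created by this script): the documents
# above store GeoJSON points, so a 2dsphere index enables radius searches.
def find_parking_near(lng, lat, max_meters=1000):
    db.park_info.create_index([('location', '2dsphere')])
    return list(db.park_info.find(
        {
            'location': {
                '$near': {
                    '$geometry': {'type': 'Point', 'coordinates': [lng, lat]},
                    '$maxDistance': max_meters,
                }
            }
        },
        {'_id': False},
    ))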
|
the-stack_0_13484 | """pokerFace URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app import views as vs
urlpatterns = [
path('app/', vs.index),
path('admin/', admin.site.urls),
path('app/index/', vs.index),
path('app/register/', vs.createUser),
path('app/login/', vs.myLogin),
path('app/logout/', vs.myLogout),
path('app/modify/', vs.modify),
path('app/Camera/', vs.CMR),
path('app/Camera2Server/', vs.CMR2server),
path('app/text2audio/', vs.text2audio),
path('app/rank/', vs.rank),
path('app/history/', vs.history),
path('app/deleteHistory/', vs.delete_history),
path('app/updateHistory/', vs.update_history),
path('app/thumbsUp/', vs.thumbs_up),
]
|
the-stack_0_13486 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import AppsOperations, BindingsOperations, CertificatesOperations, ConfigServersOperations, CustomDomainsOperations, DeploymentsOperations, MonitoringSettingsOperations, Operations, RuntimeVersionsOperations, ServicesOperations, SkusOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClient:
"""REST API for Azure Spring Cloud.
:ivar services: ServicesOperations operations
:vartype services: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ServicesOperations
:ivar config_servers: ConfigServersOperations operations
:vartype config_servers:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ConfigServersOperations
:ivar monitoring_settings: MonitoringSettingsOperations operations
:vartype monitoring_settings:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.MonitoringSettingsOperations
:ivar apps: AppsOperations operations
:vartype apps: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.AppsOperations
:ivar bindings: BindingsOperations operations
:vartype bindings: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.BindingsOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CertificatesOperations
:ivar custom_domains: CustomDomainsOperations operations
:vartype custom_domains:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CustomDomainsOperations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.DeploymentsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.Operations
:ivar runtime_versions: RuntimeVersionsOperations operations
:vartype runtime_versions:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.RuntimeVersionsOperations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.SkusOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AppPlatformManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.services = ServicesOperations(self._client, self._config, self._serialize, self._deserialize)
self.config_servers = ConfigServersOperations(self._client, self._config, self._serialize, self._deserialize)
self.monitoring_settings = MonitoringSettingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.apps = AppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.bindings = BindingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.runtime_versions = RuntimeVersionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AppPlatformManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
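
# Illustrative usage sketch (assumed, not part of the generated code): the
# resource group / service names are placeholders, and the exact operation
# signatures should be checked against the operations classes above.
async def _example_list_apps(credential, subscription_id):
    async with AppPlatformManagementClient(credential, subscription_id) as client:
        apps = client.apps.list("my-resource-group", "my-spring-service")
        return [app.name async for app in apps]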
|
the-stack_0_13487 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence
from torchmetrics import SSIM as _SSIM
from pytorch_lightning.metrics.utils import deprecated_metrics, void
class SSIM(_SSIM):
@deprecated_metrics(target=_SSIM)
def __init__(
self,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: str = "elementwise_mean",
data_range: Optional[float] = None,
k1: float = 0.01,
k2: float = 0.03,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
"""
This implementation refers to :class:`~torchmetrics.SSIM`.
.. deprecated::
Use :class:`~torchmetrics.SSIM`. Will be removed in v1.5.0.
"""
void(kernel_size, sigma, reduction, data_range, k1, k2, compute_on_step, dist_sync_on_step, process_group)
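
# Illustrative migration sketch (assumed): since this class only forwards to
# torchmetrics, new code can instantiate the torchmetrics implementation directly.
def _example_torchmetrics_ssim():
    import torch
    preds = torch.rand(2, 3, 32, 32)
    target = preds * 0.75
    metric = _SSIM(kernel_size=(11, 11), data_range=1.0)
    return metric(preds, target)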
|
the-stack_0_13488 | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='sprofiler',
version='0.1.0',
author='Bryan Brzycki',
author_email='[email protected]',
description='Lightweight profiler with checkpoints',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/bbrzycki/sprofiler',
project_urls={
'Source': 'https://github.com/bbrzycki/sprofiler'
},
packages=setuptools.find_packages(),
# include_package_data=True,
install_requires=[
'numpy>=1.18.1',
# 'scipy>=1.4.1',
# 'astropy>=4.0',
# 'blimpy>=2.0.0',
# 'matplotlib>=3.1.3',
# 'tqdm>=4.47.0',
# 'sphinx-rtd-theme>=0.4.3'
],
classifiers=(
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
),
)
|
the-stack_0_13489 | # coding=utf-8
"""Search SoundCloud playlists for audio."""
from __future__ import absolute_import
import os
import string
import sys
import requests
import soundcloud
from tqdm import tqdm
def sanitize(s):
return ''.join(
c for c in s
if c in '-_.() {}{}'.format(string.ascii_letters, string.digits))
if 'SOUNDCLOUD_API_KEY' in os.environ:
API_KEY = os.environ['SOUNDCLOUD_API_KEY']
else:
API_KEY = "81f430860ad96d8170e3bf1639d4e072"
def scrape(query, include, exclude, quiet, overwrite):
"""Search SoundCloud and download audio from discovered playlists."""
# Launch SoundCloud client.
client = soundcloud.Client(client_id=API_KEY)
# Generator for yielding all results pages.
def pagination(x):
yield x
while x.next_href:
x = client.get(x.next_href)
yield x
# Search SoundCloud for playlists.
for playlists in pagination(
client.get('/playlists',
q=query,
tags=','.join(include) if include else '',
linked_partitioning=1,
representation='compact')):
# Download playlists.
for playlist in playlists.collection:
# Skip playlists containing filter terms.
haystack = (playlist.title +
(' ' + playlist.description
if playlist.description else '')).lower()
if any(needle in haystack for needle in exclude):
continue
# Create directory for playlist.
directory = sanitize(playlist.title)
if directory == '':
continue
if not os.path.exists(directory):
os.mkdir(directory)
# Download tracks in playlist.
for track in client.get(playlist.tracks_uri):
file = os.path.join(directory, sanitize(track.title) + '.mp3')
# Skip existing files.
if os.path.exists(file) and not overwrite:
continue
# Skip tracks that are not allowed to be streamed.
if not track.streamable:
continue
# Skip tracks named with filter terms.
haystack = (track.title + ' ' + track.description + ' ' +
track.tag_list).lower()
if any(needle in haystack for needle in exclude):
continue
# Download track.
r = requests.get(client.get(track.stream_url,
allow_redirects=False).location,
stream=True)
total_size = int(r.headers['content-length'])
chunk_size = 1000000 # 1 MB chunks
with open(file, 'wb') as f:
for data in tqdm(
r.iter_content(chunk_size),
desc=track.title,
total=total_size / chunk_size,
unit='MB',
file=sys.stdout):
f.write(data)
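
# Illustrative invocation (assumed; the query and tag filters are placeholders).
if __name__ == '__main__':
    scrape(
        query='field recordings',
        include=['ambient'],
        exclude=['mix', 'podcast'],
        quiet=False,
        overwrite=False,
    )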
|
the-stack_0_13490 | from django import forms
from .models import Department, Province, District
class DepartmentForm(forms.Form):
department = forms.ModelChoiceField(
queryset=Department.objects.all()
)
class ProvinceForm(DepartmentForm):
province = forms.ModelChoiceField(
queryset=Province.objects.none()
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.is_bound:
department = self._get_field_value('department')
if department:
self.fields['province'].queryset = Province.objects.filter(
parent=department
)
def _get_field_value(self, name):
field = self.fields[name]
value = field.widget.value_from_datadict(
self.data,
self.files,
self.add_prefix(name)
)
try:
return field.clean(value)
except:
return None
class DistrictForm(ProvinceForm):
district = forms.ModelChoiceField(
queryset=District.objects.none()
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.is_bound:
province = self._get_field_value('province')
if province:
self.fields['district'].queryset = District.objects.filter(
parent=province
)
UbigeoForm = DistrictForm
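
# Illustrative view sketch (assumed; the template name and URL wiring are not
# part of this module): validating the cascading department/province/district
# selects with the combined form.
def example_ubigeo_view(request):
    from django.shortcuts import render
    form = UbigeoForm(request.POST or None)
    context = {'form': form}
    if request.method == 'POST' and form.is_valid():
        context['selected_district'] = form.cleaned_data['district']
    return render(request, 'ubigeo.html', context)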
|
the-stack_0_13492 | ##### EXERCISE 70 #####
print('-'*50)
print('{^} LOJA DO BARATÃO')
print('-'*50)
menor = cont = caro = total = 0
barato = 'a'
while True:
produto = str(input('DIGITE O NOME DO PRODUTO : '))
cont += 1
preço = float(input('Digite o valor do produto : '))
total += preço
if preço >=1000:
caro += 1
if cont == 1 or preço < menor:
menor = preço
barato = produto
resp = '1'
while resp not in "SN":
resp = str(input('Quer continuar ? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print('-'*50)
print(f'{caro} Produtos estão acima de 1000 R$ ')
print(f'O total da compra foi de {total:10.2f}')
print(f'O produto mais barato é {barato} e custou {menor:10.2f}')
print('FIM DO PROGRAMA') |
the-stack_0_13493 | from datetime import date, datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT, period as libperiod
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
from pandas import NaT, Period, Timedelta, Timestamp, offsets
import pandas._testing as tm
class TestPeriodConstruction:
def test_construction(self):
i1 = Period("1/1/2005", freq="M")
i2 = Period("Jan 2005")
assert i1 == i2
i1 = Period("2005", freq="A")
i2 = Period("2005")
i3 = Period("2005", freq="a")
assert i1 == i2
assert i1 == i3
i4 = Period("2005", freq="M")
i5 = Period("2005", freq="m")
assert i1 != i4
assert i4 == i5
i1 = Period.now("Q")
i2 = Period(datetime.now(), freq="Q")
i3 = Period.now("q")
assert i1 == i2
assert i1 == i3
i1 = Period("1982", freq="min")
i2 = Period("1982", freq="MIN")
assert i1 == i2
i1 = Period(year=2005, month=3, day=1, freq="D")
i2 = Period("3/1/2005", freq="D")
assert i1 == i2
i3 = Period(year=2005, month=3, day=1, freq="d")
assert i1 == i3
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
msg = "Must supply freq for ordinal value"
with pytest.raises(ValueError, match=msg):
Period(ordinal=200701)
msg = "Invalid frequency: X"
with pytest.raises(ValueError, match=msg):
Period("2007-1-1", freq="X")
# GH#34703 tuple freq disallowed
with pytest.raises(TypeError, match="pass as a string instead"):
Period("1982", freq=("Min", 1))
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period("3/10/12", freq="B")
i2 = Period("3/10/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/11/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/12/12", freq="D")
assert i1 == i2.asfreq("B")
i3 = Period("3/10/12", freq="b")
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq="B")
i2 = Period("3/12/12", freq="B")
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq="Q")
i2 = Period("1/1/2005", freq="Q")
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq="Q")
i2 = Period("9/1/2005", freq="Q")
assert i1 == i2
i1 = Period("2005Q1")
i2 = Period(year=2005, quarter=1, freq="Q")
i3 = Period("2005q1")
assert i1 == i2
assert i1 == i3
i1 = Period("05Q1")
assert i1 == i2
lower = Period("05q1")
assert i1 == lower
i1 = Period("1Q2005")
assert i1 == i2
lower = Period("1q2005")
assert i1 == lower
i1 = Period("1Q05")
assert i1 == i2
lower = Period("1q05")
assert i1 == lower
i1 = Period("4Q1984")
assert i1.year == 1984
lower = Period("4q1984")
assert i1 == lower
def test_construction_month(self):
expected = Period("2007-01", freq="M")
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period(200701, freq="M")
assert i1 == expected
i1 = Period(ordinal=200701, freq="M")
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
"1/1/2005", freq="M"
)
assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="A")
assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
"3/10/12", freq="B"
)
assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
assert Period(
year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=1, freq="Q")
assert Period(
year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=2, freq="Q")
assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
year=2005, month=3, day=1, freq="D"
)
assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
year=2012, month=3, day=10, freq="B"
)
expected = Period("2005-03-01", freq="3D")
assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
assert Period(year=2005, month=3, day=1, freq="3D") == expected
assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
year=2012, month=3, day=10, freq="3B"
)
assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq="M")
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
def test_invalid_arguments(self):
msg = "Must supply freq for datetime value"
with pytest.raises(ValueError, match=msg):
Period(datetime.now())
with pytest.raises(ValueError, match=msg):
Period(datetime.now().date())
msg = "Value must be Period, string, integer, or datetime"
with pytest.raises(ValueError, match=msg):
Period(1.6, freq="D")
msg = "Ordinal must be an integer"
with pytest.raises(ValueError, match=msg):
Period(ordinal=1.6, freq="D")
msg = "Only value or ordinal but not both should be given but not both"
with pytest.raises(ValueError, match=msg):
Period(ordinal=2, value=1, freq="D")
msg = "If value is None, freq cannot be None"
with pytest.raises(ValueError, match=msg):
Period(month=1)
msg = "Given date string not likely a datetime"
with pytest.raises(ValueError, match=msg):
Period("-2000", "A")
msg = "day is out of range for month"
with pytest.raises(DateParseError, match=msg):
Period("0", "A")
msg = "Unknown datetime string format, unable to parse"
with pytest.raises(DateParseError, match=msg):
Period("1/1/-2000", "A")
def test_constructor_corner(self):
expected = Period("2007-01", freq="2M")
assert Period(year=2007, month=1, freq="2M") == expected
assert Period(None) is NaT
p = Period("2007-01-01", freq="D")
result = Period(p, freq="A")
exp = Period("2007", freq="A")
assert result == exp
def test_constructor_infer_freq(self):
p = Period("2007-01-01")
assert p.freq == "D"
p = Period("2007-01-01 07")
assert p.freq == "H"
p = Period("2007-01-01 07:10")
assert p.freq == "T"
p = Period("2007-01-01 07:10:15")
assert p.freq == "S"
p = Period("2007-01-01 07:10:15.123")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123000")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123400")
assert p.freq == "U"
def test_multiples(self):
result1 = Period("1989", freq="2A")
result2 = Period("1989", freq="A")
assert result1.ordinal == result2.ordinal
assert result1.freqstr == "2A-DEC"
assert result2.freqstr == "A-DEC"
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
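        # With a "2A" frequency each step spans two annual ordinals, so adding or
        # subtracting 1 moves the underlying ordinal by 2.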
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_quarterly(self, month):
# bugs in scikits.timeseries
freq = f"Q-{month}"
exp = Period("1989Q3", freq=freq)
assert "1989Q3" in str(exp)
stamp = exp.to_timestamp("D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp("3D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_annual(self, month):
# bugs in scikits.timeseries
freq = f"A-{month}"
exp = Period("1989", freq=freq)
stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("num", range(10, 17))
def test_period_cons_weekly(self, num, day):
daystr = f"2011-02-{num}"
freq = f"W-{day}"
result = Period(daystr, freq=freq)
expected = Period(daystr, freq="D").asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = Period("2011-01", freq="M")
res = Period._from_ordinal(p.ordinal, freq="M")
assert p == res
assert isinstance(res, Period)
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_construct_from_nat_string_and_freq(self, freq):
per = Period("NaT", freq=freq)
assert per is NaT
per = Period("NaT", freq="2" + freq)
assert per is NaT
per = Period("NaT", freq="3" + freq)
assert per is NaT
def test_period_cons_nat(self):
p = Period("nat", freq="W-SUN")
assert p is NaT
p = Period(iNaT, freq="D")
assert p is NaT
p = Period(iNaT, freq="3D")
assert p is NaT
p = Period(iNaT, freq="1D1H")
assert p is NaT
p = Period("NaT")
assert p is NaT
p = Period(iNaT)
assert p is NaT
def test_period_cons_mult(self):
p1 = Period("2011-01", freq="3M")
p2 = Period("2011-01", freq="M")
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == "3M"
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == "M"
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
msg = "Frequency must be positive, because it represents span: -3M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-3M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0M")
def test_period_cons_combined(self):
p = [
(
Period("2011-01", freq="1D1H"),
Period("2011-01", freq="1H1D"),
Period("2011-01", freq="H"),
),
(
Period(ordinal=1, freq="1D1H"),
Period(ordinal=1, freq="1H1D"),
Period(ordinal=1, freq="H"),
),
]
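        # "1D1H" and "1H1D" are combined tick frequencies: both normalize to a
        # 25-hour span, so their ordinals line up with the plain hourly periods.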
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == "25H"
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == "25H"
assert p3.freq == offsets.Hour()
assert p3.freqstr == "H"
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
msg = "Frequency must be positive, because it represents span: -25H"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1H1D")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1H1D")
msg = "Frequency must be positive, because it represents span: 0D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0D0H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="0D0H")
# You can only combine together day and intraday offsets
msg = "Invalid frequency: 1W1D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1W1D")
msg = "Invalid frequency: 1D1W"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
@pytest.mark.parametrize("day", ["1970/01/01 ", "2020-12-31 ", "1981/09/13 "])
@pytest.mark.parametrize("hour", ["00:00:00", "00:00:01", "23:59:59", "12:00:59"])
@pytest.mark.parametrize(
"sec_float, expected",
[
(".000000001", 1),
(".000000999", 999),
(".123456789", 789),
(".999999999", 999),
],
)
def test_period_constructor_nanosecond(self, day, hour, sec_float, expected):
# GH 34621
assert Period(day + hour + sec_float).start_time.nanosecond == expected
@pytest.mark.parametrize("hour", range(24))
def test_period_large_ordinal(self, hour):
# Issue #36430
# Integer overflow for Period over the maximum timestamp
p = Period(ordinal=2562048 + hour, freq="1H")
assert p.hour == hour
class TestPeriodMethods:
def test_round_trip(self):
p = Period("2000Q1")
new_p = tm.round_trip_pickle(p)
assert new_p == p
def test_hash(self):
assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))
# --------------------------------------------------------------
# to_timestamp
@pytest.mark.parametrize("tzstr", ["Europe/Brussels", "Asia/Tokyo", "US/Pacific"])
def test_to_timestamp_tz_arg(self, tzstr):
# GH#34522 tz kwarg deprecated
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="A", tz=tzstr)
exp = Timestamp("31/12/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
@pytest.mark.parametrize(
"tzstr",
["dateutil/Europe/Brussels", "dateutil/Asia/Tokyo", "dateutil/US/Pacific"],
)
def test_to_timestamp_tz_arg_dateutil(self, tzstr):
tz = maybe_get_tz(tzstr)
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
def test_to_timestamp_tz_arg_dateutil_from_string(self):
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels")
assert p.tz == dateutil_gettz("Europe/Brussels")
def test_to_timestamp_mult(self):
p = Period("2011-01", freq="M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
p = Period("2011-01", freq="3M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
def test_to_timestamp(self):
p = Period("1982", freq="A")
start_ts = p.to_timestamp(how="S")
aliases = ["s", "StarT", "BEGIn"]
for a in aliases:
assert start_ts == p.to_timestamp("D", how=a)
            # freq with mult should not affect the result
assert start_ts == p.to_timestamp("3D", how=a)
end_ts = p.to_timestamp(how="E")
aliases = ["e", "end", "FINIsH"]
for a in aliases:
assert end_ts == p.to_timestamp("D", how=a)
assert end_ts == p.to_timestamp("3D", how=a)
from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "S"]
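        # Helper: the expected end_time is the last nanosecond covered by the
        # period, i.e. one nanosecond before the next period starts (a business
        # day is treated as spanning a full calendar day).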
def _ex(p):
if p.freq == "B":
return p.start_time + Timedelta(days=1, nanoseconds=-1)
return Timestamp((p + p.freq).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period("1982", freq=fcode)
result = p.to_timestamp().to_period(fcode)
assert result == p
assert p.start_time == p.to_timestamp(how="S")
assert p.end_time == _ex(p)
# Frequency other than daily
p = Period("1985", freq="A")
result = p.to_timestamp("H", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("3H", how="end")
assert result == expected
result = p.to_timestamp("T", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("2T", how="end")
assert result == expected
result = p.to_timestamp(how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
expected = datetime(1985, 1, 1)
result = p.to_timestamp("H", how="start")
assert result == expected
result = p.to_timestamp("T", how="start")
assert result == expected
result = p.to_timestamp("S", how="start")
assert result == expected
result = p.to_timestamp("3H", how="start")
assert result == expected
result = p.to_timestamp("5S", how="start")
assert result == expected
def test_to_timestamp_business_end(self):
per = Period("1990-01-05", "B") # Friday
result = per.to_timestamp("B", how="E")
expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize(
"ts, expected",
[
("1970-01-01 00:00:00", 0),
("1970-01-01 00:00:00.000001", 1),
("1970-01-01 00:00:00.00001", 10),
("1970-01-01 00:00:00.499", 499000),
("1999-12-31 23:59:59.999", 999000),
("1999-12-31 23:59:59.999999", 999999),
("2050-12-31 23:59:59.5", 500000),
("2050-12-31 23:59:59.500001", 500001),
("2050-12-31 23:59:59.123456", 123456),
],
)
@pytest.mark.parametrize("freq", [None, "us", "ns"])
def test_to_timestamp_microsecond(self, ts, expected, freq):
# GH 24444
result = Period(ts).to_timestamp(freq=freq).microsecond
assert result == expected
# --------------------------------------------------------------
# Rendering: __repr__, strftime, etc
def test_repr(self):
p = Period("Jan-2000")
assert "2000-01" in repr(p)
p = Period("2000-12-15")
assert "2000-12-15" in repr(p)
def test_repr_nat(self):
p = Period("nat", freq="M")
assert repr(NaT) in repr(p)
def test_millisecond_repr(self):
p = Period("2000-01-01 12:15:02.123")
assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"
def test_microsecond_repr(self):
p = Period("2000-01-01 12:15:02.123567")
assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"
def test_strftime(self):
# GH#3363
p = Period("2000-1-1 12:34:12", freq="S")
res = p.strftime("%Y-%m-%d %H:%M:%S")
assert res == "2000-01-01 12:34:12"
assert isinstance(res, str)
class TestPeriodProperties:
"""Test properties such as year, month, weekday, etc...."""
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_is_leap_year(self, freq):
# GH 13727
p = Period("2000-01-01 00:00:00", freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period("1999-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
p = Period("2004-01-01 00:00:00", freq=freq)
assert p.is_leap_year
p = Period("2100-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="M")
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_freq_str(self):
i1 = Period("1982", freq="Min")
assert i1.freq == offsets.Minute()
assert i1.freqstr == "T"
def test_period_deprecated_freq(self):
cases = {
"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"],
}
msg = INVALID_FREQ_ERR_MSG
for exp, freqs in cases.items():
for freq in freqs:
with pytest.raises(ValueError, match=msg):
Period("2016-03-01 09:00", freq=freq)
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq=freq)
# check supported freq-aliases still works
p1 = Period("2016-03-01 09:00", freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
def _period_constructor(bound, offset):
return Period(
year=bound.year,
month=bound.month,
day=bound.day,
hour=bound.hour,
minute=bound.minute,
second=bound.second + offset,
freq="us",
)
@pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
@pytest.mark.parametrize("period_property", ["start_time", "end_time"])
    def test_outer_bounds_start_and_end_time(self, bound, offset, period_property):
# GH #13346
period = TestPeriodProperties._period_constructor(bound, offset)
with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond"):
getattr(period, period_property)
@pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
@pytest.mark.parametrize("period_property", ["start_time", "end_time"])
def test_inner_bounds_start_and_end_time(self, bound, offset, period_property):
# GH #13346
period = TestPeriodProperties._period_constructor(bound, -offset)
expected = period.to_timestamp().round(freq="S")
assert getattr(period, period_property).round(freq="S") == expected
expected = (bound - offset * Timedelta(1, unit="S")).floor("S")
assert getattr(period, period_property).floor("S") == expected
def test_start_time(self):
freq_lst = ["A", "Q", "M", "D", "H", "T", "S"]
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period("2012", freq=f)
assert p.start_time == xp
assert Period("2012", freq="B").start_time == datetime(2012, 1, 2)
assert Period("2012", freq="W").start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period("2012", freq="A")
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="Q")
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period("2012", freq="M")
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="D")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period("2012", freq="H")
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="B")
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period("2012", freq="W")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period("2012", freq="15D")
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period("2012", freq="1D1H")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="1H1D")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_end_time_business_friday(self):
# GH#34449
per = Period("1990-01-05", "B")
result = per.end_time
expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
assert result == expected
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period("2013-1-1", "W-SAT")
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq="A", year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq="M", year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq="W", year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq="B", year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
d_date = Period(freq="D", year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq="H", year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq="2H", year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert (
Period(freq="H", year=2012, month=2, day=1, hour=0).days_in_month == 29
)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert (
Period(freq="D", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
== 29
)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert (
Period(
freq="Min", year=2012, month=2, day=1, hour=0, minute=0, second=0
).days_in_month
== 29
)
class TestPeriodField:
def test_get_period_field_array_raises_on_out_of_range(self):
msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'"
with pytest.raises(ValueError, match=msg):
libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestPeriodComparisons:
def test_comparison_same_period_different_object(self):
# Separate Period objects for the same period
left = Period("2000-01", "M")
right = Period("2000-01", "M")
assert left == right
assert left >= right
assert left <= right
assert not left < right
assert not left > right
def test_comparison_same_freq(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
assert not jan == feb
assert jan != feb
assert jan < feb
assert jan <= feb
assert not jan > feb
assert not jan >= feb
def test_comparison_mismatched_freq(self):
jan = Period("2000-01", "M")
day = Period("2012-01-01", "D")
assert not jan == day
assert jan != day
msg = r"Input has different freq=D from Period\(freq=M\)"
with pytest.raises(IncompatibleFrequency, match=msg):
jan < day
with pytest.raises(IncompatibleFrequency, match=msg):
jan <= day
with pytest.raises(IncompatibleFrequency, match=msg):
jan > day
with pytest.raises(IncompatibleFrequency, match=msg):
jan >= day
def test_comparison_invalid_type(self):
jan = Period("2000-01", "M")
assert not jan == 1
assert jan != 1
int_or_per = "'(Period|int)'"
msg = f"not supported between instances of {int_or_per} and {int_or_per}"
for left, right in [(jan, 1), (1, jan)]:
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
def test_sort_periods(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
mar = Period("2000-03", "M")
periods = [mar, jan, feb]
correctPeriods = [jan, feb, mar]
assert sorted(periods) == correctPeriods
def test_period_cmp_nat(self):
p = Period("2011-01-01", freq="D")
t = Timestamp("2011-01-01")
# confirm Period('NaT') work identical with Timestamp('NaT')
for left, right in [
(NaT, p),
(p, NaT),
(NaT, t),
(t, NaT),
]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestArithmetic:
def test_sub_delta(self):
left, right = Period("2011", freq="A"), Period("2007", freq="A")
result = left - right
assert result == 4 * right.freq
msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
left - Period("2007-01", freq="M")
def test_add_integer(self):
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
assert per1 + 1 == per2
assert 1 + per1 == per2
def test_add_sub_nat(self):
# GH#13071
p = Period("2011-01", freq="M")
assert p + NaT is NaT
assert NaT + p is NaT
assert p - NaT is NaT
assert NaT - p is NaT
def test_add_invalid(self):
# GH#4731
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
msg = "|".join(
[
r"unsupported operand type\(s\)",
"can only concatenate str",
"must be str, not Period",
]
)
with pytest.raises(TypeError, match=msg):
per1 + "str"
with pytest.raises(TypeError, match=msg):
"str" + per1
with pytest.raises(TypeError, match=msg):
per1 + per2
boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
ids = ["identity", "Series", "Index"]
@pytest.mark.parametrize("lbox", boxes, ids=ids)
@pytest.mark.parametrize("rbox", boxes, ids=ids)
def test_add_timestamp_raises(self, rbox, lbox):
# GH#17983
ts = Timestamp("2017")
per = Period("2017", freq="M")
# We may get a different message depending on which class raises
# the error.
msg = "|".join(
[
"cannot add",
"unsupported operand",
"can only operate on a",
"incompatible type",
"ufunc add cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
lbox(ts) + rbox(per)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(ts)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(per)
def test_sub(self):
per1 = Period("2011-01-01", freq="D")
per2 = Period("2011-01-15", freq="D")
off = per1.freq
assert per1 - per2 == -14 * off
assert per2 - per1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
per1 - Period("2011-02", freq="M")
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1 = Period("19910905", freq=tick_classes(n))
p2 = Period("19920406", freq=tick_classes(n))
expected = Period(str(p2), freq=p2.freq.base) - Period(
str(p1), freq=p1.freq.base
)
assert (p2 - p1) == expected
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(offsets.YearEnd, "month"),
(offsets.QuarterEnd, "startingMonth"),
(offsets.MonthEnd, None),
(offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
assert (p2 - p1) == expected
def test_add_offset(self):
# freq is DateOffset
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
exp = Period("2013", freq=freq)
assert p + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
exp = Period("2011-05", freq=freq)
assert p + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + p == exp
exp = Period("2012-03", freq=freq)
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
exp = Period("2011-04-06", freq=freq)
assert p + offsets.Day(5) == exp
assert offsets.Day(5) + p == exp
exp = Period("2011-04-02", freq=freq)
assert p + offsets.Hour(24) == exp
assert offsets.Hour(24) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + np.timedelta64(2, "D") == exp
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
np.timedelta64(2, "D") + p
exp = Period("2011-04-02", freq=freq)
assert p + np.timedelta64(3600 * 24, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600 * 24, "s") + p
exp = Period("2011-03-30", freq=freq)
assert p + timedelta(-2) == exp
assert timedelta(-2) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
exp = Period("2011-04-03 09:00", freq=freq)
assert p + offsets.Day(2) == exp
assert offsets.Day(2) + p == exp
exp = Period("2011-04-01 12:00", freq=freq)
assert p + offsets.Hour(3) == exp
assert offsets.Hour(3) + p == exp
msg = "cannot use operands with types"
exp = Period("2011-04-01 12:00", freq=freq)
assert p + np.timedelta64(3, "h") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3, "h") + p
exp = Period("2011-04-01 10:00", freq=freq)
assert p + np.timedelta64(3600, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600, "s") + p
exp = Period("2011-04-01 11:00", freq=freq)
assert p + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + p == exp
exp = Period("2011-04-05 12:00", freq=freq)
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
def test_sub_offset(self):
# freq is DateOffset
msg = "Input has different freq|Input cannot be converted to Period"
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
assert p - offsets.YearEnd(2) == Period("2009", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
assert p - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
assert p - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
assert p - offsets.Day(5) == Period("2011-03-27", freq=freq)
assert p - offsets.Hour(24) == Period("2011-03-31", freq=freq)
assert p - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
assert p - np.timedelta64(3600 * 24, "s") == Period("2011-03-31", freq=freq)
assert p - timedelta(-2) == Period("2011-04-03", freq=freq)
assert p - timedelta(hours=48) == Period("2011-03-30", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
assert p - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
assert p - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3600, "s") == Period(
"2011-04-01 08:00", freq=freq
)
assert p - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
assert p - timedelta(days=4, minutes=180) == Period(
"2011-03-28 06:00", freq=freq
)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_period_addsub_nat(self, freq):
per = Period("2011-01", freq=freq)
# For subtraction, NaT is treated as another Period object
assert NaT - per is NaT
assert per - NaT is NaT
# For addition, NaT is treated as offset-like
assert NaT + per is NaT
assert per + NaT is NaT
def test_period_ops_offset(self):
p = Period("2011-04-01", freq="D")
result = p + offsets.Day()
exp = Period("2011-04-02", freq="D")
assert result == exp
result = p - offsets.Day(2)
exp = Period("2011-03-30", freq="D")
assert result == exp
msg = r"Input cannot be converted to Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
p + offsets.Hour(2)
with pytest.raises(IncompatibleFrequency, match=msg):
p - offsets.Hour(2)
def test_period_immutable():
# see gh-17116
msg = "not writable"
per = Period("2014Q1")
with pytest.raises(AttributeError, match=msg):
per.ordinal = 14
freq = per.freq
with pytest.raises(AttributeError, match=msg):
per.freq = 2 * freq
def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
assert per1.day == 7
def test_negone_ordinals():
freqs = ["A", "M", "Q", "D", "H", "T", "S"]
period = Period(ordinal=-1, freq="D")
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
assert period.year == 1969
period = Period(ordinal=-1, freq="B")
repr(period)
period = Period(ordinal=-1, freq="W")
repr(period)
def test_invalid_frequency_error_message():
msg = "Invalid frequency: <WeekOfMonth: week=0, weekday=0>"
with pytest.raises(ValueError, match=msg):
Period("2012-01-02", freq="WOM-1MON")
|
the-stack_0_13494 | # -*- coding: utf8 -*-
import sys
import unittest
import platform
IS_PYPY = "PyPy" == platform.python_implementation()
try:
from pygame.tests.test_utils import arrinter
except ImportError:
pass
import pygame
init_called = quit_called = 0
def __PYGAMEinit__(): # called automatically by pygame.init()
global init_called
init_called = init_called + 1
pygame.register_quit(pygame_quit)
# Returning False indicates that the initialization has failed. It is
# purposely done here to test that failing modules are reported.
return False
def pygame_quit():
global quit_called
quit_called = quit_called + 1
quit_hook_ran = 0
def quit_hook():
global quit_hook_ran
quit_hook_ran = 1
class BaseModuleTest(unittest.TestCase):
def tearDown(self):
# Clean up after each test method.
pygame.quit()
def testAutoInit(self):
pygame.init()
pygame.quit()
self.assertEqual(init_called, 1)
self.assertEqual(quit_called, 1)
def test_get_sdl_byteorder(self):
"""Ensure the SDL byte order is valid"""
byte_order = pygame.get_sdl_byteorder()
expected_options = (pygame.LIL_ENDIAN, pygame.BIG_ENDIAN)
self.assertIn(byte_order, expected_options)
def test_get_sdl_version(self):
"""Ensure the SDL version is valid"""
self.assertEqual(len(pygame.get_sdl_version()), 3)
class ExporterBase(object):
def __init__(self, shape, typechar, itemsize):
import ctypes
ndim = len(shape)
self.ndim = ndim
self.shape = tuple(shape)
array_len = 1
for d in shape:
array_len *= d
self.size = itemsize * array_len
self.parent = ctypes.create_string_buffer(self.size)
self.itemsize = itemsize
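            # Build C-contiguous strides: the last axis varies fastest and each
            # earlier stride is the product of the later extents and the itemsize.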
strides = [itemsize] * ndim
for i in range(ndim - 1, 0, -1):
strides[i - 1] = strides[i] * shape[i]
self.strides = tuple(strides)
self.data = ctypes.addressof(self.parent), False
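            # Compose an array-interface typestr such as "<i2" or ">f8": a byte
            # order marker ("|" for single-byte items), the type kind character,
            # and the item size in bytes.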
if self.itemsize == 1:
byteorder = "|"
elif sys.byteorder == "big":
byteorder = ">"
else:
byteorder = "<"
self.typestr = byteorder + typechar + str(self.itemsize)
def assertSame(self, proxy, obj):
self.assertEqual(proxy.length, obj.size)
iface = proxy.__array_interface__
self.assertEqual(iface["typestr"], obj.typestr)
self.assertEqual(iface["shape"], obj.shape)
self.assertEqual(iface["strides"], obj.strides)
self.assertEqual(iface["data"], obj.data)
def test_PgObject_GetBuffer_array_interface(self):
from pygame.bufferproxy import BufferProxy
class Exporter(self.ExporterBase):
def get__array_interface__(self):
return {
"version": 3,
"typestr": self.typestr,
"shape": self.shape,
"strides": self.strides,
"data": self.data,
}
__array_interface__ = property(get__array_interface__)
# Should be ignored by PgObject_GetBuffer
__array_struct__ = property(lambda self: None)
_shape = [2, 3, 5, 7, 11] # Some prime numbers
for ndim in range(1, len(_shape)):
o = Exporter(_shape[0:ndim], "i", 2)
v = BufferProxy(o)
self.assertSame(v, o)
ndim = 2
shape = _shape[0:ndim]
for typechar in ("i", "u"):
for itemsize in (1, 2, 4, 8):
o = Exporter(shape, typechar, itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
for itemsize in (4, 8):
o = Exporter(shape, "f", itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
# Is the dict received from an exporting object properly released?
# The dict should be freed before PgObject_GetBuffer returns.
# When the BufferProxy v's length property is referenced, v calls
# PgObject_GetBuffer, which in turn references Exporter2 o's
# __array_interface__ property. The Exporter2 instance o returns a
# dict subclass for which it keeps both a regular reference and a
# weak reference. The regular reference should be the only
# remaining reference when PgObject_GetBuffer returns. This is
# verified by first checking the weak reference both before and
# after the regular reference held by o is removed.
import weakref, gc
class NoDictError(RuntimeError):
pass
class WRDict(dict):
"""Weak referenceable dict"""
pass
class Exporter2(Exporter):
def get__array_interface__2(self):
self.d = WRDict(Exporter.get__array_interface__(self))
self.dict_ref = weakref.ref(self.d)
return self.d
__array_interface__ = property(get__array_interface__2)
def free_dict(self):
self.d = None
def is_dict_alive(self):
try:
return self.dict_ref() is not None
except AttributeError:
raise NoDictError("__array_interface__ is unread")
o = Exporter2((2, 4), "u", 4)
v = BufferProxy(o)
self.assertRaises(NoDictError, o.is_dict_alive)
length = v.length
self.assertTrue(o.is_dict_alive())
o.free_dict()
gc.collect()
self.assertFalse(o.is_dict_alive())
def test_GetView_array_struct(self):
from pygame.bufferproxy import BufferProxy
class Exporter(self.ExporterBase):
def __init__(self, shape, typechar, itemsize):
super(Exporter, self).__init__(shape, typechar, itemsize)
self.view = BufferProxy(self.__dict__)
def get__array_struct__(self):
return self.view.__array_struct__
__array_struct__ = property(get__array_struct__)
# Should not cause PgObject_GetBuffer to fail
__array_interface__ = property(lambda self: None)
_shape = [2, 3, 5, 7, 11] # Some prime numbers
for ndim in range(1, len(_shape)):
o = Exporter(_shape[0:ndim], "i", 2)
v = BufferProxy(o)
self.assertSame(v, o)
ndim = 2
shape = _shape[0:ndim]
for typechar in ("i", "u"):
for itemsize in (1, 2, 4, 8):
o = Exporter(shape, typechar, itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
for itemsize in (4, 8):
o = Exporter(shape, "f", itemsize)
v = BufferProxy(o)
self.assertSame(v, o)
# Check returned cobject/capsule reference count
try:
from sys import getrefcount
except ImportError:
# PyPy: no reference counting
pass
else:
o = Exporter(shape, typechar, itemsize)
self.assertEqual(getrefcount(o.__array_struct__), 1)
if pygame.HAVE_NEWBUF:
from pygame.tests.test_utils import buftools
def NEWBUF_assertSame(self, proxy, exp):
buftools = self.buftools
Importer = buftools.Importer
self.assertEqual(proxy.length, exp.len)
imp = Importer(proxy, buftools.PyBUF_RECORDS_RO)
self.assertEqual(imp.readonly, exp.readonly)
self.assertEqual(imp.format, exp.format)
self.assertEqual(imp.itemsize, exp.itemsize)
self.assertEqual(imp.ndim, exp.ndim)
self.assertEqual(imp.shape, exp.shape)
self.assertEqual(imp.strides, exp.strides)
self.assertTrue(imp.suboffsets is None)
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
@unittest.skipIf(IS_PYPY, "pypy2 no likey")
def test_newbuf(self):
from pygame.bufferproxy import BufferProxy
Exporter = self.buftools.Exporter
_shape = [2, 3, 5, 7, 11] # Some prime numbers
for ndim in range(1, len(_shape)):
o = Exporter(_shape[0:ndim], "=h")
v = BufferProxy(o)
self.NEWBUF_assertSame(v, o)
ndim = 2
shape = _shape[0:ndim]
for format in [
"b",
"B",
"=h",
"=H",
"=i",
"=I",
"=q",
"=Q",
"f",
"d",
"1h",
"=1h",
"x",
"1x",
"2x",
"3x",
"4x",
"5x",
"6x",
"7x",
"8x",
"9x",
]:
o = Exporter(shape, format)
v = BufferProxy(o)
self.NEWBUF_assertSame(v, o)
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
def test_bad_format(self):
from pygame.bufferproxy import BufferProxy
from pygame.newbuffer import BufferMixin
from ctypes import create_string_buffer, addressof
buftools = self.buftools
Exporter = buftools.Exporter
Importer = buftools.Importer
PyBUF_FORMAT = buftools.PyBUF_FORMAT
for format in [
"",
"=",
"1",
" ",
"2h",
"=2h",
"0x",
"11x",
"=!",
"h ",
" h",
"hh",
"?",
]:
exp = Exporter((1,), format, itemsize=2)
b = BufferProxy(exp)
self.assertRaises(ValueError, Importer, b, PyBUF_FORMAT)
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
@unittest.skipIf(IS_PYPY, "fails on pypy")
def test_PgDict_AsBuffer_PyBUF_flags(self):
from pygame.bufferproxy import BufferProxy
is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
fsys, frev = ("<", ">") if is_lil_endian else (">", "<")
buftools = self.buftools
Importer = buftools.Importer
a = BufferProxy(
{"typestr": "|u4", "shape": (10, 2), "data": (9, False)}
) # 9? No data accesses.
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 4)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 9)
b = Importer(a, buftools.PyBUF_WRITABLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 4)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 9)
b = Importer(a, buftools.PyBUF_ND)
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, 4)
self.assertEqual(b.shape, (10, 2))
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 9)
a = BufferProxy(
{
"typestr": fsys + "i2",
"shape": (5, 10),
"strides": (24, 2),
"data": (42, False),
}
) # 42? No data accesses.
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, 2)
self.assertTrue(b.format is None)
self.assertEqual(b.len, 100)
self.assertEqual(b.itemsize, 2)
self.assertEqual(b.shape, (5, 10))
self.assertEqual(b.strides, (24, 2))
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 42)
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, 2)
self.assertEqual(b.format, "=h")
self.assertEqual(b.len, 100)
self.assertEqual(b.itemsize, 2)
self.assertEqual(b.shape, (5, 10))
self.assertEqual(b.strides, (24, 2))
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, 42)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
a = BufferProxy(
{
"typestr": frev + "i2",
"shape": (3, 5, 10),
"strides": (120, 24, 2),
"data": (1000000, True),
}
) # 1000000? No data accesses.
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, 3)
self.assertEqual(b.format, frev + "h")
self.assertEqual(b.len, 300)
self.assertEqual(b.itemsize, 2)
self.assertEqual(b.shape, (3, 5, 10))
self.assertEqual(b.strides, (120, 24, 2))
self.assertTrue(b.suboffsets is None)
self.assertTrue(b.readonly)
self.assertEqual(b.buf, 1000000)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FULL)
@unittest.skipIf(IS_PYPY or (not pygame.HAVE_NEWBUF), "newbuf with ctypes")
def test_PgObject_AsBuffer_PyBUF_flags(self):
from pygame.bufferproxy import BufferProxy
import ctypes
is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
fsys, frev = ("<", ">") if is_lil_endian else (">", "<")
buftools = self.buftools
Importer = buftools.Importer
e = arrinter.Exporter(
(10, 2), typekind="f", itemsize=ctypes.sizeof(ctypes.c_double)
)
a = BufferProxy(e)
b = Importer(a, buftools.PyBUF_SIMPLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
b = Importer(a, buftools.PyBUF_WRITABLE)
self.assertEqual(b.ndim, 0)
self.assertTrue(b.format is None)
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertTrue(b.shape is None)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
b = Importer(a, buftools.PyBUF_ND)
self.assertEqual(b.ndim, e.nd)
self.assertTrue(b.format is None)
self.assertEqual(b.len, a.length)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertTrue(b.strides is None)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
e = arrinter.Exporter((5, 10), typekind="i", itemsize=2, strides=(24, 2))
a = BufferProxy(e)
b = Importer(a, buftools.PyBUF_STRIDES)
self.assertEqual(b.ndim, e.nd)
self.assertTrue(b.format is None)
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertEqual(b.strides, e.strides)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, e.nd)
self.assertEqual(b.format, "=h")
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertEqual(b.strides, e.strides)
self.assertTrue(b.suboffsets is None)
self.assertFalse(b.readonly)
self.assertEqual(b.buf, e.data)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG)
e = arrinter.Exporter(
(3, 5, 10),
typekind="i",
itemsize=2,
strides=(120, 24, 2),
flags=arrinter.PAI_ALIGNED,
)
a = BufferProxy(e)
b = Importer(a, buftools.PyBUF_FULL_RO)
self.assertEqual(b.ndim, e.nd)
self.assertEqual(b.format, frev + "h")
self.assertEqual(b.len, e.len)
self.assertEqual(b.itemsize, e.itemsize)
self.assertEqual(b.shape, e.shape)
self.assertEqual(b.strides, e.strides)
self.assertTrue(b.suboffsets is None)
self.assertTrue(b.readonly)
self.assertEqual(b.buf, e.data)
self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FULL)
def test_PgObject_GetBuffer_exception(self):
# For consistency with surfarray
from pygame.bufferproxy import BufferProxy
bp = BufferProxy(1)
self.assertRaises(ValueError, getattr, bp, "length")
def not_init_assertions(self):
self.assertFalse(pygame.get_init(), "pygame shouldn't be initialized")
self.assertFalse(pygame.display.get_init(), "display shouldn't be initialized")
if "pygame.mixer" in sys.modules:
self.assertFalse(pygame.mixer.get_init(), "mixer shouldn't be initialized")
if "pygame.font" in sys.modules:
            self.assertFalse(pygame.font.get_init(), "font shouldn't be initialized")
## !!! TODO : Remove when scrap works for OS X
import platform
if platform.system().startswith("Darwin"):
return
try:
self.assertRaises(pygame.error, pygame.scrap.get)
except NotImplementedError:
# Scrap is optional.
pass
# pygame.cdrom
# pygame.joystick
def init_assertions(self):
self.assertTrue(pygame.get_init())
self.assertTrue(pygame.display.get_init())
if "pygame.mixer" in sys.modules:
self.assertTrue(pygame.mixer.get_init())
if "pygame.font" in sys.modules:
self.assertTrue(pygame.font.get_init())
def test_quit__and_init(self):
# __doc__ (as of 2008-06-25) for pygame.base.quit:
# pygame.quit(): return None
# uninitialize all pygame modules
# Make sure everything is not init
self.not_init_assertions()
# Initiate it
pygame.init()
# Check
self.init_assertions()
# Quit
pygame.quit()
# All modules have quit
self.not_init_assertions()
def test_register_quit(self):
"""Ensure that a registered function is called on quit()"""
self.assertFalse(quit_hook_ran)
pygame.init()
pygame.register_quit(quit_hook)
pygame.quit()
self.assertTrue(quit_hook_ran)
def test_get_error(self):
# __doc__ (as of 2008-08-02) for pygame.base.get_error:
# pygame.get_error(): return errorstr
# get the current error message
#
# SDL maintains an internal error message. This message will usually
# be given to you when pygame.error is raised. You will rarely need to
# call this function.
#
# The first error could be all sorts of nonsense or empty.
e = pygame.get_error()
pygame.set_error("hi")
self.assertEqual(pygame.get_error(), "hi")
pygame.set_error("")
self.assertEqual(pygame.get_error(), "")
def test_set_error(self):
# The first error could be all sorts of nonsense or empty.
e = pygame.get_error()
pygame.set_error("hi")
self.assertEqual(pygame.get_error(), "hi")
pygame.set_error("")
self.assertEqual(pygame.get_error(), "")
def test_unicode_error(self):
if sys.version_info.major > 2:
pygame.set_error(u"你好")
self.assertEqual(u"你好", pygame.get_error())
else:
# no unicode objects for now
pygame.set_error(u"你好")
encstr = u"你好".encode("utf8")
self.assertEqual(encstr, pygame.get_error())
def test_init(self):
"""Ensures init() works properly."""
# Make sure nothing initialized.
self.not_init_assertions()
# The exact number of modules can change, but it should never be < 0.
expected_min_passes = 0
# The __PYGAMEinit__ function in this module returns False, so this
# should give a fail count of 1. All other modules should pass.
expected_fails = 1
passes, fails = pygame.init()
self.init_assertions()
self.assertGreaterEqual(passes, expected_min_passes)
self.assertEqual(fails, expected_fails)
def test_get_init(self):
# Test if get_init() gets the init state.
self.assertFalse(pygame.get_init())
def test_get_init__after_init(self):
# Test if get_init() gets the init state after pygame.init() called.
pygame.init()
self.assertTrue(pygame.get_init())
def test_get_init__after_quit(self):
# Test if get_init() gets the init state after pygame.quit() called.
pygame.init()
pygame.quit()
self.assertFalse(pygame.get_init())
def todo_test_segfault(self):
# __doc__ (as of 2008-08-02) for pygame.base.segfault:
# crash
self.fail()
if __name__ == "__main__":
unittest.main()
|
the-stack_0_13496 | from datetime import datetime, timedelta
import fundamental_analysis.financial_statements_entries as fi
import fundamental_analysis.supporting_metrics as me
from fundamental_analysis.metrics_helpers import FundamentalMetricsHelpers
from matilda.quantitative_analysis.risk_factor_modeling import asset_pricing_model as required_rr
import macroeconomic_analysis.macroeconomic_analysis as macro
import numpy as np
from functools import partial
def cost_of_preferred_stock(stock, date=datetime.now(), lookback_period=timedelta(days=0), period: str = 'FY'):
preferred_dividends = fi.preferred_dividends(stock=stock, date=date, lookback_period=lookback_period, period=period)
market_price_of_preferred = fi.preferred_stock_value(stock=stock, date=date, lookback_period=lookback_period,
period=period)
return preferred_dividends / market_price_of_preferred
def cost_of_debt(stock, date=datetime.now(), lookback_period=timedelta(days=0), period: str = 'FY'):
interest_rate = fi.interest_expense(stock=stock, date=date, lookback_period=lookback_period,
period=period) / fi.total_long_term_debt(stock=stock, date=date,
lookback_period=lookback_period,
period=period)
tax_rate = fi.income_tax_expense(stock=stock, date=date, lookback_period=lookback_period,
period=period) / me.earnings_before_taxes(stock=stock, date=date,
lookback_period=lookback_period,
period=period)
return abs(interest_rate * (1 - tax_rate))
def cost_of_equity_capm(stock: str, from_date: datetime = datetime.now() - timedelta(days=365 * 5),
to_date: datetime = datetime.now(),
beta_period='Monthly',
benchmark: str = '^GSPC'):
beta = required_rr.asset_pricing_wrapper(model='CAPM', portfolio=stock, benchmark=benchmark, period=beta_period,
from_date=from_date, to_date=to_date).params[1]
risk_free_rate = macro.cumulative_risk_free_rate(from_date=to_date - timedelta(days=365), to_date=to_date)
risk_premium = macro.cumulative_market_premium(from_date=to_date - timedelta(days=365), to_date=to_date)
return risk_free_rate + beta * risk_premium
def cost_of_equity_ddm(stock, date=datetime.now(), lookback_period=timedelta(days=0), period: str = 'FY',
diluted_shares=True):
stock_price = me.market_price(stock=stock, date=date, lookback_period=lookback_period)
this_period_dividend = me.dividend_per_share(stock=stock, date=date, lookback_period=lookback_period, period=period,
diluted_shares=diluted_shares)
growth_rate = FundamentalMetricsHelpers(metric=partial(me.dividend_per_share), stock=stock, date=date) \
.metric_growth_rate(lookback_period=lookback_period, period=period)
next_period_dividend = this_period_dividend * (1 + growth_rate)
return (next_period_dividend / stock_price) + growth_rate
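# Note on the function above (comment added for clarity): cost_of_equity_ddm follows the
# Gordon growth form r_e = D1 / P0 + g, where D1 is the trailing dividend per share grown
# at the estimated growth rate and P0 is the current market price.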
"""
Summary: Calculate the cost of equity for WACC using the Bond yield plus risk premium method.
PARA bond_yield: The company's interest rate on long-term debt.
PARA risk_premium: The company's risk premium usually 3% to 5%.
"""
def cost_of_equity_byprp(bond_yield: float, risk_premium: float):
return bond_yield + risk_premium
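# Note on the function below (comment added for clarity): it computes a capital-structure-
# weighted average, WACC = w_e * r_e + w_p * r_p + w_d * r_d, where the weights come from
# total shareholders' equity, preferred stock value and total long-term debt, and
# cost_of_debt() above already returns an after-tax rate.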
def weighted_average_cost_of_capital(stock, date=datetime.now(), lookback_period: timedelta = timedelta(days=0),
lookback_lookback_period: timedelta = timedelta(days=365 * 5),
period: str = 'FY', beta_period='Monthly', benchmark: str = '^GSPC'):
from_date = date - lookback_period - lookback_lookback_period
to_date = date - lookback_period
dictio = {'Common Equity': (cost_of_equity_capm(stock=stock, from_date=from_date, to_date=to_date,
beta_period=beta_period,
benchmark=benchmark),
fi.total_shareholders_equity(stock=stock, date=date, lookback_period=lookback_period,
period=period)),
'Preferred Equity': (
cost_of_preferred_stock(stock=stock, date=date, lookback_period=lookback_period, period=period),
fi.preferred_stock_value(stock=stock, date=date, lookback_period=lookback_period,
period=period)),
'Debt': (cost_of_debt(stock=stock, date=date, lookback_period=lookback_period, period=period),
fi.total_long_term_debt(stock=stock, date=date, lookback_period=lookback_period, period=period))}
capitals = [np.nan_to_num(v[1]) for k, v in dictio.items()]
weights = [part / sum(capitals) for part in capitals]
costs = [np.nan_to_num(v[0]) for k, v in dictio.items()]
return np.sum([weight * cost for weight, cost in zip(weights, costs)])
if __name__ == '__main__':
print(weighted_average_cost_of_capital('AAPL'))
|
the-stack_0_13497 | # coding: utf-8
# %% [markdown]
# # 📃 Solution for Exercise M2.01
#
# The aim of this exercise is to make the following experiments:
#
# * train and test a support vector machine classifier through
# cross-validation;
# * study the effect of the parameter gamma of this classifier using a
# validation curve;
# * use a learning curve to determine the usefulness of adding new
# samples in the dataset when building a classifier.
#
# To make these experiments we will first load the blood transfusion dataset.
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
import pandas as pd
blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")
data = blood_transfusion.drop(columns="Class")
target = blood_transfusion["Class"]
# %% [markdown]
# We will use a support vector machine classifier (SVM). In its most simple
# form, a SVM classifier is a linear classifier behaving similarly to a
# logistic regression. Indeed, the optimization used to find the optimal
# weights of the linear model are different but we don't need to know these
# details for the exercise.
#
# Also, this classifier can become more flexible/expressive by using a
# so-called kernel that makes the model become non-linear. Again, no requirement
# regarding the mathematics is required to accomplish this exercise.
#
# We will use an RBF kernel where a parameter `gamma` allows to tune the
# flexibility of the model.
#
# First let's create a predictive pipeline made of:
#
# * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# with default parameter;
# * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)
# where the parameter `kernel` could be set to `"rbf"`. Note that this is the
# default.
# %%
# solution
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
model = make_pipeline(StandardScaler(), SVC())
# %% [markdown]
# Evaluate the generalization performance of your model by cross-validation with a
# `ShuffleSplit` scheme. Thus, you can use
# [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html)
# and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html)
# to the `cv` parameter. Only fix the `random_state=0` in the `ShuffleSplit`
# and let the other parameters to the default.
# %%
# solution
from sklearn.model_selection import cross_validate, ShuffleSplit
cv = ShuffleSplit(random_state=0)
cv_results = cross_validate(model, data, target, cv=cv, n_jobs=2)
cv_results = pd.DataFrame(cv_results)
cv_results
# %% tags=["solution"]
print(
f"Accuracy score of our model:\n"
f"{cv_results['test_score'].mean():.3f} +/- "
f"{cv_results['test_score'].std():.3f}"
)
# %% [markdown]
# As previously mentioned, the parameter `gamma` is one of the parameters
# controlling under/over-fitting in support vector machine with an RBF kernel.
#
# Evaluate the effect of the parameter `gamma` by using the
# [`sklearn.model_selection.validation_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html) function.
# You can leave the default `scoring=None` which is equivalent to
# `scoring="accuracy"` for classification problems. You can vary `gamma`
# between `10e-3` and `10e2` by generating samples on a logarithmic scale
# with the help of `np.logspace(-3, 2, num=30)`.
#
# Since we are manipulating a `Pipeline` the parameter name will be set to
# `svc__gamma` instead of only `gamma`. You can retrieve the parameter name
# using `model.get_params().keys()`. We will go more into detail regarding
# accessing and setting hyperparameter in the next section.
# %%
# solution
import numpy as np
from sklearn.model_selection import validation_curve
gammas = np.logspace(-3, 2, num=30)
param_name = "svc__gamma"
train_scores, test_scores = validation_curve(
model, data, target, param_name=param_name, param_range=gammas, cv=cv,
n_jobs=2)
# %% [markdown]
# Plot the validation curve for the train and test scores.
# %%
# solution
import matplotlib.pyplot as plt
plt.errorbar(gammas, train_scores.mean(axis=1),
yerr=train_scores.std(axis=1), label='Training score')
plt.errorbar(gammas, test_scores.mean(axis=1),
yerr=test_scores.std(axis=1), label='Testing score')
plt.legend()
plt.xscale("log")
plt.xlabel(r"Value of hyperparameter $\gamma$")
plt.ylabel("Accuracy score")
_ = plt.title("Validation score of support vector machine")
# %% [markdown] tags=["solution"]
# Looking at the curve, we can clearly identify the over-fitting regime of
# the SVC classifier when `gamma > 1`.
# The best setting is around `gamma = 1` while for `gamma < 1`,
# it is not very clear if the classifier is under-fitting but the
# testing score is worse than for `gamma = 1`.
# %% [markdown]
# Now, you can perform an analysis to check whether adding new samples to the
# dataset could help our model to better generalize. Compute the learning curve
# (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html))
# by computing the train and test scores for different training dataset size.
# Plot the train and test scores with respect to the number of samples.
# %%
# solution
from sklearn.model_selection import learning_curve
train_sizes = np.linspace(0.1, 1, num=10)
results = learning_curve(
model, data, target, train_sizes=train_sizes, cv=cv, n_jobs=2)
train_size, train_scores, test_scores = results[:3]
# %% tags=["solution"]
plt.errorbar(train_size, train_scores.mean(axis=1),
yerr=train_scores.std(axis=1), label='Training score')
plt.errorbar(train_size, test_scores.mean(axis=1),
yerr=test_scores.std(axis=1), label='Testing score')
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
plt.xlabel("Number of samples in the training set")
plt.ylabel("Accuracy")
_ = plt.title("Learning curve for support vector machine")
# %% [markdown] tags=["solution"]
# We observe that adding new samples in the dataset does not improve the
# testing score. We can only conclude that the standard deviation of
# the training error is decreasing when adding more samples which is not a
# surprise.
|
the-stack_0_13498 | #!/usr/bin/env python3
import ssl
from pwncat.channel import ChannelError
from pwncat.channel.connect import Connect
class SSLConnect(Connect):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _socket_connected(self, client):
try:
self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.context.check_hostname = False
self.context.verify_mode = ssl.VerifyMode.CERT_NONE
client = self.context.wrap_socket(client)
except ssl.SSLError as exc:
raise ChannelError(self, str(exc))
super()._socket_connected(client)
|
the-stack_0_13499 | import logging
def derive_initial_state_model(max_repeats,
num_symbols,
max_extra_states=15,
start_symbol=1000,
end_symbol=1001,
num_random_starts=20
):
"""derives initial state model using Expectation-Maximization (E-M) algorithm
Parameters
----------
max_extra_states : int
Maximum number of states to allow for each symbol.
Default is 15.
start_symbol : int
Numerical symbol for the start state. Default is 1000.
end_symbol : int
Numerical symbol for the end state. Default is 1001.
max_repeats : dict
where each key is a symbol and the corresponding value is
the maximum number of consecutive repeats of that symbol
found in any of the sequences
num_symbols : int
number of unique symbols, i.e., len(symbols)
num_random_starts : int
Number of random starts to derive the best model. Default is 20.
Returns
-------
"""
for num_extra_states in range(1, max_extra_states+1):
logging.info(f'Trying model with {num_extra_states} extra_states.')
state_symbols = [start_symbol, end_symbol]
max_repeat_nums = [0, 0]
for symbol in range(0,num_symbols):
number_states = 1 + num_extra_states
state_symbols.extend([symbol] * number_states)
max_repeat_nums.extend([max_repeats[symbol]] * number_states)
|
the-stack_0_13502 | from plenum.test.test_node import ensure_node_disconnected, getNonPrimaryReplicas
from indy_node.test.helper import addRawAttribute
from indy_client.test.conftest import nodeSet
from indy_common.test.conftest import config_helper_class, node_config_helper_class
def test_n_minus_f_pool_processes_attrib(looper, nodeSet, up,
steward, stewardWallet):
"""
The pool N-f nodes should be able to process ATTRIB txn.
https://jira.hyperledger.org/browse/INDY-245
"""
make_pool_n_minus_f_nodes(looper, nodeSet)
addRawAttribute(looper, steward, stewardWallet,
'foo', 'bar')
def make_pool_n_minus_f_nodes(looper, nodeSet):
non_primary, other_nodes = get_any_non_primary_and_others(nodeSet)
disconnect_node(looper, non_primary, other_nodes)
def get_any_non_primary_and_others(node_set):
non_primary_node = getNonPrimaryReplicas(node_set, 0)[0].node
other_nodes = [n for n in node_set if n != non_primary_node]
return non_primary_node, other_nodes
def disconnect_node(looper, node, other_nodes):
node.stop()
looper.removeProdable(node)
ensure_node_disconnected(looper, node, other_nodes)
check_if_pool_n_minus_f(other_nodes)
def check_if_pool_n_minus_f(nodes):
for node in nodes:
min_connection = node.minimumNodes - 1 # subtract node itself
assert len(node.nodestack.connecteds) == min_connection, \
"the pool should have minimum (N-f) nodes connected"
|
the-stack_0_13504 | import sys
sys.path.append(".")
import numpy as np
from dctree.core.basemodel import BaseModel
class AdaboostClassifier(BaseModel):
def __init__(self,base_estimator:BaseModel,n_estimators=5,learning_rate=1.0,base_estimator_params={}):
"""
An AdaBoost classifier \n
Params:
base_estimator : The base estimator from which the boosted ensemble is built
n_estimators : The maximum number of estimators at which boosting is terminated
learning_rate : Weight applied to each classifier at each boosting iteration
base_estimator_params : The parameters of base estimators
"""
self.estimator = base_estimator
self.estimator_params = base_estimator_params
self.learning_rate = learning_rate
self.n_estimators = n_estimators
def init_estimators(self):
"""
Initialize base estimators.
"""
estimators = []
for _ in range(self.n_estimators):
estimator = self.estimator()
for key,value in self.estimator_params.items():
setattr(estimator,key,value)
estimators.append(estimator)
return estimators
def init_sample_weights(self,sample_size:int,init_weight:float=None):
"""
Initialize the sample weights.
"""
if init_weight is not None:
weights = np.full(sample_size,init_weight)
else:
weight = 1 / sample_size
weights = np.full(sample_size,weight)
return weights
def calculate_error_rate(self,estimator,X:np.ndarray,Y:np.ndarray,W:np.ndarray=None):
"""
Calculate the error rate of base estimator
"""
if W is not None:
return 1 - estimator.score(X,Y,W)
else:
return 1 - estimator.score(X,Y)
def score(self,X:np.ndarray,Y:np.ndarray):
"""
Return the mean accuracy on the given test data and labels \n
Params:
X : Test samples
Y : True labels for X
"""
Y_pred = self.predict(X)
Y_comp = (Y_pred==Y).astype(np.int8)
sum = np.sum(Y_comp)
return sum / Y_comp.shape[0]
def calculate_model_coefficient(self,error_rate,n_classes,epsilon=1e-6):
"""
Calculate the coefficient of base estimator
"""
alpha = self.learning_rate * (np.log((1-error_rate) / (error_rate + epsilon)) +\
np.log(n_classes-1)) #SAMME
return alpha
def calculate_new_weights(self,coef,Y_pred:np.ndarray,Y:np.ndarray,W:np.ndarray):
"""
Calculate new weights
"""
W_new = np.zeros_like(W)
for i,w in enumerate(W):
y_pred = Y_pred[i]
y = Y[i]
param = coef * int(y_pred != y)
w_new = w * np.exp(param)
W_new[i] = w_new
return W_new
def _fit(self,X:np.ndarray,Y:np.ndarray):
sample_size = X.shape[0]
        self.n_classes = len(np.unique(Y)) # count the number of classes in Y
n_classes = self.n_classes
        self.estimators = self.init_estimators() # initialize the base estimators
        self.W = self.init_sample_weights(sample_size) # initialize the sample weights
        self.coefs = np.zeros(len(self.estimators)) # initialize the model coefficients
for i,estimator in enumerate(self.estimators):
W = self.W
estimator.fit(X,Y,sample_weight=W)
error = self.calculate_error_rate(estimator,X,Y,W)
coef = self.calculate_model_coefficient(error,n_classes)
self.coefs[i] = coef
Y_pred = estimator.predict(X)
self.W = self.calculate_new_weights(coef,Y_pred,Y,W)
def _predict(self,X:np.ndarray):
len_X = X.shape[0]
        Y_pred = np.zeros(len_X,dtype=np.int32) # initialize the prediction results
for i,row in enumerate(X):
x = row.reshape(1,-1)
W = np.zeros(self.n_classes)
for j,estimator in enumerate(self.estimators):
y_pred = estimator.predict(x)
W[y_pred] += self.coefs[j]
Y_pred[i] = np.argmax(W)
return Y_pred
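# Usage sketch (added for illustration; not part of the original file). It assumes the
# BaseModel interface exposes fit/predict/score wrappers around _fit/_predict, and that the
# base estimator class accepts sample_weight in fit, as the booster above requires:
#   from dctree.core.tree import DecisionTreeClassifier   # hypothetical import path
#   clf = AdaboostClassifier(base_estimator=DecisionTreeClassifier, n_estimators=10,
#                            base_estimator_params={"max_depth": 1})
#   clf.fit(X_train, Y_train)
#   print(clf.score(X_test, Y_test))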
|
the-stack_0_13506 | """Test functions for the sparse.linalg._onenormest module
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
import pytest
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2
class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
"""
This is purely for onenormest testing.
"""
def __init__(self, A, B):
if A.ndim != 2 or B.ndim != 2:
raise ValueError('expected ndarrays representing matrices')
if A.shape[1] != B.shape[0]:
raise ValueError('incompatible shapes')
self.A = A
self.B = B
self.ndim = 2
self.shape = (A.shape[0], B.shape[1])
def _matvec(self, x):
return np.dot(self.A, np.dot(self.B, x))
def _rmatvec(self, x):
return np.dot(np.dot(x, self.A), self.B)
def _matmat(self, X):
return np.dot(self.A, np.dot(self.B, X))
@property
def T(self):
return MatrixProductOperator(self.B.T, self.A.T)
class TestOnenormest:
@pytest.mark.xslow
def test_onenormest_table_3_t_2(self):
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 2
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
assert_(0.99 < np.mean(underestimation_ratio) < 1.0)
# check the max and mean required column resamples
assert_equal(np.max(nresample_list), 2)
assert_(0.05 < np.mean(nresample_list) < 0.2)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.9 < proportion_exact < 0.95)
# check the average number of matrix*vector multiplications
assert_(3.5 < np.mean(nmult_list) < 4.5)
@pytest.mark.xslow
def test_onenormest_table_4_t_7(self):
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 7
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A = np.random.randint(-1, 2, size=(n, n))
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
assert_(0.90 < np.mean(underestimation_ratio) < 0.99)
# check the required column resamples
assert_equal(np.max(nresample_list), 0)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.15 < proportion_exact < 0.25)
# check the average number of matrix*vector multiplications
assert_(3.5 < np.mean(nmult_list) < 4.5)
def test_onenormest_table_5_t_1(self):
# "note that there is no randomness and hence only one estimate for t=1"
t = 1
n = 100
itmax = 5
alpha = 1 - 1e-6
A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
first_col = np.array([1] + [0]*(n-1))
first_row = np.array([(-alpha)**i for i in range(n)])
B = -scipy.linalg.toeplitz(first_col, first_row)
assert_allclose(A, B)
est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
exact_value = scipy.linalg.norm(B, 1)
underest_ratio = est / exact_value
assert_allclose(underest_ratio, 0.05, rtol=1e-4)
assert_equal(nmults, 11)
assert_equal(nresamples, 0)
# check the non-underscored version of onenormest
est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
assert_allclose(est, est_plain)
@pytest.mark.xslow
def test_onenormest_table_6_t_1(self):
#TODO this test seems to give estimates that match the table,
#TODO even though no attempt has been made to deal with
#TODO complex numbers in the one-norm estimation.
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 1
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
A = scipy.linalg.inv(A_inv)
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
underestimation_ratio_mean = np.mean(underestimation_ratio)
assert_(0.90 < underestimation_ratio_mean < 0.99)
# check the required column resamples
max_nresamples = np.max(nresample_list)
assert_equal(max_nresamples, 0)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.7 < proportion_exact < 0.8)
# check the average number of matrix*vector multiplications
mean_nmult = np.mean(nmult_list)
assert_(4 < mean_nmult < 5)
def _help_product_norm_slow(self, A, B):
# for profiling
C = np.dot(A, B)
return scipy.linalg.norm(C, 1)
def _help_product_norm_fast(self, A, B):
# for profiling
t = 2
itmax = 5
D = MatrixProductOperator(A, B)
est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
return est
@pytest.mark.slow
def test_onenormest_linear_operator(self):
# Define a matrix through its product A B.
# Depending on the shapes of A and B,
# it could be easy to multiply this product by a small matrix,
# but it could be annoying to look at all of
# the entries of the product explicitly.
np.random.seed(1234)
n = 6000
k = 3
A = np.random.randn(n, k)
B = np.random.randn(k, n)
fast_estimate = self._help_product_norm_fast(A, B)
exact_value = self._help_product_norm_slow(A, B)
assert_(fast_estimate <= exact_value <= 3*fast_estimate,
'fast: %g\nexact:%g' % (fast_estimate, exact_value))
def test_returns(self):
np.random.seed(1234)
A = scipy.sparse.rand(50, 50, 0.1)
s0 = scipy.linalg.norm(A.todense(), 1)
s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True)
assert_allclose(s1, s0, rtol=1e-9)
assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1), rtol=1e-9)
assert_allclose(A.dot(v), w, rtol=1e-9)
class TestAlgorithm_2_2:
def test_randn_inv(self):
np.random.seed(1234)
n = 20
nsamples = 100
for i in range(nsamples):
# Choose integer t uniformly between 1 and 3 inclusive.
t = np.random.randint(1, 4)
# Choose n uniformly between 10 and 40 inclusive.
n = np.random.randint(10, 41)
# Sample the inverse of a matrix with random normal entries.
A = scipy.linalg.inv(np.random.randn(n, n))
# Compute the 1-norm bounds.
g, ind = _algorithm_2_2(A, A.T, t)
|
the-stack_0_13507 | # -*- encoding: utf-8 -*-
#
# Author: John Tran <[email protected]>
# Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.version as dist_version
import os
import migrate
from migrate.versioning import util as migrate_util
import sqlalchemy
from ceilometer.openstack.common import log
INIT_VERSION = 1
LOG = log.getLogger(__name__)
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__')
or dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
_REPOSITORY = None
def db_sync(engine, version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise Exception(_("version should be an integer"))
current_version = db_version(engine)
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)
def db_version(engine):
repository = _find_migrate_repo()
try:
return versioning_api.db_version(engine,
repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(engine, 0)
return versioning_api.db_version(engine, repository)
def db_version_control(engine, version=None):
repository = _find_migrate_repo()
versioning_api.version_control(engine, repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
|
the-stack_0_13508 | from django.db.models.signals import post_migrate
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
def add_view_permissions(sender, **kwargs):
"""
This syncdb hooks takes care of adding a view permission too all our
content types.
"""
# for each of our content types
for content_type in ContentType.objects.all():
# build our permission slug
if not content_type.model:
continue
codename = "view_%s" % content_type.model
# if it doesn't exist..
if not Permission.objects.filter(content_type=content_type,
codename=codename):
# add it
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can view %s" % content_type.name)
print("Added view permission for %s" % content_type.name)
post_migrate.connect(add_view_permissions)
|
the-stack_0_13512 | # -*- coding: utf-8 -*-
import logging
import subprocess
from imghdr import what as determinetype
from django.core.files.base import ContentFile
from django.core.files.temp import NamedTemporaryFile
from easy_thumbnails.optimize.conf import settings
try:
from subprocess import check_output
except ImportError:
def check_output(*popenargs, **kwargs):
"""
Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
logger = logging.getLogger('easy_thumbnails.optimize')
def optimize_thumbnail(thumbnail):
'''Optimize thumbnail images by removing unnecessary data'''
try:
optimize_command = settings.THUMBNAIL_OPTIMIZE_COMMAND[determinetype(thumbnail.path)]
if not optimize_command:
return
except (TypeError, KeyError, NotImplementedError):
return
storage = thumbnail.storage
try:
with NamedTemporaryFile() as temp_file:
thumbnail.seek(0)
temp_file.write(thumbnail.read())
temp_file.flush()
optimize_command = optimize_command.format(filename=temp_file.name)
output = check_output(optimize_command, stderr=subprocess.STDOUT, shell=True)
if output:
logger.warn('{0} returned {1}'.format(optimize_command, output))
else:
logger.info('{0} returned nothing'.format(optimize_command))
with open(temp_file.name, 'rb') as f:
thumbnail.file = ContentFile(f.read())
storage.delete(thumbnail.path)
storage.save(thumbnail.path, thumbnail)
except Exception as e:
logger.error(e)
|
the-stack_0_13514 | from No import No
from Estado import Estado
from collections import deque
def executaBFS():
"""
Esta função executa a pesquisa BFS usando uma fila
"""
#criar fila
fila = deque([])
#por ser um gráfico, criamos uma lista de visitantes
visitados = []
#criar nó raiz
estadoInicial = Estado()
raiz = No(estadoInicial)
#adicionar à fila e lista de visitados
fila .append(raiz)
visitados.append(raiz.estado.nome)
# verifique se há algo na para retirar da fila (dar o dequeue)
while len(fila) > 0:
        # get the first item from the queue
noAtual = fila.popleft()
print ("-- dequeue --", noAtual.estado.nome)
        # check whether this is the goal state
if noAtual.estado.funcaoObjetivo():
print ("Atingiu o estado objetivo")
            # print the path
print ("----------------------")
print ("Caminho")
noAtual.printCaminho()
break
        # get the child nodes
estadosFilhos = noAtual.estado.funcaoSucessora()
for estadoFilho in estadosFilhos:
noFilho = No(Estado(estadoFilho))
            # check whether the node has not been visited yet
if noFilho.estado.nome not in visitados:
                # add it to the visited list
visitados.append(noFilho.estado.nome )
                # add it to the tree and to the queue
noAtual.addFilho(noFilho)
fila.append(noFilho)
    # print the tree
print ("----------------------")
print ("Arvore")
raiz.printArvore()
executaBFS() |
the-stack_0_13515 | """This module defines the funtions byref_at(cobj, offset)
and cast_field(struct, fieldname, fieldtype).
"""
from ctypes import *
def _calc_offset():
# Internal helper function that calculates where the object
# returned by a byref() call stores the pointer.
# The definition of PyCArgObject in C code (that is the type of
# object that a byref() call returns):
class PyCArgObject(Structure):
class value(Union):
_fields_ = [("c", c_char),
("h", c_short),
("i", c_int),
("l", c_long),
("q", c_longlong),
("d", c_double),
("f", c_float),
("p", c_void_p)]
#
# Thanks to Lenard Lindstrom for this tip:
# sizeof(PyObject_HEAD) is the same as object.__basicsize__.
#
_fields_ = [("PyObject_HEAD", c_byte * object.__basicsize__),
("pffi_type", c_void_p),
("tag", c_char),
("value", value),
("obj", c_void_p),
("size", c_int)]
_anonymous_ = ["value"]
# additional checks to make sure that everything works as expected
if sizeof(PyCArgObject) != type(byref(c_int())).__basicsize__:
raise RuntimeError("sizeof(PyCArgObject) invalid")
obj = c_int()
ref = byref(obj)
argobj = PyCArgObject.from_address(id(ref))
if argobj.obj != id(obj) or \
argobj.p != addressof(obj) or \
argobj.tag != 'P':
raise RuntimeError("PyCArgObject field definitions incorrect")
return PyCArgObject.p.offset # offset of the pointer field
################################################################
#
# byref_at
#
def byref_at(obj, offset,
_byref=byref,
_c_void_p_from_address = c_void_p.from_address,
_byref_pointer_offset = _calc_offset()
):
"""byref_at(cobj, offset) behaves similar this C code:
(((char *)&obj) + offset)
In other words, the returned 'pointer' points to the address of
'cobj' + 'offset'. 'offset' is in units of bytes.
"""
ref = _byref(obj)
# Change the pointer field in the created byref object by adding
# 'offset' to it:
_c_void_p_from_address(id(ref)
+ _byref_pointer_offset).value += offset
return ref
################################################################
#
# cast_field
#
def cast_field(struct, fieldname, fieldtype, offset=0,
_POINTER=POINTER,
_byref_at=byref_at,
_byref=byref,
_divmod=divmod,
_sizeof=sizeof,
):
"""cast_field(struct, fieldname, fieldtype)
Return the contents of a struct field as it it were of type
'fieldtype'.
"""
fieldoffset = getattr(type(struct), fieldname).offset
return cast(_byref_at(struct, fieldoffset),
_POINTER(fieldtype))[0]
__all__ = ["byref_at", "cast_field"]
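# Illustrative example (added; not part of the original module):
#   class POINT(Structure):
#       _fields_ = [("x", c_int), ("y", c_int)]
#   pt = POINT(1, 2)
#   ref = byref_at(pt, sizeof(c_int))         # points at pt.y instead of pt.x
#   y_as_uint = cast_field(pt, "y", c_uint)   # read pt.y reinterpreted as c_uint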
|
the-stack_0_13516 | import torch.nn as nn
class NLayerDiscriminator(nn.Module):
"""
A PatchGAN
"""
def __init__(self, input_nc, ndf=64, n_layers=3,
norm_layer=nn.BatchNorm2d):
super(NLayerDiscriminator, self).__init__()
kwidth = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kwidth,
stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
# gradually increase the number of filters
for i in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2 ** i, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kwidth, stride=2, padding=padw, bias=False),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kwidth, stride=1, padding=padw, bias=False),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
# output 1 channel prediction map
sequence += [nn.Conv2d(ndf * nf_mult, 1,
kernel_size=kwidth, stride=1, padding=padw)]
self.model = nn.Sequential(*sequence)
def forward(self, input_x):
return self.model(input_x)
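# Usage sketch (added for illustration; not part of the original file):
#   import torch
#   netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#   patch_scores = netD(torch.randn(1, 3, 256, 256))   # -> shape (1, 1, 30, 30)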
|
the-stack_0_13519 | #!/usr/bin/python3
from keras.datasets import mnist
from keras.preprocessing.image import load_img, array_to_img
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import matplotlib.pyplot as plt
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Playing with the dataset
#
# print(X_train.size) # not sure exactly what this shows
# print(X_train.shape) # total number of images, x dimension, y dimension
# print(y_train.shape)
# print(X_test.shape)
# print(y_test.shape)
# Showing the digit 5
# print(X_train.shape, ' is ', y_train[0])
# plt.imshow(X_train[0], cmap="gray")
# plt.show(block=True)
# Preprocessing the image data
#
image_height, image_width = 28, 28
# Reshape the 60k training examples into flat vectors
X_train = X_train.reshape(60000, image_height * image_width)
# print(X_train.shape) # total number of images, flattened dimension
X_test = X_test.reshape(10000, image_height * image_width)
# print(X_test.shape)
# print(X_train[0])
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.0
X_test /= 255.0
# print(X_train[0])
# Build a model
#
# Represent the labels as 10 one-hot categories
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# print(y_train.shape)
# print(y_test.shape)
# Model
model = Sequential()
# Model with three layers
# layer 1 with 512 neurons
# model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dense(512, activation='relu', input_shape=(image_height * image_width,)))
model.add(Dense(512, activation='relu'))
# layer 3 with 10 neurons and 10 outputs
model.add(Dense(10, activation='softmax'))
# Compile the model
# categorical_crossentropy is used because we are classifying into discrete classes
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Train the model
EPOCHS = 20 # epochs
history = model.fit(X_train, y_train, epochs=EPOCHS, validation_data=(X_test, y_test))
# What is the accuracy of the model?
#
# Plot the accuracy of the training model
plt.plot(history.history['acc'])
# Plot the accuracy of training and validation set
plt.plot(history.history['val_acc'])
# Accuracy of training and validation with loss
plt.plot(history.history['loss'])
plt.show()
# Evaluating the model
score = model.evaluate(X_test, y_test)
print(score)
|
the-stack_0_13520 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import SkipTest
from pytest import raises
from neo4j.work.simple import Statement, SessionError
from neo4j.exceptions import CypherError, ClientError, TransientError
from neo4j.graph import Node, Relationship
def test_can_run_simple_statement(session):
result = session.run("RETURN 1 AS n")
for record in result:
assert record[0] == 1
assert record["n"] == 1
with raises(KeyError):
_ = record["x"]
assert record["n"] == 1
with raises(KeyError):
_ = record["x"]
with raises(TypeError):
_ = record[object()]
assert repr(record)
assert len(record) == 1
def test_can_run_simple_statement_with_params(session):
count = 0
for record in session.run("RETURN $x AS n",
{"x": {"abc": ["d", "e", "f"]}}):
assert record[0] == {"abc": ["d", "e", "f"]}
assert record["n"] == {"abc": ["d", "e", "f"]}
assert repr(record)
assert len(record) == 1
count += 1
assert count == 1
def test_autocommit_transactions_use_bookmarks(neo4j_driver):
bookmarks = []
# Generate an initial bookmark
with neo4j_driver.session() as session:
session.run("CREATE ()").consume()
bookmark = session.last_bookmark()
assert bookmark is not None
bookmarks.append(bookmark)
# Propagate into another session
with neo4j_driver.session(bookmarks=bookmarks) as session:
assert list(session.next_bookmarks()) == bookmarks
session.run("CREATE ()").consume()
bookmark = session.last_bookmark()
assert bookmark is not None
assert bookmark not in bookmarks
def test_fails_on_bad_syntax(session):
with raises(CypherError):
session.run("X").consume()
def test_fails_on_missing_parameter(session):
with raises(CypherError):
session.run("RETURN {x}").consume()
def test_can_run_statement_that_returns_multiple_records(session):
count = 0
for record in session.run("unwind(range(1, 10)) AS z RETURN z"):
assert 1 <= record[0] <= 10
count += 1
assert count == 10
def test_can_use_with_to_auto_close_session(session):
record_list = list(session.run("RETURN 1"))
assert len(record_list) == 1
for record in record_list:
assert record[0] == 1
def test_can_return_node(neo4j_driver):
with neo4j_driver.session() as session:
record_list = list(session.run("CREATE (a:Person {name:'Alice'}) "
"RETURN a"))
assert len(record_list) == 1
for record in record_list:
alice = record[0]
assert isinstance(alice, Node)
assert alice.labels == {"Person"}
assert dict(alice) == {"name": "Alice"}
def test_can_return_relationship(neo4j_driver):
with neo4j_driver.session() as session:
record_list = list(session.run("CREATE ()-[r:KNOWS {since:1999}]->() "
"RETURN r"))
assert len(record_list) == 1
for record in record_list:
rel = record[0]
assert isinstance(rel, Relationship)
assert rel.type == "KNOWS"
assert dict(rel) == {"since": 1999}
# TODO: re-enable after server bug is fixed
# def test_can_return_path(session):
# with self.driver.session() as session:
# record_list = list(session.run("MERGE p=({name:'Alice'})-[:KNOWS]->"
# "({name:'Bob'}) RETURN p"))
# assert len(record_list) == 1
# for record in record_list:
# path = record[0]
# assert isinstance(path, Path)
# assert path.start_node["name"] == "Alice"
# assert path.end_node["name"] == "Bob"
# assert path.relationships[0].type == "KNOWS"
# assert len(path.nodes) == 2
# assert len(path.relationships) == 1
def test_can_handle_cypher_error(session):
with raises(CypherError):
session.run("X").consume()
def test_keys_are_available_before_and_after_stream(session):
result = session.run("UNWIND range(1, 10) AS n RETURN n")
assert list(result.keys()) == ["n"]
list(result)
assert list(result.keys()) == ["n"]
def test_keys_with_an_error(session):
with raises(CypherError):
result = session.run("X")
list(result.keys())
def test_should_not_allow_empty_statements(session):
with raises(ValueError):
_ = session.run("")
def test_statement_object(session):
value = session.run(Statement("RETURN $x"), x=1).single().value()
assert value == 1
def test_autocommit_transactions_should_support_metadata(session):
metadata_in = {"foo": "bar"}
try:
statement = Statement("CALL dbms.getTXMetaData", metadata=metadata_in)
metadata_out = session.run(statement).single().value()
except ClientError as e:
if e.code == "Neo.ClientError.Procedure.ProcedureNotFound":
raise SkipTest("Cannot assert correct metadata as Neo4j edition "
"does not support procedure dbms.getTXMetaData")
else:
raise
else:
assert metadata_in == metadata_out
def test_autocommit_transactions_should_support_timeout(neo4j_driver):
with neo4j_driver.session() as s1:
s1.run("CREATE (a:Node)").consume()
with neo4j_driver.session() as s2:
tx1 = s1.begin_transaction()
tx1.run("MATCH (a:Node) SET a.property = 1").consume()
with raises(TransientError):
s2.run(Statement("MATCH (a:Node) SET a.property = 2",
timeout=0.25)).consume()
def test_regex_in_parameter(session):
matches = session.run("UNWIND ['A', 'B', 'C', 'A B', 'B C', 'A B C', "
"'A BC', 'AB C'] AS t WITH t "
"WHERE t =~ $re RETURN t", re=r'.*\bB\b.*').value()
assert matches == ["B", "A B", "B C", "A B C"]
def test_regex_inline(session):
matches = session.run(r"UNWIND ['A', 'B', 'C', 'A B', 'B C', 'A B C', "
r"'A BC', 'AB C'] AS t WITH t "
r"WHERE t =~ '.*\\bB\\b.*' RETURN t").value()
assert matches == ["B", "A B", "B C", "A B C"]
def test_automatic_reset_after_failure(session):
try:
session.run("X").consume()
except CypherError:
result = session.run("RETURN 1")
record = next(iter(result))
assert record[0] == 1
else:
assert False, "A Cypher error should have occurred"
def test_session_error(bolt_driver):
session = bolt_driver.session()
session.close()
with raises(SessionError):
session.run("RETURN 1")
def test_large_values(bolt_driver):
for i in range(1, 7):
with bolt_driver.session() as session:
session.run("RETURN '{}'".format("A" * 2 ** 20))
|
the-stack_0_13521 | #!/usr/bin/env python -tt
#
# Copyright (c) 2007 Red Hat Inc.
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from wic import msger
from wic.utils.errors import CreatorError
from wic.utils import runner
class BaseImageCreator():
"""Base class for image creation.
BaseImageCreator is the simplest creator class available; it will
create a system image according to the supplied kickstart file.
e.g.
import wic.imgcreate as imgcreate
ks = imgcreate.read_kickstart("foo.ks")
imgcreate.ImageCreator(ks, "foo").create()
"""
def __del__(self):
self.cleanup()
def __init__(self, createopts=None):
"""Initialize an ImageCreator instance.
ks -- a pykickstart.KickstartParser instance; this instance will be
used to drive the install by e.g. providing the list of packages
to be installed, the system configuration and %post scripts
name -- a name for the image; used for e.g. image filenames or
filesystem labels
"""
self.__builddir = None
self.ks = None
self.name = "target"
self.tmpdir = "/var/tmp/wic"
self.workdir = "/var/tmp/wic/build"
# setup tmpfs tmpdir when enabletmpfs is True
self.enabletmpfs = False
if createopts:
# Mapping table for variables that have different names.
optmap = {"outdir" : "destdir",
}
# update setting from createopts
for key in createopts:
if key in optmap:
option = optmap[key]
else:
option = key
setattr(self, option, createopts[key])
self.destdir = os.path.abspath(os.path.expanduser(self.destdir))
self._dep_checks = ["ls", "bash", "cp", "echo"]
# Output image file names
self.outimage = []
# No ks provided when called by convertor, so skip the dependency check
if self.ks:
# If we have btrfs partition we need to check necessary tools
for part in self.ks.partitions:
if part.fstype and part.fstype == "btrfs":
self._dep_checks.append("mkfs.btrfs")
break
# make sure the specified tmpdir and cachedir exist
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
#
# Hooks for subclasses
#
def _create(self):
"""Create partitions for the disk image(s)
This is the hook where subclasses may create the partitions
that will be assembled into disk image(s).
There is no default implementation.
"""
pass
def _cleanup(self):
"""Undo anything performed in _create().
This is the hook where subclasses must undo anything which was
done in _create().
There is no default implementation.
"""
pass
#
# Actual implementation
#
def __ensure_builddir(self):
if not self.__builddir is None:
return
try:
self.workdir = os.path.join(self.tmpdir, "build")
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
self.__builddir = tempfile.mkdtemp(dir=self.workdir,
prefix="imgcreate-")
except OSError as err:
raise CreatorError("Failed create build directory in %s: %s" %
(self.tmpdir, err))
def __setup_tmpdir(self):
if not self.enabletmpfs:
return
runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir)
def __clean_tmpdir(self):
if not self.enabletmpfs:
return
runner.show('umount -l %s' % self.workdir)
def create(self):
"""Create partitions for the disk image(s)
Create the partitions that will be assembled into disk
image(s).
"""
self.__setup_tmpdir()
self.__ensure_builddir()
self._create()
def cleanup(self):
"""Undo anything performed in create().
Note, make sure to call this method once finished with the creator
instance in order to ensure no stale files are left on the host e.g.:
creator = ImageCreator(ks, name)
try:
creator.create()
finally:
creator.cleanup()
"""
if not self.__builddir:
return
self._cleanup()
shutil.rmtree(self.__builddir, ignore_errors=True)
self.__builddir = None
self.__clean_tmpdir()
def print_outimage_info(self):
msg = "The new image can be found here:\n"
self.outimage.sort()
for path in self.outimage:
msg += ' %s\n' % os.path.abspath(path)
msger.info(msg)
|
the-stack_0_13522 | def incworkload(inchtml,composition):
Xh = 45100*(composition/(composition+1)) / (3 * 60 * 60)*inchtml
Xi = 45100*(1/(composition+1)) / (3 * 60 * 60)*inchtml
request_size_h = 3000 / 1000
request_size_i = 15000 / 1000
Dhc = (0.008 + (0.002 * request_size_h))
Dic = (0.008 + (0.002 * request_size_i))
Dhd = 0.012 * request_size_h
Did = 0.012 * request_size_i
Uhc = Xh * Dhc
Uic = Xi * Dic
Uhd = Xh * Dhd
Uid = Xi * Did
Uc = Uhc + Uic
Ud = Uhd + Uid
Rhc = Dhc / (1 - Uc)
Ric = Dic / (1 - Uc)
Rhd = Dhd / (1 - Ud)
Rid = Did / (1 - Ud)
Rhtml = Rhc + Rhd
Rimage = Ric + Rid
print("composition:" + str(int(composition)))
print("Xh={:.4f},Xi={:.4f}".format(Xh, Xi))
print("Dhc={:.4f},Dic={:.4f},Dhd={:.4f},Did={:.4f}".
format(Dhc, Dic, Dhd, Did))
print("Uhc={:.4f},Uic={:.4f},Uhd={:.4f},Uid={:.4f}".
format(Uhc, Uic, Uhd, Uid))
print("Uc={:.4f},Ud={:.4f}".format(Uc, Ud))
# print("Rhc={:.4f},Ric={:.4f},Rhd={:.4f},Rid={:.4f}".
# format(Rhc, Ric, Rhd, Rid))
# print("Rhtml={:.4f},Rimage={:.4f}".format(Rhtml, Rimage))
print("-------------------------")
incworkload(inchtml=1,composition=8)
incworkload(inchtml=1,composition=10)
incworkload(inchtml=4,composition=10)
incworkload(inchtml=5,composition=10)
|
the-stack_0_13524 | """Test data purging."""
import json
from datetime import datetime, timedelta
import unittest
from homeassistant.components import recorder
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.purge import purge_old_data
from homeassistant.components.recorder.models import States, Events
from homeassistant.components.recorder.util import session_scope
from tests.common import get_test_home_assistant, init_recorder_component
class TestRecorderPurge(unittest.TestCase):
"""Base class for common recorder tests."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
init_recorder_component(self.hass)
self.hass.start()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def _add_test_states(self):
"""Add multiple states to the db for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
attributes = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.block_till_done()
self.hass.data[DATA_INSTANCE].block_till_done()
with recorder.session_scope(hass=self.hass) as session:
for event_id in range(5):
if event_id < 3:
timestamp = five_days_ago
state = 'purgeme'
else:
timestamp = now
state = 'dontpurgeme'
session.add(States(
entity_id='test.recorder2',
domain='sensor',
state=state,
attributes=json.dumps(attributes),
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=event_id + 1000
))
def _add_test_events(self):
"""Add a few events for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
event_data = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.block_till_done()
self.hass.data[DATA_INSTANCE].block_till_done()
with recorder.session_scope(hass=self.hass) as session:
for event_id in range(5):
if event_id < 2:
timestamp = five_days_ago
event_type = 'EVENT_TEST_PURGE'
else:
timestamp = now
event_type = 'EVENT_TEST'
session.add(Events(
event_type=event_type,
event_data=json.dumps(event_data),
origin='LOCAL',
created=timestamp,
time_fired=timestamp,
))
def test_purge_old_states(self):
"""Test deleting old states."""
self._add_test_states()
# make sure we start with 5 states
with session_scope(hass=self.hass) as session:
states = session.query(States)
self.assertEqual(states.count(), 5)
# run purge_old_data()
purge_old_data(self.hass.data[DATA_INSTANCE], 4)
# we should only have 2 states left after purging
self.assertEqual(states.count(), 2)
def test_purge_old_events(self):
"""Test deleting old events."""
self._add_test_events()
with session_scope(hass=self.hass) as session:
events = session.query(Events).filter(
Events.event_type.like("EVENT_TEST%"))
self.assertEqual(events.count(), 5)
# run purge_old_data()
purge_old_data(self.hass.data[DATA_INSTANCE], 4)
# now we should only have 3 events left
self.assertEqual(events.count(), 3)
|
the-stack_0_13526 | from django.db import models
from django.db.models import Q
from django.utils.encoding import force_text
from cms.models import CMSPlugin, Placeholder
class AliasPluginModel(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(
CMSPlugin,
on_delete=models.CASCADE,
related_name='cms_aliasplugin',
parent_link=True,
)
plugin = models.ForeignKey(
CMSPlugin,
on_delete=models.CASCADE,
editable=False,
related_name='alias_reference',
null=True,
)
alias_placeholder = models.ForeignKey(
Placeholder,
on_delete=models.CASCADE,
editable=False,
related_name='alias_placeholder',
null=True,
)
class Meta:
app_label = 'cms'
def __str__(self):
if self.plugin_id:
return "(%s) %s" % (force_text(self.plugin.get_plugin_name()), self.plugin.get_plugin_instance()[0])
else:
return force_text(self.alias_placeholder.get_label())
def get_aliased_placeholder_id(self):
if self.plugin_id:
placeholder_id = self.plugin.placeholder_id
else:
placeholder_id = self.alias_placeholder_id
return placeholder_id
def is_recursive(self):
placeholder_id = self.get_aliased_placeholder_id()
if not placeholder_id:
return False
plugins = AliasPluginModel.objects.filter(
plugin_type='AliasPlugin',
placeholder_id=placeholder_id,
)
plugins = plugins.filter(
Q(plugin=self) |
Q(plugin__placeholder=self.placeholder_id) |
Q(alias_placeholder=self.placeholder_id)
)
return plugins.exists()
|
the-stack_0_13529 | #!/usr/bin/env python3
# Copyright 2017-18 TransitCenter http://transitcenter.org
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Infer calls at bus stops based on GTFS schedules and bus position data"""
from __future__ import division
import sys
import os
import getpass
from bisect import bisect, bisect_left
from typing import Callable
from datetime import datetime, timedelta
from multiprocessing import Pool
import logging
import warnings
from collections import Counter, namedtuple
from itertools import cycle
import argparse
import psycopg2
from psycopg2.extras import NamedTupleCursor
import numpy as np
import pytz
logger = logging.getLogger()
logger.setLevel(logging.INFO)
loghandler = logging.StreamHandler(sys.stdout)
logformatter = logging.Formatter(
fmt="%(levelname)s (%(lineno)3d) %(asctime)s %(message)s"
)
loghandler.setFormatter(logformatter)
logger.addHandler(loghandler)
warnings.simplefilter("ignore")
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
"DEC2FLOAT",
lambda value, curs: float(value) if value is not None else None,
)
# Maximum elapsed time between positions before we declare a new run
MAX_TIME_BETWEEN_STOPS = timedelta(seconds=60 * 30)
# when dist_from_stop < 30.48 m (100 feet) considered "at stop" by MTA --NJ
# this is not correct! It's only that the sign displays "at stop"
# beginning at 100 ft. Nevertheless, we're doing 100 ft
STOP_THRESHOLD = 30.48
# Minimum distance between positions when extrapolating.
# When zero, identical positions are allowed, which can produce crazy results
MIN_EXTRAP_DIST = 1
# The number of positions to use when extrapolating.
EXTRAP_LENGTH = 5
# Maximum number of stops to extrapolate forward or backward
EXTRAP_COUNT = 2
# Doing one complicated thing in this query.
# Some bus routes are loops with tails (e.g. B74):
# +--+
# | |---- (start and end)
# +——+
# ST_LineLocatePoint can't handle this, so we use the mostly-untrustworthy
# "positions"."dist_along_route" column to limit the part of the shape_geom
# we examine to a fraction of the LineString.
VEHICLE_QUERY = """
SELECT
EXTRACT(EPOCH FROM timestamp) AS time,
trip_id,
trip_start_date AS date,
st.stop_sequence AS seq,
CASE
WHEN dist_along_route is NULL and dist_from_stop is NULL
THEN ST_LineLocatePoint(g.route_geom, n.position_geom) * h.length
ELSE inferno.safe_locate(
g.route_geom,
n.position_geom,
LEAST(h.length - 500, GREATEST(0, dist_along_route - dist_from_stop - 500))::numeric,
LEAST(h.length, GREATEST(dist_along_route, 0) + 100)::numeric,
h.length::numeric
)
END::numeric(10, 2) AS distance
FROM {0} AS p
LEFT JOIN gtfs.trips USING (trip_id)
-- TODO: change to LEFT JOIN when fix implemented for orphan stops
INNER JOIN gtfs.stop_times st USING (feed_index, trip_id, stop_id)
LEFT JOIN gtfs.shape_geoms r USING (feed_index, shape_id),
ST_Transform(r.the_geom, %(epsg)s) g (route_geom),
ST_Transform(ST_SetSRID(ST_MakePoint(longitude, latitude), 4326), %(epsg)s) n (position_geom),
LATERAL (SELECT CASE %(epsg)s WHEN 4326 THEN r.length ELSE ST_Length(g.route_geom) END) h (length)
WHERE
vehicle_id = %(vehicle)s
AND trip_start_date = %(date)s::date
ORDER BY "timestamp"
"""
SELECT_VEHICLE = """SELECT DISTINCT vehicle_id
FROM {0} WHERE trip_start_date = %s"""
SELECT_CALLED_VEHICLES = """SELECT vehicle_id FROM {calls}
WHERE source = 'I' AND call_time::date = %s
GROUP BY vehicle_id"""
SELECT_STOPTIMES = """SELECT
feed_index,
stop_id,
inferno.wall_timez(DATE %(date)s, arrival_time, agency_timezone) AS datetime,
DATE %(date)s as date,
route_id,
direction_id,
stop_sequence AS seq,
ST_LineLocatePoint(route.the_geom, ST_Transform(stops.the_geom, %(epsg)s)) * ST_Length(route.the_geom) distance
FROM gtfs.trips
LEFT JOIN gtfs.agency USING (feed_index)
LEFT JOIN gtfs.stop_times USING (feed_index, trip_id)
LEFT JOIN gtfs.stops USING (feed_index, stop_id)
LEFT JOIN gtfs.shape_geoms shape USING (feed_index, shape_id),
ST_Transform(shape.the_geom, %(epsg)s) route (the_geom)
WHERE trip_id = %(trip)s
AND feed_index = (
SELECT MAX(feed_index)
FROM gtfs.trips
LEFT JOIN gtfs.calendar USING (feed_index, service_id)
WHERE trip_id = %(trip)s
AND date %(date)s BETWEEN start_date and end_date
)
ORDER BY stop_sequence ASC
"""
SELECT_STOPTIMES_PLAIN = """SELECT DISTINCT
feed_index,
stop_id,
inferno.wall_timez(date %(date)s, arrival_time, agency_timezone) AS datetime,
date %(date)s as date,
route_id,
direction_id,
stop_sequence,
ST_LineLocatePoint(route.the_geom, ST_Transform(stops.the_geom, %(epsg)s)) * ST_Length(route.the_geom) distance
FROM gtfs.trips
LEFT JOIN gtfs.agency USING (feed_index)
LEFT JOIN gtfs.stop_times USING (feed_index, trip_id)
LEFT JOIN gtfs.stops USING (feed_index, stop_id)
LEFT JOIN gtfs.shape_geoms shape USING (feed_index, shape_id),
ST_Transform(shape.the_geom, %(epsg)s) route (the_geom)
WHERE trip_id = %(trip)s
ORDER BY stop_sequence ASC;
"""
INSERT = """INSERT INTO {}
(vehicle_id, trip_id, direction_id, stop_id, run_index,
call_time, source, deviation, feed_index, date)
VALUES (%(vehicle)s, %(trip)s, %(direction_id)s, %(stop_id)s, currval('inferno.run_index'),
%(call_time)s, %(source)s, %(deviation)s, %(feed_index)s, %(date)s)
ON CONFLICT DO NOTHING"""
def common(lis: list):
"""Return the most common value in a list"""
return Counter(lis).most_common(1)[0][0]
def mask(lis: list, key: Callable, keep_last=None) -> list:
"""
Create a mask on `lis` using the `key` function.
`key` will be evaluated on pairs of items in `lis`.
Returned list will only include items where `key` evaluates to True.
Arguments:
keep_last (boolean): In a sequence of items where key() is False,
keep the last one.
"""
result = [lis[0]]
for item in lis[1:]:
if key(item, result[-1]):
result.append(item)
elif keep_last is True:
result[-1] = item
return result
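# Illustrative example (not from the original source): with a strictly-increasing
# key, mask() keeps only items that grow relative to the last kept item, e.g.
#   mask([1, 3, 2, 5], lambda new, last: new > last)  ->  [1, 3, 5]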
def desc2fn(description: tuple) -> tuple:
"""Extract tuple of field names from psycopg2 cursor.description."""
return tuple(d.name for d in description)
def compare_dist(a, b):
try:
return a.distance >= b.distance
except TypeError:
# Don't be lenient when there's bad data: return False.
return False
def toutc(timestamp):
return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC)
def get_positions(cursor, positions_table, query_args):
"""
Compile list of positions for a vehicle, using a list of positions
and filtering based on positions that reflect change in pattern or next_stop.
"""
runs = []
query = VEHICLE_QUERY.format(positions_table or "rt.vehicle_positions")
# load up cursor with every position for vehicle
cursor.execute(query, query_args)
if cursor.rowcount == 0:
logging.warning(
"No rows found for %s on %s", query_args["vehicle"], query_args["date"]
)
return []
# dummy position for comparison with first row
prev = namedtuple("prev", ("distance", "trip_id"))(0, None)
for position in cursor:
# If we're on a new trip, start a new run
if not position.trip_id == prev.trip_id:
runs.append([])
# If the distance has not declined, append the position
if compare_dist(position, prev) or position.trip_id != prev.trip_id:
runs[-1].append(position)
prev = position
return runs
def filter_positions(runs):
"""Filter runs to elimate shorties."""
return [run for run in runs if len(run) > 2 and len(set(r.seq for r in run)) > 1]
def get_stoptimes(cursor, tripid, date, epsg):
logging.debug("Fetching stoptimes for %s", tripid)
fields = {"trip": tripid, "date": date, "epsg": epsg}
cursor.execute(SELECT_STOPTIMES, fields)
if cursor.rowcount == 0:
logging.warning(
"Couldn't find any stoptimes in date range, running simple query: %s",
tripid,
)
logging.debug(cursor.query.decode("utf8"))
cursor.execute(SELECT_STOPTIMES_PLAIN, fields)
return cursor.fetchall()
def extrapolate(run, stoptimes, method=None):
"""
Extrapolating is hard. Depending on the input data points, extrapolated
    data could produce impossible results, e.g. an extrapolated time being less
than a known time. This is true even for linear extrapolations.
This function may run multiple extrapolations, counterintuitively using less
data until a reasonable result is obtained. In the extreme, a linear extrapolation
from two observations will always provide a plausible (if rough) estimate.
"""
xs = [x.distance for x in run]
ys = [x.time for x in run]
data = [x.distance for x in stoptimes]
result = []
# Use builtin comparison functions.
# Operations are symmetric when extrapolating forward vs. backward.
if method == "E":
# Extrapolate forward (to End).
compare = ys[-1].__lt__
elif method == "S":
# Extrapolate backward (to Start).
compare = ys[0].__gt__
else:
raise ValueError("Invalid direction")
# Try to ensure that the extrapolated values are consistent with
# the previous values by using shorter versions of the run when necessary
while len(ys) > 1:
slope, intercept = np.polyfit(xs, ys, 1)
result = [slope * x + intercept for x in data]
if slope > 0 and all(compare(y) for y in result):
# Got a legal extrapolation, return calls.
# Slope should always be > 0, if it isn't there's a serious data issue.
break
# otherwise...
result = []
# Slice from the beginning (if forward) or end (if backward)
# of the run.
logging.debug(
"Invalid extrap. method: %s. slope: %s. comparison: %s",
method,
round(slope, 2),
[compare(y) for y in result],
)
logging.debug("new extrap length: %s", len(xs) - 1)
xs.pop(0 if method == "E" else -1)
ys.pop(0 if method == "E" else -1)
return [call(s, t, method) for s, t in zip(stoptimes, result)]
def call(stoptime, seconds, method=None):
"""
Returns a dict with route, direction, stop, call time and source.
Call time is in UTC.
"""
result = dict(stoptime._asdict(), call_time=toutc(seconds), source=method or "I")
result["deviation"] = result["call_time"] - stoptime.datetime
return result
def generate_calls(run: list, stops: list, mintime=None, maxtime=None) -> list:
"""
    Generate the list of calls to be written.
    Args:
        run: list of positions for a single run (one trip)
        stops: list of scheduled stoptimes for this trip
mintime: don't extrapolate back before this time
maxtime: don't extrapolate forward past this time
"""
obs_distances = [p.distance for p in run]
obs_times = [p.time for p in run]
stop_positions = [x.distance for x in stops]
# Get the range of stop positions that can be interpolated based on data.
# The rest will be extrapolated
si = bisect_left(stop_positions, obs_distances[0])
ei = bisect(stop_positions, obs_distances[-1])
logging.debug("min, max\t%s\t%s", mintime, maxtime)
logging.debug("this run\t%s\t%s", toutc(obs_times[0]), toutc(obs_times[-1]))
if not stops[si:ei]:
logging.debug(
"No calls because no stops between si (%s) and ei (%s)",
obs_distances[0],
obs_distances[-1],
)
logging.debug(
"Stop distance range: %s - %s", min(stop_positions), max(stop_positions)
)
return []
# Interpolate main chunk of positions.
interpolated = np.interp(stop_positions[si:ei], obs_distances, obs_times)
calls = [call(stop, secs) for stop, secs in zip(stops[si:ei], interpolated)]
# Goal is to only extrapolate based on unique distances,
# When extrapolating forward, keep the oldest figure for a particular distance;
# when extrapolating back, keep the newest.
back_mask = mask(run, lambda x, y: x.distance > y.distance + MIN_EXTRAP_DIST)[
:EXTRAP_LENGTH
]
forward_mask = mask(
run, lambda x, y: x.distance > y.distance + MIN_EXTRAP_DIST, keep_last=True
)[-EXTRAP_LENGTH:]
# Extrapolate back for stops that occurred before observed positions.
if si > 0 and len(back_mask) > 1:
logging.debug("extrapolating backward. si = %s", si)
try:
backward = extrapolate(back_mask, stops[si - EXTRAP_COUNT : si], "S")
if mintime:
backward = [x for x in backward if x["call_time"] > mintime]
calls = backward + calls
except Exception as error:
logging.warning(
"Ignoring back extrapolation (trip_id = %s): %s ", run[0].trip_id, error
)
# import pdb
# pdb.set_trace()
# Extrapolate forward to the stops after the observed positions.
if ei < len(stops) and len(forward_mask) > 1:
logging.debug("extrapolating forward. ei = %s", ei)
try:
forward = extrapolate(forward_mask, stops[ei : ei + EXTRAP_COUNT], "E")
if maxtime:
forward = [x for x in forward if x["call_time"] < maxtime]
calls.extend(forward)
except Exception as error:
logging.warning(
"Ignoring forward extrapolation (trip_id = %s): %s",
run[0].trip_id,
error,
)
try:
assert increasing([x["call_time"] for x in calls])
except AssertionError:
logging.info("%s -- non-increasing calls", run[0].trip_id)
logging.debug(
"calc'ed call times: %s", [x["call_time"].timestamp() for x in calls]
)
logging.debug("observed positions: %s", obs_distances)
logging.debug("observed times: %s", obs_times)
logging.debug("stop positions: %s", stop_positions)
return calls
def increasing(L):
"""Check if array is increasing"""
return all(x <= y for x, y in zip(L, L[1:]))
def track_vehicle(
vehicle_id, query_args: dict, conn_kwargs: dict, calls_table, positions_table=None
):
"""Generate calls for a single vehicle in the database"""
positions_table = positions_table or "rt.vehicle_positions"
query_args["vehicle"] = vehicle_id
with psycopg2.connect(**conn_kwargs) as conn:
psycopg2.extensions.register_type(DEC2FLOAT)
logging.info("STARTING %s", vehicle_id)
with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
rawruns = get_positions(cursor, positions_table, query_args)
# filter out short runs and ones with few stops
runs = filter_positions(rawruns)
if len(rawruns) > len(runs):
logging.debug(
"skipping %d short runs, query: %s",
len(rawruns) - len(runs),
query_args,
)
# Compute temporal bounds of each run.
starts = [None] + [toutc(run[0].time) for run in runs[:-1]]
ends = [toutc(run[-1].time) for run in runs[1:]] + [None]
# Counter is just for logging.
lenc = 0
# each run will become a trip
for run, start, end in zip(runs, starts, ends):
if not run:
continue
if len(run) <= 2:
logging.debug(
"short run (%d positions), v_id=%s, %s",
len(run),
query_args["vehicle"],
run[0].time,
)
continue
# Assume most common trip is the correct one.
trip_id = common([x.trip_id for x in run])
# Get the scheduled list of stops for this trip.
stoptimes = get_stoptimes(
cursor, trip_id, query_args["date"], query_args["epsg"]
)
if any(x.distance is None for x in stoptimes):
logging.warning(
"Missing stoptimes trip_id= %s, date= %s",
trip_id,
query_args["date"],
)
continue
# Generate (infer) calls.
calls = generate_calls(run, stoptimes, mintime=start, maxtime=end)
# update run_index sequence
cursor.execute("SELECT nextval('inferno.run_index')")
# write calls to sink
cursor.executemany(
INSERT.format(calls_table),
[dict(trip=trip_id, vehicle=vehicle_id, **c) for c in calls],
)
lenc += len(calls)
conn.commit()
logging.debug("%s", cursor.statusmessage)
logging.info("COMMIT vehicle= %s, calls= %s", vehicle_id, lenc)
def get_cpus():
try:
return len(os.sched_getaffinity(0))
except AttributeError:
return os.cpu_count()
def connection_params():
"""Check the environment for postgresql connection parameters"""
pg = {
"PGUSER": "user",
"PGHOST": "host",
"PGPORT": "port",
"PGSERVICE": "service",
"PGPASSWORD": "password",
"PGPASSFILE": "passfile",
}
params = dict()
params.update({v: os.environ[k] for k, v in pg.items() if k in os.environ})
return params
def main(): # pragma: no cover
"""Run command line script"""
# connectionstring: str, table, date, vehicle=None
parser = argparse.ArgumentParser()
parser.add_argument("date", type=str)
parser.add_argument(
"--calls", type=str, default=os.environ.get("CALLS", "inferno.calls")
)
parser.add_argument(
"--positions",
type=str,
default=os.environ.get("POSITIONS", "rt.vehicle_positions"),
)
parser.add_argument("--vehicle", type=str)
parser.add_argument(
"--epsg",
type=int,
default=int(os.environ.get("EPSG", 4326)),
help="projection in which to calculate distances",
)
parser.add_argument(
"--debug", action="store_true", help="Run verbosely and without parallelism"
)
parser.add_argument("--quiet", action="store_true")
parser.add_argument("--verbose", action="store_true")
parser.add_argument(
"--incomplete", action="store_true", help="Restart an incomplete date"
)
parser.add_argument(
"--jobs",
type=int,
help="Number of jobs to run. Defaults to %s" % get_cpus(),
default=get_cpus(),
)
args = parser.parse_args()
conn_kwargs = connection_params()
if args.debug or args.verbose:
logger.setLevel(logging.DEBUG)
logging.debug("cli: %s", args)
logging.debug("connection: %s", conn_kwargs)
elif args.quiet:
logger.setLevel(logging.WARNING)
if args.vehicle:
vehicles = [args.vehicle]
else:
with psycopg2.connect(**conn_kwargs) as conn:
psycopg2.extensions.register_type(DEC2FLOAT)
with conn.cursor() as cursor:
logging.info("Finding vehicles for %s", args.date)
cursor.execute(SELECT_VEHICLE.format(args.positions), (args.date,))
vehicles = [x[0] for x in cursor.fetchall()]
if args.incomplete:
logging.info("Removing already-called vehicles")
cursor.execute(
SELECT_CALLED_VEHICLES.format(calls=args.calls), (args.date,)
)
called = set(x[0] for x in cursor.fetchall())
vehicles = set(vehicles).difference(called)
logging.info("Removed %s", len(called))
logging.info("Found %s vehicles", len(vehicles))
itervehicles = zip(
vehicles,
cycle([{"date": args.date, "epsg": args.epsg}]),
cycle([conn_kwargs]),
cycle([args.calls]),
cycle([args.positions]),
)
if args.debug or args.jobs == 1:
for i in itervehicles:
track_vehicle(*i)
else:
with Pool(args.jobs) as pool:
pool.starmap(track_vehicle, itervehicles)
logging.info("completed %s", args.date)
if __name__ == "__main__":
main()
|
the-stack_0_13530 | # -*- coding: utf-8 -*-
import numpy as np
import scipy.signal as sig
from math import pi
import math
def PLL(input_signal, Fs, lenght, N):
"""Synchronizes the input carryer signal with the local oscillator to avoid crosstalk due to phase and frequency differences between TX and RX.
Parameters
----------
input_signal : 1D array of floats
Complex signal received at the input of the demodulator.
Fs : float
Sampling frequency.
lenght : int
        Length of the output vector.
N : int
Samples per period of the sinusuidal wave.
Returns
-------
cos_out : 1D array of floats
Cosine wave synchronized with the input signal.
sin_out : 1D array of floats
Sine wave synchronized with the input signal.
"""
# K_p = 0.2667
# K_i = 0.0178
zeta = .707 # damping factor
k = 1
Bn = 0.01*Fs #Noise Bandwidth
K_0 = 1 # NCO gain
K_d = 1/2 # Phase Detector gain
K_p = (1/(K_d*K_0))*((4*zeta)/(zeta+(1/(4*zeta)))) * \
(Bn/Fs) # Proporcional gain
K_i = (1/(K_d*K_0))*(4/(zeta+(1/(4*zeta)**2))) * \
(Bn/Fs)**2 # Integrator gain
integrator_out = 0
phase_estimate = np.zeros(lenght)
e_D = [] # phase-error output
e_F = [] # loop filter output
sin_out_n = np.zeros(lenght)
cos_out_n = np.ones(lenght)
for n in range(lenght-1):
# phase detector
try:
e_D.append(
math.atan(input_signal[n] * (cos_out_n[n] + sin_out_n[n])))
except IndexError:
e_D.append(0)
# loop filter
integrator_out += K_i * e_D[n]
e_F.append(K_p * e_D[n] + integrator_out)
# NCO
try:
phase_estimate[n+1] = phase_estimate[n] + K_0 * e_F[n]
except IndexError:
phase_estimate[n+1] = K_0 * e_F[n]
sin_out_n[n+1] = -np.sin(2*np.pi*(k/N)*(n+1) + phase_estimate[n])
cos_out_n[n+1] = np.cos(2*np.pi*(k/N)*(n+1) + phase_estimate[n])
sin_out_n = -sin_out_n
cos_out = cos_out_n[280:400]
sin_out = sin_out_n[280:400]
for i in range(18):
cos_out = np.concatenate(
(cos_out, cos_out_n[280:400], cos_out_n[280:400]), axis=None)
sin_out = np.concatenate(
(sin_out, sin_out_n[280:400], sin_out_n[280:400]), axis=None)
return(cos_out, sin_out)
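# Illustrative usage sketch (argument values are assumptions, not from the
# original source): given a received carrier sampled at Fs with N samples per
# period, the synchronized references could be obtained as
#   cos_ref, sin_ref = PLL(received_signal, Fs, lenght=len(received_signal), N=N)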
def LPF(signal, fc, Fs):
"""Low pass filter, Butterworth approximation.
Parameters
----------
signal : 1D array of floats
Signal to be filtered.
fc : float
        Cut-off frequency.
Fs : float
Sampling frequency.
Returns
-------
signal_filt : 1D array of floats
Filtered signal.
W : 1D array of floats
The frequencies at which 'h' was computed, in Hz.
h : complex
The frequency response.
"""
o = 5 # order of the filter
fc = np.array([fc])
wn = 2*fc/Fs
[b, a] = sig.butter(o, wn, btype='lowpass')
[W, h] = sig.freqz(b, a, worN=1024)
W = Fs*W/(2*pi)
signal_filt = sig.lfilter(b, a, signal)
return(signal_filt, W, h)
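# Illustrative usage sketch (cut-off and sampling-rate values are assumptions):
#   baseband, W, h = LPF(mixed_signal, fc=2000, Fs=48000)
# applies the 5th-order Butterworth low-pass with a 2 kHz cut-off to a 48 kHz signal.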
def matched_filter(signal, template):
"""Convolutes the baseband signal with the template of the impulse response used in the modulator (Square Root Raised Cosine) to increase the SNR.
Parameters
----------
signal : 1D array of floats
Baseband signal to be filtered.
template : 1D array of floats
Impulse response of the filter used at the signal shaping block
Returns
-------
signal_filt : 1D array of floats
Filtered signal.
"""
signal_filt = np.convolve(signal, template, 'full')
return(signal_filt)
def downsampler(signal, packet_s, upsampler_f):
"""The algorithm analyzes the synchronization symbols and tries to find the sample where the value of the symbol is maximum. After that, is possible to estimate in which sample the information begins to appear on the signal (i.e. detects the delay)
Parameters
----------
signal : 1D array of floats
Baseband signal.
packet_s : int
Number of bits in the transmitted packet.
upsampler_f : int
Upsampler factor used at the modulator.
Returns
-------
symbols : 1D array of floats
The sampled symbols.
"""
e = 0
gardner_e = []
peak_sample = 0
peak_sample_acc = []
low_point = 0
threshold = 4
for i in range(len(signal)):
if signal[low_point] < -threshold:
if signal[i] > threshold:
e = (abs(signal[(i+1)]) -
abs(signal[i-1])) * abs(signal[i])
gardner_e.append(e)
if e > 0.8:
peak_sample = peak_sample + 1
peak_sample_acc.append(peak_sample)
elif e < -0.8:
peak_sample = peak_sample - 1
peak_sample_acc.append(peak_sample)
else:
break
else:
peak_sample = peak_sample + 1
peak_sample_acc.append(peak_sample)
else:
low_point = low_point + 1
peak_sample = peak_sample + 1
peak_sample_acc.append(peak_sample)
# 450 is the number of samples before the convergence symbol of the algorithm.
cut_i = peak_sample - 450
cut_f = cut_i + int((packet_s/4)*upsampler_f)
print("Cut_i = ", cut_i)
print("Cut_f = ", cut_f)
    # For the code to still work, even when there is a big BER, this section is required.
if cut_i > 730:
signal = signal[261:2306+510]
elif cut_i < 690:
signal = signal[261:2306+510]
else:
signal = signal[cut_i:cut_f]
symbols = signal[slice(0, len(signal), upsampler_f)]
return(symbols)
def demapper(symbols_I, symbols_Q, packetSize, threshold = 3.0):
"""Generates an array of bits using the values based on the 16QAM indexing vector.
- If the symbol amplitude is between 0 and the threshold, it corresponds to the bits 10, if it's greater than the threshold, it corresponds to the sequence 11.
- If the symbol amplitude is between 0 and -threshold, it corresponds to the bits 01, if it's lower than -threshold, it corresponds to the sequence 00.
After the recovery of the bits, both vectors (I and Q) are merged, generating the output bitstream.
Parameters
----------
symbols_I : 1D array of floats
Downsampled in-phase symbols.
symbols_Q : 1D array of floats
Downsampled quadrature symbols.
packetSize : int
Number of bits in the transmitted packet.
threshold : float, optional
The limit between two symbols in the 16QAM constellation. The default value is 3.
Returns
-------
bitstream : 1D array of ints
Bits transmitted.
"""
Ns = int(packetSize/4)
bits_I = []
bits_Q = []
for i in range(Ns):
if symbols_I[i] >= 0 and symbols_I[i] <= threshold:
bits_I.append(1)
bits_I.append(0)
if symbols_I[i] > threshold:
bits_I.append(1)
bits_I.append(1)
if symbols_I[i] < 0 and symbols_I[i] >= -threshold:
bits_I.append(0)
bits_I.append(1)
if symbols_I[i] < -threshold:
bits_I.append(0)
bits_I.append(0)
if symbols_Q[i] >= 0 and symbols_Q[i] <= threshold:
bits_Q.append(1)
bits_Q.append(0)
if symbols_Q[i] > threshold:
bits_Q.append(1)
bits_Q.append(1)
if symbols_Q[i] < 0 and symbols_Q[i] >= -threshold:
bits_Q.append(0)
bits_Q.append(1)
if symbols_Q[i] < -threshold:
bits_Q.append(0)
bits_Q.append(0)
bits_I = list(map(int, bits_I))
bits_Q = list(map(int, bits_Q))
bitStream = np.zeros(packetSize)
for i in range(len(bits_I)):
bitStream[2*i] = bits_I[i]
bitStream[2*i-1] = bits_Q[i-1]
return(bitStream)
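# Worked example of the threshold mapping above (illustrative, threshold = 3):
#   symbol amplitude  2.5 -> bits 1,0   (between 0 and the threshold)
#   symbol amplitude  4.0 -> bits 1,1   (greater than the threshold)
#   symbol amplitude -1.2 -> bits 0,1   (between -threshold and 0)
#   symbol amplitude -5.0 -> bits 0,0   (lower than -threshold)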
|
the-stack_0_13531 | from . import FieldType
from . import FieldValue
from . import FieldTypeRegression
from . import FieldValueRegression
def rec(node, result, randomstate):
node.randomstate = randomstate
v = node.pick_value()
result[node.field_name] = v.get_field_value()
if v.next_field and len(v.next_field.values) > 0:
rec(v.next_field, result, randomstate)
def process_type(node, randomstate):
field = FieldType()
field.values = []
for field_key, _ in node.items():
if field_key not in ('count', 'ratio', 'total'):
field.field_name = field_key
for value_key, field_value in node[field_key].items():
if value_key not in ['total']:
field.values.append(process_value(field_value, value_key, randomstate))
return field
def process_stats(node, randomstate):
outer_type = None
current_value = None
for field_key, _ in node['stats'].items():
next_type = FieldTypeRegression()
next_type.field_name = field_key
value = FieldValueRegression(randomstate)
value.count = node['stats'][field_key]["count"]
value.mean = node['stats'][field_key]["mean"]
value.var = node['stats'][field_key]["var"]
value.std = node['stats'][field_key]["std"]
value.min = node['stats'][field_key]["min"]
value.max = node['stats'][field_key]["max"]
value.median = node['stats'][field_key]["median"]
if "best_fit_distribution" in node['stats'][field_key]:
value.best_fit = node['stats'][field_key]["best_fit_distribution"]
value.fit_parameter = node['stats'][field_key]["fit_parameter"]
else:
value.best_fit = None
value.fit_parameter = None
value.next_field = None
next_type.values = []
next_type.values.append(value)
if not outer_type:
outer_type = next_type
else:
current_value.next_field = next_type
current_value = value
return outer_type
def process_value(node, value, randomstate):
field_value = FieldValue()
field_value.field_value = value
field_value.ratio = node['ratio']
field_value.count = node['count']
if 'stats' in node:
field_value.next_field = process_stats(node, randomstate)
else:
field_value.next_field = process_type(node, randomstate)
return field_value
|
the-stack_0_13532 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C6_timing_layer_bar_cff import Phase2C6_timing_layer_bar
process = cms.Process('PROD',Phase2C6_timing_layer_bar)
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load('Configuration.Geometry.GeometryExtended2026D44_cff')
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Validation.HGCalValidation.hfnoseSimHitStudy_cfi')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['phase2_realistic']
if hasattr(process,'MessageLogger'):
process.MessageLogger.categories.append('HGCalValidation')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:step1.root',
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('hfnSimHitD44tt.root'),
closeFileFast = cms.untracked.bool(True)
)
process.analysis_step = cms.Path(process.hgcalSimHitStudy)
# Schedule definition
process.schedule = cms.Schedule(process.analysis_step)
|
the-stack_0_13533 | '''
Problem 72 - Counting fractions
Consider the fraction, n/d, where n and d are positive integers.
If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that there are 21 elements in this set.
How many elements would be contained in the set of reduced proper fractions for d ≤ 1,000,000 ?
'''
'''
Solution : It can be seen that the solution is:
sum(phi(i)) for i in range(2, 1000001)
We can use Euler's formula and a sieve to compute it easily
'''
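# Quick sanity check of the approach above (illustrative): for d <= 8,
# sum(phi(d) for d in 2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, matching the 21
# fractions listed in the problem statement.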
N = 10**6
# import numpy as np
# arr = np.arange(N+1)
# print(arr.shape)
arr = [i for i in range(N + 1)]
result = 0
for i in range(2, N + 1):
if arr[i] == i:
for j in range(i, N + 1, i):
arr[j] = (arr[j] // i) * (i - 1)
result += arr[i]
print(result)
|
the-stack_0_13534 | import numpy as np
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class TestBidirectionalLanguageModel(ModelTestCase):
def setUp(self):
super().setUp()
self.expected_embedding_shape = (2, 8, 14)
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_bidirectional.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_bidirectional_lm_can_train_save_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent(keys_to_ignore=["batch_weight"])
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
result = self.model(**training_tensors)
assert set(result) == {
"loss",
"forward_loss",
"backward_loss",
"lm_embeddings",
"noncontextual_token_embeddings",
"mask",
"batch_weight",
}
# The model should preserve the BOS / EOS tokens.
embeddings = result["lm_embeddings"]
assert tuple(embeddings.shape) == self.expected_embedding_shape
loss = result["loss"].item()
forward_loss = result["forward_loss"].item()
backward_loss = result["backward_loss"].item()
np.testing.assert_almost_equal(loss, (forward_loss + backward_loss) / 2, decimal=3)
class TestBidirectionalLanguageModelUnsampled(TestBidirectionalLanguageModel):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_bidirectional_unsampled.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
|
the-stack_0_13536 | # -*- coding: utf-8 -*-
from openerp import models, fields, api
class Wizard(models.TransientModel):
_name = 'openacademy.wizard'
def _default_session(self):
return self.env['openacademy.session'].browse(self._context.get('active_ids'))
session_ids = fields.Many2many('openacademy.session',
string="Session",
required=True,
default=_default_session)
attendee_ids = fields.Many2many('res.partner',
string="Attendees")
@api.multi
def subscribe(self):
for session in self.session_ids:
session.attendee_ids |= self.attendee_ids
return {}
|
the-stack_0_13540 | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib import admin
from django.urls import path, include # add this
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include("rest.urls")),
path("", include("authentication.urls")), # add this
path("", include("app.urls")) # add this
]
|
the-stack_0_13541 | """
Blocklist management
"""
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List
import pkg_resources
from .configuration import BandersnatchConfig
if TYPE_CHECKING:
from configparser import SectionProxy
# The API_REVISION is incremented if the plugin class is modified in a
# backwards incompatible way. In order to prevent loading older
# broken plugins that may be installed and will break due to changes to
# the methods of the classes.
PLUGIN_API_REVISION = 2
PROJECT_PLUGIN_RESOURCE = f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.project"
METADATA_PLUGIN_RESOURCE = (
f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.metadata"
)
RELEASE_PLUGIN_RESOURCE = f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.release"
RELEASE_FILE_PLUGIN_RESOURCE = (
f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.release_file"
)
class Filter:
"""
Base Filter class
"""
name = "filter"
deprecated_name: str = ""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.configuration = BandersnatchConfig().config
if (
"plugins" not in self.configuration
or "enabled" not in self.configuration["plugins"]
):
return
split_plugins = self.configuration["plugins"]["enabled"].split("\n")
if (
"all" not in split_plugins
and self.name not in split_plugins
# TODO: Remove after 5.0
and not (self.deprecated_name and self.deprecated_name in split_plugins)
):
return
self.initialize_plugin()
def initialize_plugin(self) -> None:
"""
Code to initialize the plugin
"""
# The initialize_plugin method is run once to initialize the plugin. This should
# contain all code to set up the plugin.
# This method is not run in the fast path and should be used to do things like
# indexing filter databases, etc that will speed the operation of the filter
# and check_match methods that are called in the fast path.
pass
def filter(self, metadata: dict) -> bool:
"""
Check if the plugin matches based on the package's metadata.
Returns
=======
bool:
True if the values match a filter rule, False otherwise
"""
return False
def check_match(self, **kwargs: Any) -> bool:
"""
Check if the plugin matches based on the arguments provides.
Returns
=======
bool:
True if the values match a filter rule, False otherwise
"""
return False
@property
def allowlist(self) -> "SectionProxy":
return self.configuration["allowlist"]
@property
def blocklist(self) -> "SectionProxy":
return self.configuration["blocklist"]
class FilterProjectPlugin(Filter):
"""
Plugin that blocks sync operations for an entire project
"""
name = "project_plugin"
class FilterMetadataPlugin(Filter):
"""
Plugin that blocks sync operations for an entire project based on info fields.
"""
name = "metadata_plugin"
class FilterReleasePlugin(Filter):
"""
Plugin that modifies the download of specific releases or dist files
"""
name = "release_plugin"
class FilterReleaseFilePlugin(Filter):
"""
Plugin that modify the download of specific release or dist files
"""
name = "release_file_plugin"
class LoadedFilters:
"""
A class to load all of the filters enabled
"""
ENTRYPOINT_GROUPS = [
PROJECT_PLUGIN_RESOURCE,
METADATA_PLUGIN_RESOURCE,
RELEASE_PLUGIN_RESOURCE,
RELEASE_FILE_PLUGIN_RESOURCE,
]
def __init__(self, load_all: bool = False) -> None:
"""
Loads and stores all of specified filters from the config file
"""
self.config = BandersnatchConfig().config
self.loaded_filter_plugins: Dict[str, List["Filter"]] = defaultdict(list)
self.enabled_plugins = self._load_enabled()
if load_all:
self._load_filters(self.ENTRYPOINT_GROUPS)
def _load_enabled(self) -> List[str]:
"""
Reads the config and returns all the enabled plugins
"""
enabled_plugins: List[str] = []
try:
config_plugins = self.config["plugins"]["enabled"]
split_plugins = config_plugins.split("\n")
if "all" in split_plugins:
enabled_plugins = ["all"]
else:
for plugin in split_plugins:
if not plugin:
continue
enabled_plugins.append(plugin)
except KeyError:
pass
return enabled_plugins
def _load_filters(self, groups: List[str]) -> None:
"""
Loads filters from the entry-point groups specified in groups
"""
for group in groups:
plugins = set()
for entry_point in pkg_resources.iter_entry_points(group=group):
plugin_class = entry_point.load()
plugin_instance = plugin_class()
if (
"all" in self.enabled_plugins
or plugin_instance.name in self.enabled_plugins
or plugin_instance.deprecated_name in self.enabled_plugins
):
plugins.add(plugin_instance)
self.loaded_filter_plugins[group] = list(plugins)
def filter_project_plugins(self) -> List[Filter]:
"""
Load and return the project filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if PROJECT_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([PROJECT_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[PROJECT_PLUGIN_RESOURCE]
def filter_metadata_plugins(self) -> List[Filter]:
"""
Load and return the metadata filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if METADATA_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([METADATA_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[METADATA_PLUGIN_RESOURCE]
def filter_release_plugins(self) -> List[Filter]:
"""
Load and return the release filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if RELEASE_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([RELEASE_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[RELEASE_PLUGIN_RESOURCE]
def filter_release_file_plugins(self) -> List[Filter]:
"""
Load and return the release file filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if RELEASE_FILE_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([RELEASE_FILE_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[RELEASE_FILE_PLUGIN_RESOURCE]
|
the-stack_0_13542 | __author__ = "Christopher Dean"
__copyright__ = ""
__credits__ = ["Christopher Dean"]
__version__ = ""
__maintainer__ = "Christopher Dean"
__email__ = "[email protected]"
__status__ = "I'm doing fine."
import sys
import argparse
def parse_cmdline_params(cmdline_params):
info = "Remove duplicate annotations from FASTA formatted reference file"
parser = argparse.ArgumentParser(description=info)
parser.add_argument('-r', '--reference_sequence', type=str, required=True,
help='Please provide a FASTA formatted reference file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Please provide an output file name in FASTA format')
return parser.parse_args(cmdline_params)
def read_fasta(filename):
"""
Removes duplicate annotations from FASTA file
:param (str) filename: FASTA file
:return (dict) records: A dictionary of FASTA records
"""
with open(filename, 'r') as fp:
records = {}
for line in fp:
key = line
            value = next(fp)
if key in records:
if len(value) > len(records[key]):
records[key] = value
else:
records[key] = value
fp.close()
return records
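# Illustrative behaviour (hypothetical input): if the file contains the header
# ">seq1" twice, once with sequence "ACGT" and once with "ACGTACGT", only the
# longer sequence is kept for that header.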
def write_fasta(filename, records):
"""
Writes a dictionary of FASTA records to file
:param (str) filename: Output file
:param (dict) records: A dictionary of FASTA records
:return (void): Void method
"""
handler = open(filename, 'w')
for k, v in records.items():
handler.write(k)
handler.write(v)
handler.close()
if __name__ == "__main__":
opts = parse_cmdline_params(sys.argv[1:])
records = read_fasta(opts.reference_sequence)
write_fasta(opts.output, records)
|
the-stack_0_13545 | import random
class BaseTank:
def __init__(self, tank_data):
self.death = False
self.model = tank_data.get('short_name')
self.nation = tank_data.get('nation')
default_profile = tank_data.get('default_profile')
self.armor_dict = default_profile.get('armor').get('hull')
self.health = default_profile.get('hp')
self.precision = round((1 - default_profile.get('gun').get('dispersion')) * 100) - 10
ammo_list = default_profile.get('ammo')
self.ammo_damage = None
for ammo in ammo_list:
if ammo.get('type') == 'ARMOR_PIERCING':
self.ammo_damage = ammo.get('damage')[2]
break
if not self.ammo_damage:
raise Exception('No ammo damage found for type Armor Piercing')
def __str__(self):
return f'''
Nation: {self.nation}
Model: {self.model}
Type: {type(self).__name__}
Armor: {self.armor_dict}
Health: {self.health}
Damage: {self.ammo_damage}
Precision: {self.precision}'''
def dodge(self):
return False
def inflict_damage(self, enemy_tank):
result = None
if not self.death:
if random.randint(1, 100) >= self.precision:
enemy_tank.receive_damage(self.ammo_damage)
result = 'hit'
else:
result = 'miss!'
return result
def receive_damage(self, damage_amount):
if not self.death:
self.health -= damage_amount
if self.health <= 0:
self.death = True
|
the-stack_0_13546 | import speedtest as st
import sys
import threading
import itertools
import time
from colorama import Fore
from plugin import plugin
class SpinnerThread(threading.Thread):
"""SpinnerThread class to show a spinner on command line while the progream is running"""
def __init__(self, label="Hmmm... ", delay=0.2):
super(SpinnerThread, self).__init__()
self.label = label
self.delay = delay
self.running = False
def start(self):
self.running = True
super(SpinnerThread, self).start()
def run(self):
chars = itertools.cycle(r'-\|/')
while self.running:
sys.stdout.write('\r' + self.label + next(chars))
sys.stdout.flush()
time.sleep(self.delay)
def stop(self):
self.running = False
self.join()
sys.stdout.write('\r')
sys.stdout.flush()
@plugin(network=True)
def speedtest(jarvis, s):
"""Runs a speedtest on your internet connection"""
try:
res = st.Speedtest()
except st.ConfigRetrievalError:
return jarvis.connection_error()
    # Create a spinner on the command line to show that it's running
spinner = SpinnerThread('Running the test ', 0.15)
spinner.start()
res.get_best_server()
download_speed = res.download()
upload_speed = res.upload()
# results_dict = res.results.dict()
spinner.stop()
# Print the results
jarvis.say('Speed test results:', Fore.GREEN)
jarvis.say('Download: ' + pretty_speed(download_speed), Fore.GREEN)
jarvis.say('Upload: ' + pretty_speed(upload_speed), Fore.GREEN)
def pretty_speed(speed):
""" return speed value prettily accordingly in either bps, Kbps, Mbps, Gbps"""
unit = 'bps'
kmg = ['', 'K', 'M', 'G']
i = 0
while speed >= 1000:
speed /= 1000
i += 1
return "{:.2f}".format(speed) + ' ' + kmg[i] + unit
|
the-stack_0_13547 | import math
import re
import threading
import time
from typing import Any, Callable, List, Optional, Set, Tuple
import antlr4
from pytest import mark, param, raises, warns
from omegaconf import (
AnyNode,
Container,
DictConfig,
ListConfig,
OmegaConf,
_utils,
grammar_parser,
grammar_visitor,
)
from omegaconf._utils import nullcontext
from omegaconf.errors import (
GrammarParseError,
InterpolationKeyError,
InterpolationResolutionError,
UnsupportedInterpolationType,
)
TAB = "\t" # to be used in raw strings, e.g. `fr"C:\{TAB}foo"`
# Characters that are not allowed by the grammar in config key names.
INVALID_CHARS_IN_KEY_NAMES = r"""\{}()[].:"' """
UNQUOTED_SPECIAL = r"/-\+.$%*@?|" # special characters allowed in unquoted strings
# A fixed config that may be used (but not modified!) by tests.
BASE_TEST_CFG = OmegaConf.create(
{
# Standard data types.
"str": "hi",
"int": 123,
"float": 1.2,
"dict": {"a": 0, "b": {"c": 1}},
"list": [x - 1 for x in range(11)],
"null": None,
# Special cases.
"x@y": 123, # @ in name
"$x$y$z$": 456, # $ in name (beginning, middle and end)
"0": 0, # integer name
"FalsE": {"TruE": True}, # bool name
"None": {"null": 1}, # null-like name
"1": {"2": 12}, # dot-path with int keys
# Used in nested interpolations.
"str_test": "test",
"ref_str": "str",
"options": {"a": "A", "b": "B"},
"choice": "a",
"rel_opt": ".options",
}
)
# Parameters for tests of the "singleElement" rule when there is no interpolation.
# Each item is a tuple with three elements:
# - The id of the test.
# - The expression to be evaluated.
# - The expected result, that may be an exception. If it is a `GrammarParseError` then
# it is assumed that the parsing will fail. If it is another kind of exception then
# it is assumed that the parsing will succeed, but this exception will be raised when
# visiting (= evaluating) the parse tree. If the expected behavior is for the parsing
# to succeed, but a `GrammarParseError` to be raised when visiting it, then set the
# expected result to the pair `(None, GrammarParseError)`.
PARAMS_SINGLE_ELEMENT_NO_INTERPOLATION: List[Tuple[str, str, Any]] = [
# Special keywords.
("null", "null", None),
("true", "TrUe", True),
("false", "falsE", False),
("true_false", "true_false", "true_false"),
# Integers.
("int", "123", 123),
("int_pos", "+123", 123),
("int_neg", "-123", -123),
("int_underscore", "1_000", 1000),
("int_bad_underscore_1", "1_000_", "1_000_"),
("int_bad_underscore_2", "1__000", "1__000"),
("int_bad_underscore_3", "_1000", "_1000"),
("int_bad_zero_start", "007", "007"),
# Floats.
("float", "1.1", 1.1),
("float_no_int", ".1", 0.1),
("float_no_decimal", "1.", 1.0),
("float_minus", "-.2", -0.2),
("float_underscore", "1.1_1", 1.11),
("float_bad_1", "1.+2", "1.+2"),
("float_bad_2", r"1\.2", r"1\.2"),
("float_bad_3", "1.2_", "1.2_"),
("float_exp_1", "-1e2", -100.0),
("float_exp_2", "+1E-2", 0.01),
("float_exp_3", "1_0e1_0", 10e10),
("float_exp_4", "1.07e+2", 107.0),
("float_exp_5", "1e+03", 1000.0),
("float_exp_bad_1", "e-2", "e-2"),
("float_exp_bad_2", "01e2", "01e2"),
("float_inf", "inf", math.inf),
("float_plus_inf", "+inf", math.inf),
("float_minus_inf", "-inf", -math.inf),
("float_nan", "nan", math.nan),
("float_plus_nan", "+nan", math.nan),
("float_minus_nan", "-nan", math.nan),
# Unquoted strings.
# Note: raw strings do not allow trailing \, adding a space and stripping it.
(
"str_legal",
(r" a" + UNQUOTED_SPECIAL + r"\\ ").strip(),
(r" a" + UNQUOTED_SPECIAL + r"\ ").strip(),
),
("str_illegal_1", "a,=b", GrammarParseError),
("str_illegal_2", f"{chr(200)}", GrammarParseError),
("str_illegal_3", f"{chr(129299)}", GrammarParseError),
("str_dot", ".", "."),
("str_dollar", "$", "$"),
("str_colon", ":", ":"),
("str_ws_1", "hello world", "hello world"),
("str_ws_2", "a b\tc \t\t d", "a b\tc \t\t d"),
("str_esc_ws_1", r"\ hello\ world\ ", " hello world "),
("str_esc_ws_2", fr"\ \{TAB}\{TAB}", f" {TAB}{TAB}"),
("str_esc_comma", r"hello\, world", "hello, world"),
("str_esc_colon", r"a\:b", "a:b"),
("str_esc_equal", r"a\=b", "a=b"),
("str_esc_parentheses", r"\(foo\)", "(foo)"),
("str_esc_brackets", r"\[foo\]", "[foo]"),
("str_esc_braces", r"\{foo\}", "{foo}"),
("str_esc_backslash", r" \ ".strip(), r" \ ".strip()),
("str_backslash_noesc", r"ab\cd", r"ab\cd"),
("str_esc_illegal_1", r"\#", GrammarParseError),
("str_esc_illegal_2", r""" \'\" """.strip(), GrammarParseError),
# Quoted strings.
("str_quoted_single", "'!@#$%^&*|()[]:.,\"'", '!@#$%^&*|()[]:.,"'),
("str_quoted_double", '"!@#$%^&*|()[]:.,\'"', "!@#$%^&*|()[]:.,'"),
("str_quoted_outer_ws_single", "' a \t'", " a \t"),
("str_quoted_outer_ws_double", '" a \t"', " a \t"),
("str_quoted_int", "'123'", "123"),
("str_quoted_null", "'null'", "null"),
("str_quoted_bool", "['truE', \"FalSe\"]", ["truE", "FalSe"]),
("str_quoted_list", "'[a,b, c]'", "[a,b, c]"),
("str_quoted_dict", '"{a:b, c: d}"', "{a:b, c: d}"),
("str_quoted_backslash_noesc_single", r"'a\b'", r"a\b"),
("str_quoted_backslash_noesc_double", r'"a\b"', r"a\b"),
("str_quoted_concat_bad_2", "'Hi''there'", GrammarParseError),
("str_quoted_too_many_1", "''a'", GrammarParseError),
("str_quoted_too_many_2", "'a''", GrammarParseError),
("str_quoted_too_many_3", "''a''", GrammarParseError),
("str_quoted_trailing_esc_1", r"'abc\\'", r" abc\ ".strip()),
("str_quoted_trailing_esc_2", r"'abc\\\\'", r" abc\\ ".strip()),
("str_quoted_no_esc_single_1", r"'abc\def'", r"abc\def"),
("str_quoted_no_esc_single_2", r"'abc\\def'", r"abc\\def"),
("str_quoted_no_esc_single_3", r"'\\\abc\def'", r"\\\abc\def"),
("str_quoted_no_esc_dollar_single", r"'abc\\$$'", r"abc\\$$"),
("str_quoted_no_esc_double_1", r'"abc\def"', r"abc\def"),
("str_quoted_no_esc_double_2", r'"abc\\def"', r"abc\\def"),
("str_quoted_no_esc_double_3", r'"\\\abc\def"', r"\\\abc\def"),
("str_quoted_no_esc_dollar_double", r'"abc\\$$"', r"abc\\$$"),
("str_quoted_bad_1", r'"abc\"', GrammarParseError),
("str_quoted_bad_2", r'"abc\\\"', GrammarParseError),
("str_quoted_esc_quote_single_1", r"'abc\'def'", "abc'def"),
("str_quoted_esc_quote_single_2", r"'abc\\\'def'", r"abc\'def"),
("str_quoted_esc_quote_single_3", r"'abc\\\\\'def'", r"abc\\'def"),
("str_quoted_esc_quote_single_4", r"'a\'b\'cdef\\\''", r"a'b'cdef\'"),
("str_quoted_esc_quote_single_bad", r"'abc\\'def'", GrammarParseError),
("str_quoted_esc_quote_double_1", r'"abc\"def"', 'abc"def'),
("str_quoted_esc_quote_double_2", r'"abc\\\"def"', r"abc\"def"),
("str_quoted_esc_quote_double_3", r'"abc\\\\\"def"', r'abc\\"def'),
("str_quoted_esc_quote_double_4", r'"a\"b\"cdef\\\""', r'a"b"cdef\"'),
("str_quoted_esc_quote_double_bad", r'"abc\\"def"', GrammarParseError),
("str_quoted_empty", "''", ""),
("str_quoted_basic", "'a'", "a"),
("str_quoted_tmp_1", r"'\a'", r"\a"),
("str_quoted_tmp_2", r"'a\'", GrammarParseError),
("str_quoted_inside_quote_different", "'\"'", '"'),
("str_quoted_inside_quote_same", r"'\''", "'"),
("str_quoted_extra_quote", r"'c:\\''", GrammarParseError),
# Lists and dictionaries.
("list", "[0, 1]", [0, 1]),
(
"dict",
"{x: 1, a: b, y: 1e2, null2: 0.1, true3: false, inf4: true}",
{"x": 1, "a": "b", "y": 100.0, "null2": 0.1, "true3": False, "inf4": True},
),
(
"dict_unquoted_key",
fr"{{a0-null-1-3.14-NaN- {TAB}-true-False-{UNQUOTED_SPECIAL}\(\)\[\]\{{\}}\:\=\ \{TAB}\,:0}}",
{
fr"a0-null-1-3.14-NaN- {TAB}-true-False-{UNQUOTED_SPECIAL}()[]{{}}:= {TAB},": 0
},
),
(
"dict_quoted",
"{0: 1, 'a': 'b', 1.1: 1e2, null: 0.1, true: false, -inf: true}",
GrammarParseError,
),
(
"structured_mixed",
"[10,str,3.14,true,false,inf,[1,2,3], 'quoted', \"quoted\", 'a,b,c']",
[
10,
"str",
3.14,
True,
False,
math.inf,
[1, 2, 3],
"quoted",
"quoted",
"a,b,c",
],
),
("dict_int_key", "{0: 0}", {0: 0}),
("dict_float_key", "{1.1: 0}", {1.1: 0}),
("dict_null_key", "{null: 0}", {None: 0}),
("dict_nan_like_key", "{'nan': 0}", GrammarParseError),
("dict_list_as_key", "{[0]: 1}", GrammarParseError),
(
"dict_bool_key",
"{true: true, false: 'false'}",
{True: True, False: "false"},
),
("empty_dict", "{}", {}),
("empty_list", "[]", []),
(
"structured_deep",
"{null0: [0, 3.14, false], true1: {a: [0, 1, 2], b: {}}}",
{"null0": [0, 3.14, False], "true1": {"a": [0, 1, 2], "b": {}}},
),
]
# Parameters for tests of the "singleElement" rule when there are interpolations.
PARAMS_SINGLE_ELEMENT_WITH_INTERPOLATION = [
# Node interpolations.
("dict_access", "${dict.a}", 0),
("list_access", "${list.0}", -1),
("dict_access_getitem", "${dict[a]}", 0),
("list_access_getitem", "${list[0]}", -1),
("getitem_first_1", "${[dict].a}", 0),
("getitem_first_2", "${[list][0]}", -1),
("dict_access_deep_1", "${dict.b.c}", 1),
("dict_access_deep_2", "${dict[b].c}", 1),
("dict_access_deep_3", "${dict.b[c]}", 1),
("dict_access_deep_4", "${dict[b][c]}", 1),
("list_access_underscore", "${list.1_0}", 9),
("list_access_bad_negative", "${list.-1}", InterpolationKeyError),
("dict_access_list_like_1", "${0}", 0),
("dict_access_list_like_2", "${1.2}", 12),
("bool_like_keys", "${FalsE.TruE}", True),
("null_like_key_ok", "${None.null}", 1),
("null_like_key_bad_case", "${NoNe.null}", InterpolationKeyError),
("null_like_key_quoted_1", "${'None'.'null'}", GrammarParseError),
("null_like_key_quoted_2", "${'None.null'}", GrammarParseError),
("dotpath_bad_type", "${dict.${float}}", (None, InterpolationResolutionError)),
("at_in_key", "${x@y}", 123),
("dollar_in_key", "${$x$y$z$}", 456),
# Interpolations in dictionaries.
("dict_interpolation_value", "{hi: ${str}, int: ${int}}", {"hi": "hi", "int": 123}),
("dict_interpolation_key", "{${str}: 0, ${null}: 1", GrammarParseError),
# Interpolations in lists.
("list_interpolation", "[${str}, ${int}]", ["hi", 123]),
# Interpolations in unquoted strings.
("str_dollar_and_inter", "$$${str}", "$$hi"),
("str_inter", "hi_${str}", "hi_hi"),
("str_esc_illegal_3", r"\${foo\}", GrammarParseError),
# Interpolations in quoted strings.
("str_quoted_inter", "'${null}'", "None"),
("str_quoted_esc_single_1", r"'ab\'cd\'\'${str}'", "ab'cd''hi"),
("str_quoted_esc_single_2", r"""'\\\${foo}'""", r"\${foo}"),
("str_quoted_esc_single_3", r"""'\\a_${str}'""", r"\\a_hi"),
("str_quoted_esc_single_4", r"""'a_${str}\\'""", r" a_hi\ ".strip()),
("str_quoted_esc_double_1", r'"ab\"cd\"\"${str}"', 'ab"cd""hi'),
("str_quoted_esc_double_2", r'''"\\\${foo}"''', r"\${foo}"),
("str_quoted_esc_double_3", r'''"\\a_${str}"''', r"\\a_hi"),
("str_quoted_esc_double_4", r'''"a_${str}\\"''', r" a_hi\ ".strip()),
("str_quoted_other_quote_double", """'double"'""", 'double"'),
("str_quoted_other_quote_single", '''"single'"''', "single'"),
("str_quoted_concat_bad_1", '"Hi "${str}', GrammarParseError),
("str_quoted_nested", "'${test:\"b\"}'", "b"),
("str_quoted_nested_esc_quotes", "'${test:'b'}'", "b"),
("str_quoted_esc_inter", r"""'\${test:"b"}'""", '${test:"b"}'),
("str_quoted_esc_inter_and_quotes", r"'\${test:\'b\'}'", "${test:'b'}"),
("str_quoted_esc_inter_nested_single_1", r"""'${test:'\${str}'}'""", "${str}"),
("str_quoted_esc_inter_nested_single_2", r"""'${test:'\\${str}'}'""", r"\hi"),
("str_quoted_esc_inter_nested_single_3", r"""'${test:'\\\${str}'}'""", r"\${str}"),
("str_quoted_esc_inter_nested_double_1", r'''"${test:"\${str}"}"''', "${str}"),
("str_quoted_esc_inter_nested_double_2", r'''"${test:"\\${str}"}"''', r"\hi"),
("str_quoted_esc_inter_nested_double_3", r'''"${test:"\\\${str}"}"''', r"\${str}"),
("str_quoted_error_inside_quotes", "'${missing_brace'", GrammarParseError),
# Whitespaces.
("ws_inter_node_outer", "${ \tdict.a \t}", 0),
("ws_inter_node_around_dot", "${dict .\ta}", GrammarParseError),
("ws_inter_node_inside_id", "${d i c t.a}", GrammarParseError),
("ws_inter_res_outer", "${\t test:foo\t }", "foo"),
("ws_inter_res_around_colon", "${test\t : \tfoo}", "foo"),
("ws_inter_res_inside_id", "${te st:foo}", GrammarParseError),
("ws_inter_res_inside_args", "${test:f o o}", "f o o"),
("ws_inter_res_namespace", "${ns1 .\t ns2 . test:0}", GrammarParseError),
("ws_inter_res_no_args", "${test: \t}", []),
("ws_list", "${test:[\t a, b, ''\t ]}", ["a", "b", ""]),
("ws_dict", "${test:{\t a : 1\t , b: \t''}}", {"a": 1, "b": ""}),
("ws_quoted_single", "${test: \t'foo'\t }", "foo"),
("ws_quoted_double", '${test: \t"foo"\t }', "foo"),
# Nested interpolations.
("nested_simple", "${${ref_str}}", "hi"),
("nested_select", "${options.${choice}}", "A"),
("nested_select_getitem", "${options[${choice}]}", "A"),
("nested_relative", "${${rel_opt}.b}", "B"),
("str_quoted_nested_deep_single", r"'AB${test:'CD${test:'EF'}GH'}'", "ABCDEFGH"),
("str_quoted_nested_deep_double", r'"AB${test:"CD${test:"EF"}GH"}"', "ABCDEFGH"),
("str_quoted_nested_deep_mixed", r'''"AB${test:'CD${test:"EF"}GH'}"''', "ABCDEFGH"),
(
"str_quoted_issue_615",
r'${test:"The root drive is: \\${str}:\\"}',
r" The root drive is: \hi:\ ".strip(),
),
# Resolver interpolations.
("no_args", "${test:}", []),
("space_in_args", "${test:a, b c}", ["a", "b c"]),
("list_as_input", "${test:[a, b], 0, [1.1]}", [["a", "b"], 0, [1.1]]),
("dict_as_input", "${test:{a: 1.1, b: b}}", {"a": 1.1, "b": "b"}),
("dict_as_input_quotes", "${test:{'a': 1.1, b: b}}", GrammarParseError),
("dict_typo_colons", "${test:{a: 1.1, b:: b}}", {"a": 1.1, "b": ": b"}),
("missing_resolver", "${MiSsInG_ReSoLvEr:0}", UnsupportedInterpolationType),
("at_in_resolver", "${y@z:}", GrammarParseError),
("ns_resolver", "${ns1.ns2.test:123}", 123),
# Nested resolvers.
("nested_resolver", "${${str_test}:a, b, c}", ["a", "b", "c"]),
("nested_deep", "${test:${${test:${ref_str}}}}", "hi"),
(
"nested_resolver_combined_illegal",
"${some_${resolver}:a, b, c}",
GrammarParseError,
),
("nested_args", "${test:${str}, ${null}, ${int}}", ["hi", None, 123]),
# Invalid resolver names.
("int_resolver_quoted", "${'0':1,2,3}", GrammarParseError),
("int_resolver_noquote", "${0:1,2,3}", GrammarParseError),
("float_resolver_quoted", "${'1.1':1,2,3}", GrammarParseError),
("float_resolver_noquote", "${1.1:1,2,3}", GrammarParseError),
("float_resolver_exp", "${1e1:1,2,3}", GrammarParseError),
("inter_float_resolver", "${${float}:1,2,3}", (None, InterpolationResolutionError)),
# NaN as dictionary key (a resolver is used here to output only the key).
("dict_nan_key_1", "${first:{nan: 0}}", math.nan),
("dict_nan_key_2", "${first:{${test:nan}: 0}}", GrammarParseError),
]
# Parameters for tests of the "configValue" rule (may contain interpolations).
PARAMS_CONFIG_VALUE = [
# String interpolations (top-level).
("str_top_basic", "bonjour ${str}", "bonjour hi"),
("str_top_quotes_single_1", "'bonjour ${str}'", "'bonjour hi'"),
(
"str_top_quotes_single_2",
"'Bonjour ${str}', I said.",
"'Bonjour hi', I said.",
),
("str_top_quotes_double_1", '"bonjour ${str}"', '"bonjour hi"'),
(
"str_top_quotes_double_2",
'"Bonjour ${str}", I said.',
'"Bonjour hi", I said.',
),
("str_top_missing_end_quote_single", "'${str}", "'hi"),
("str_top_missing_end_quote_double", '"${str}', '"hi'),
("str_top_missing_start_quote_double", '${str}"', 'hi"'),
("str_top_missing_start_quote_single", "${str}'", "hi'"),
("str_top_middle_quote_single", "I'd like ${str}", "I'd like hi"),
("str_top_middle_quote_double", 'I"d like ${str}', 'I"d like hi'),
("str_top_middle_quotes_single", "I like '${str}'", "I like 'hi'"),
("str_top_middle_quotes_double", 'I like "${str}"', 'I like "hi"'),
(
"str_top_any_char",
r"${str} " + UNQUOTED_SPECIAL + r"^!#&})][({,;",
r"hi " + UNQUOTED_SPECIAL + r"^!#&})][({,;",
),
("str_top_esc_inter", r"Esc: \${str}", "Esc: ${str}"),
("str_top_esc_inter_wrong_1", r"Wrong: $\{str\}", r"Wrong: $\{str\}"),
("str_top_esc_inter_wrong_2", r"Wrong: \${str\}", r"Wrong: ${str\}"),
("str_top_esc_backslash_1", r"Esc: \\${str}", r"Esc: \hi"),
("str_top_esc_backslash_2", r"Esc: \\\\${str}", r"Esc: \\hi"),
("str_top_quoted_braces_wrong", r"Wrong: \{${str}\}", r"Wrong: \{hi\}"),
("str_top_leading_dollars", r"$$${str}", "$$hi"),
("str_top_trailing_dollars", r"${str}$$$$", "hi$$$$"),
("str_top_leading_escapes_1", r"\\\\\${str}", r"\\${str}"),
("str_top_leading_escapes_2", r"\\\\ \${str}", r"\\\\ ${str}"),
("str_top_middle_escapes_1", r"abc\\\\\${str}", r"abc\\${str}"),
("str_top_middle_escapes_2", r"abc\\\\ \${str}", r"abc\\\\ ${str}"),
("str_top_trailing_escapes", r" ${str}\\\ ".strip(), r" hi\\\ ".strip()),
("str_top_concat_interpolations", "${null}${float}", "None1.2"),
("str_top_issue_617", r""" ${test: "hi\\" }"} """, r" hi\"} "),
# Whitespaces.
("ws_toplevel", " \tab ${str} cd ${int}\t", " \tab hi cd 123\t"),
# Unmatched braces.
("missing_brace_1", "${test:${str}", GrammarParseError),
("missing_brace_2", "${${test:str}", GrammarParseError),
("extra_brace", "${str}}", "hi}"),
]
def parametrize_from(
data: List[Tuple[str, str, Any]]
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""Utility function to create PyTest parameters from the lists above"""
return mark.parametrize(
["definition", "expected"],
[param(definition, expected, id=key) for key, definition, expected in data],
)
class TestOmegaConfGrammar:
"""
Test most grammar constructs.
Each method in this class tests the validity of expressions in a specific
setting. For instance, `test_single_element_no_interpolation()` tests the
"singleElement" parsing rule on expressions that do not contain interpolations
(which allows for faster tests without using any config object).
Tests that actually need a config object all re-use the same `BASE_TEST_CFG`
config, to avoid creating a new config for each test.
"""
@parametrize_from(PARAMS_SINGLE_ELEMENT_NO_INTERPOLATION)
def test_single_element_no_interpolation(
self, definition: str, expected: Any
) -> None:
parse_tree, expected_visit = self._parse("singleElement", definition, expected)
if parse_tree is None:
return
# Since there are no interpolations here, we do not need to provide
# callbacks to resolve them, and the quoted string callback can simply
# be the identity.
visitor = grammar_visitor.GrammarVisitor(
node_interpolation_callback=None, # type: ignore
resolver_interpolation_callback=None, # type: ignore
memo=None,
)
self._visit(lambda: visitor.visit(parse_tree), expected_visit)
@parametrize_from(PARAMS_SINGLE_ELEMENT_WITH_INTERPOLATION)
def test_single_element_with_resolver(
self, restore_resolvers: Any, definition: str, expected: Any
) -> None:
parse_tree, expected_visit = self._parse("singleElement", definition, expected)
OmegaConf.register_new_resolver("test", self._resolver_test)
OmegaConf.register_new_resolver("first", self._resolver_first)
OmegaConf.register_new_resolver("ns1.ns2.test", self._resolver_test)
self._visit_with_config(parse_tree, expected_visit)
@parametrize_from(PARAMS_CONFIG_VALUE)
def test_config_value(
self, restore_resolvers: Any, definition: str, expected: Any
) -> None:
parse_tree, expected_visit = self._parse("configValue", definition, expected)
OmegaConf.register_new_resolver("test", self._resolver_test)
self._visit_with_config(parse_tree, expected_visit)
@parametrize_from(
[
("trailing_comma", "${test:a,b,}", ["a", "b", ""]),
("empty_middle", "${test:a,,b}", ["a", "", "b"]),
("empty_first", "${test:,a,b}", ["", "a", "b"]),
("single_comma", "${test:,}", ["", ""]),
(
"mixed_with_ws",
"${test: ,a,b,\t,,c, \t \t ,d,, \t}",
["", "a", "b", "", "", "c", "", "d", "", ""],
),
]
)
def test_deprecated_empty_args(
self, restore_resolvers: Any, definition: str, expected: Any
) -> None:
OmegaConf.register_new_resolver("test", self._resolver_test)
parse_tree, expected_visit = self._parse("singleElement", definition, expected)
with warns(
UserWarning, match=re.escape("https://github.com/omry/omegaconf/issues/572")
):
self._visit_with_config(parse_tree, expected_visit)
def _check_is_same_type(self, value: Any, expected: Any) -> None:
"""
        Helper function to validate that the types of `value` and `expected` are the same.
This function assumes that `value == expected` holds, and performs a "deep"
comparison of types (= it goes into data structures like dictionaries, lists
and tuples).
Note that dictionaries being compared must have keys ordered the same way!
"""
assert type(value) is type(expected)
if isinstance(value, (str, int, float)):
pass
elif isinstance(value, (list, tuple, ListConfig)):
for vx, ex in zip(value, expected):
self._check_is_same_type(vx, ex)
elif isinstance(value, (dict, DictConfig)):
for (vk, vv), (ek, ev) in zip(value.items(), expected.items()):
assert vk == ek, "dictionaries are not ordered the same"
self._check_is_same_type(vk, ek)
self._check_is_same_type(vv, ev)
elif value is None:
assert expected is None
else:
raise NotImplementedError(type(value))
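    # Illustrative note (not part of the test matrix): because the check is purely
    # type-based, a plain list and a ListConfig holding the same items compare equal
    # with `==` yet would still fail here, e.g. (hypothetical calls):
    #   self._check_is_same_type([0, 1], [0, 1])               # passes
    #   self._check_is_same_type(ListConfig([0, 1]), [0, 1])   # AssertionError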
def _get_expected(self, expected: Any) -> Tuple[Any, Any]:
"""Obtain the expected result of the parse & visit steps"""
if isinstance(expected, tuple):
# Outcomes of both the parse and visit steps are provided.
assert len(expected) == 2
return expected[0], expected[1]
elif expected is GrammarParseError:
# If only a `GrammarParseError` is expected, assume it happens in parse step.
return expected, None
else:
# If anything else is provided, assume it is the outcome of the visit step.
return None, expected
def _get_lexer_mode(self, rule: str) -> str:
return {"configValue": "DEFAULT_MODE", "singleElement": "VALUE_MODE"}[rule]
def _parse(
self, rule: str, definition: str, expected: Any
) -> Tuple[Optional[antlr4.ParserRuleContext], Any]:
"""
Parse the expression given by `definition`.
Return both the parse tree and the expected result when visiting this tree.
"""
def get_tree() -> antlr4.ParserRuleContext:
return grammar_parser.parse(
value=definition,
parser_rule=rule,
lexer_mode=self._get_lexer_mode(rule),
)
expected_parse, expected_visit = self._get_expected(expected)
if expected_parse is None:
return get_tree(), expected_visit
else: # expected failure on the parse step
with raises(expected_parse):
get_tree()
return None, None
def _resolver_first(self, item: Any, *_: Any) -> Any:
"""Resolver that returns the first element of its first input"""
try:
return next(iter(item))
except StopIteration:
assert False # not supposed to happen in current tests
def _resolver_test(self, *args: Any) -> Any:
"""Resolver that returns the list of its inputs"""
return args[0] if len(args) == 1 else list(args)
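    # For reference, with the resolvers above (illustrative expressions):
    #   "${test:a}"             resolves to "a"         (single argument passed through)
    #   "${test:a,b}"           resolves to ["a", "b"]  (multiple arguments returned as a list)
    #   "${first:${test:a,b}}"  resolves to "a"         (first element of the first input)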
def _visit(self, visit: Callable[[], Any], expected: Any) -> None:
"""Run the `visit()` function to visit the parse tree and validate the result"""
if isinstance(expected, type) and issubclass(expected, Exception):
with raises(expected):
visit()
else:
result = visit()
if expected is math.nan:
# Special case since nan != nan.
assert math.isnan(result)
else:
assert result == expected
# We also check types in particular because instances of `Node` are very
# good at mimicking their underlying type's behavior, and it is easy to
# fail to notice that the result contains nodes when it should not.
self._check_is_same_type(result, expected)
def _visit_with_config(
self, parse_tree: antlr4.ParserRuleContext, expected: Any
) -> None:
"""Visit the tree using the default config `BASE_TEST_CFG`"""
if parse_tree is None:
return
cfg = BASE_TEST_CFG
def visit() -> Any:
return _utils._get_value(
cfg.resolve_parse_tree(
parse_tree,
# Create a dummy `AnyNode` (it should not actually be used in these
                    # grammar tests, but `resolve_parse_tree()` requires it).
node=AnyNode(None, parent=cfg),
key=None,
parent=cfg,
)
)
self._visit(visit, expected)
@mark.parametrize(
"expression",
[
"${foo}",
"${foo.bar}",
"${a_b.c123}",
"${ foo \t}",
"x ${ab.cd.ef.gh} y",
"$ ${foo} ${bar} ${boz} $",
"${foo:bar}",
"${foo : bar, baz, boz}",
"${foo:bar,0,a-b+c*d/$.%@?|}",
r"\${foo}",
"${foo.bar:boz}",
"${$foo.bar$.x$y}",
"${$0.1.2$}",
"${0foo}",
# getitem syntax
"${foo[bar]}",
"${foo.bar[baz]}",
"${foo[bar].baz}",
"${foo[bar].baz[boz]}",
"${[foo]}",
"${[foo].bar}",
"${[foo][bar]}",
# relative interpolations
"${..foo}",
"${..foo.bar}",
"${..foo[bar]}",
"${..[foo].bar}",
],
)
class TestMatchSimpleInterpolationPattern:
def test_regex(self, expression: str) -> None:
assert grammar_parser.SIMPLE_INTERPOLATION_PATTERN.match(expression) is not None
def test_grammar_consistency(self, expression: str) -> None:
# The expression should be valid according to the grammar.
grammar_parser.parse(
value=expression,
parser_rule="configValue",
lexer_mode="DEFAULT_MODE",
)
@mark.parametrize(
("expression", "is_valid_grammar"),
[
# Also invalid according to the grammar.
("${.}", False),
("${..}", False),
("${}", False),
("${foo", False),
("${0foo:bar}", False),
("${foo . bar}", False),
("${ns . f:var}", False),
("${$foo:bar}", False),
("${.foo:bar}", False),
(r"${foo:\}", False),
# Valid according to the grammar but not matched by the regex.
("${foo.${bar}}", True),
("${foo:${bar}}", True),
("${foo:'hello'}", True),
(r"\${foo", True),
],
)
class TestDoNotMatchSimpleInterpolationPattern:
def test_regex(self, expression: str, is_valid_grammar: bool) -> None:
assert grammar_parser.SIMPLE_INTERPOLATION_PATTERN.match(expression) is None
def test_grammar_consistency(self, expression: str, is_valid_grammar: bool) -> None:
ctx: Any = nullcontext() if is_valid_grammar else raises(GrammarParseError)
with ctx:
grammar_parser.parse(
value=expression,
parser_rule="configValue",
lexer_mode="DEFAULT_MODE",
)
def test_empty_stack() -> None:
"""
Check that an empty stack during ANTLR parsing raises a `GrammarParseError`.
"""
with raises(GrammarParseError):
grammar_parser.parse("ab}", lexer_mode="VALUE_MODE")
@mark.parametrize(
("inter", "key", "expected"),
[
# config root
# simple
param("${dict.bar}", "", 20, id="dict_value"),
param("${dict}", "", {"bar": 20}, id="dict_node"),
param("${list}", "", [1, 2], id="list_node"),
param("${list.0}", "", 1, id="list_value"),
# relative
param(
"${..list}",
"dict",
[1, 2],
id="relative:list_from_dict",
),
param("${..list.1}", "dict", 2, id="up_down"),
param("${..[list][1]}", "dict", 2, id="up_down_getitem"),
],
)
def test_parse_interpolation(inter: Any, key: Any, expected: Any) -> None:
cfg = OmegaConf.create(
{
"dict": {"bar": 20},
"list": [1, 2],
},
)
root = OmegaConf.select(cfg, key)
tree = grammar_parser.parse(
parser_rule="singleElement",
value=inter,
lexer_mode="VALUE_MODE",
)
def callback(inter_key: Any, memo: Optional[Set[int]]) -> Any:
assert isinstance(root, Container)
ret = root._resolve_node_interpolation(inter_key=inter_key, memo=memo)
return ret
visitor = grammar_visitor.GrammarVisitor(
node_interpolation_callback=callback,
resolver_interpolation_callback=None, # type: ignore
memo=None,
)
ret = visitor.visit(tree)
assert ret == expected
def test_custom_resolver_param_supported_chars() -> None:
supported_chars = r"abc123_:" + UNQUOTED_SPECIAL
c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})
OmegaConf.register_new_resolver("copy", lambda x: x)
assert c.dir1 == supported_chars
def test_valid_chars_in_interpolation() -> None:
valid_chars = "".join(
chr(i) for i in range(33, 128) if chr(i) not in INVALID_CHARS_IN_KEY_NAMES
)
cfg_dict = {valid_chars: 123, "inter": f"${{{valid_chars}}}"}
cfg = OmegaConf.create(cfg_dict)
# Test that we can access the node made of all valid characters, both
# directly and through interpolations.
assert cfg[valid_chars] == 123
assert cfg.inter == 123
@mark.parametrize("c", list(INVALID_CHARS_IN_KEY_NAMES))
def test_invalid_chars_in_interpolation(c: str) -> None:
def create() -> DictConfig:
return OmegaConf.create({"invalid": f"${{ab{c}de}}"})
# Test that all invalid characters trigger errors in interpolations.
if c in [".", "}"]:
# With '.', we try to access `${ab.de}`.
# With '}', we try to access `${ab}`.
cfg = create()
with raises(InterpolationKeyError):
cfg.invalid
elif c == ":":
# With ':', we try to run a resolver `${ab:de}`
cfg = create()
with raises(UnsupportedInterpolationType):
cfg.invalid
else:
# Other invalid characters should be detected at creation time.
with raises(GrammarParseError):
create()
def test_grammar_cache_is_thread_safe() -> None:
"""
This test ensures that we can parse strings across multiple threads in parallel.
    Besides ensuring that the parsing does not hang or crash, we also verify that
the lexer used in each thread is different.
"""
n_threads = 10
lexer_ids = []
stop = threading.Event()
def check_cache_lexer_id() -> None:
# Parse a dummy string to make sure the grammar cache is populated
# (this also checks that multiple threads can parse in parallel).
grammar_parser.parse("foo")
# Keep track of the ID of the cached lexer.
lexer_ids.append(id(grammar_parser._grammar_cache.data[0]))
# Wait until we are done.
while not stop.is_set():
time.sleep(0.1)
# Launch threads.
threads = []
for i in range(n_threads):
threads.append(threading.Thread(target=check_cache_lexer_id))
threads[-1].start()
# Wait until all threads have reported their lexer ID.
while len(lexer_ids) < n_threads:
time.sleep(0.1)
# Terminate threads.
stop.set()
for thread in threads:
thread.join()
# Check that each thread used a unique lexer.
assert len(set(lexer_ids)) == n_threads
|
the-stack_0_13549 | from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'apply.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^', include('yard.urls')),
url(r'^admin/', include(admin.site.urls)),
]
|
the-stack_0_13551 | """
@author: Jonatan González Rodríguez <[email protected]>
"""
import re
import csv, datetime
import pysam
def reformat_nanomonsv(inp, out):
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('##') and 'ID=TR' in line:
filtered_vcf.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
new_DR = line.replace(
'ID=TR,Number=1,Type=Integer,Description="The number of reads around the breakpoints"',
'ID=DR,Number=1,Type=Integer,Description="# of reads supporting the reference allele."',
)
filtered_vcf.write(new_DR)
elif line.startswith('##') and 'ID=VR' in line:
new_DV = line.replace(
'ID=VR,Number=1,Type=Integer,Description="The number of variant supporting reads determined in the validation realignment step"',
'ID=DV,Number=1,Type=Integer,Description="# of reads supporting the variant allele."',
)
filtered_vcf.write(new_DV)
elif line.startswith('#CHROM'):
headers = (
line.strip()
.replace('TUMOR', 'NANOMON_Tumor')
.replace('CONTROL', 'NANOMON_Normal')
.split('\t')
)
filtered_vcf.write(
line.replace('TUMOR', 'NANOMON_Tumor').replace('CONTROL', 'NANOMON_Normal')
)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if columns[headers.index('FILTER')] == 'PASS':
columns[headers.index('REF')] = 'N'
Format = columns[headers.index('FORMAT')].replace('TR', 'DR').replace('VR', 'DV')
Format = 'GT:' + Format
Normal_Format = columns[headers.index('NANOMON_Normal')].split(':')
Normal_Format[0] = str(int(Normal_Format[0]) - int(Normal_Format[1]))
Tumor_Format = columns[headers.index('NANOMON_Tumor')].split(':')
Tumor_Format[0] = str(int(Tumor_Format[0]) - int(Tumor_Format[1]))
Normal = './.:' + ':'.join(Normal_Format)
Tumor = './.:' + ':'.join(Tumor_Format)
filtered_vcf.write(
'{}\t{}\t{}\t{}\n'.format('\t'.join(columns[0:8]), Format, Tumor, Normal)
)
else:
filtered_vcf.write(line)
vcf.close()
filtered_vcf.close()
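# Example usage sketch (hypothetical file names): rename the TR/VR FORMAT fields to
# DR/DV, add a GT field and keep only PASS records from a nanomonsv VCF:
#   reformat_nanomonsv("nanomonsv.result.vcf", "nanomonsv.reformatted.vcf")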
def reformat_svim(inp, out, columnid, qual):
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('##') and 'ID=DP' in line:
new_DR = line.replace(
'ID=DP,Number=1,Type=Integer,Description="Read depth"',
'ID=DR,Number=1,Type=Integer,Description="# reads supporting the reference allele."',
)
filtered_vcf.write(new_DR)
elif line.startswith('##') and 'ID=AD' in line:
new_DV = line.replace(
'ID=AD,Number=R,Type=Integer,Description="Read depth for each allele"',
'ID=DV,Number=1,Type=Integer,Description="# of reads supporting the variant allele."',
)
filtered_vcf.write(new_DV)
elif line.startswith('#CHROM'):
headers = line.strip().split('\t')
filtered_vcf.write(line)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if int(columns[headers.index('QUAL')]) >= qual:
if 'DUP:TANDEM' in columns[headers.index('ALT')]:
columns[headers.index('ALT')] = '<DUP>'
Format = (
columns[headers.index('FORMAT')].replace('DP', 'DR').replace('AD', 'DV')
)
Format = re.split(':', Format)
del Format[1]
Format_info = re.split(':|,', columns[headers.index(columnid)])
del Format_info[1:3]
filtered_vcf.write(
'{}\t{}\t{}\n'.format(
'\t'.join(columns[0:8]), ':'.join(Format), ':'.join(Format_info)
)
)
elif 'DUP:INT' in columns[headers.index('ALT')]:
columns[headers.index('ALT')] = '<DUP>'
Format = (
columns[headers.index('FORMAT')].replace('DP', 'DR').replace('AD', 'DV')
)
Format_info = re.split(':|,', columns[headers.index(columnid)])
del Format_info[1]
filtered_vcf.write(
'{}\t{}\t{}\n'.format(
'\t'.join(columns[0:8]), Format, ':'.join(Format_info)
)
)
else:
if 'DEL' in columns[headers.index('ALT')]:
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) + 1)
Format = (
columns[headers.index('FORMAT')].replace('DP', 'DR').replace('AD', 'DV')
)
Format_info = re.split(':|,', columns[headers.index(columnid)])
del Format_info[1]
filtered_vcf.write(
'{}\t{}\t{}\n'.format(
'\t'.join(columns[0:8]), Format, ':'.join(Format_info)
)
)
else:
filtered_vcf.write(line)
vcf.close()
filtered_vcf.close()
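# Example usage sketch (hypothetical values): keep SVIM calls with QUAL >= 10 from the
# sample column named "SAMPLE", renaming DP/AD to DR/DV along the way:
#   reformat_svim("svim_variants.vcf", "svim_filtered.vcf", "SAMPLE", 10)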
def reformat_sniffles(inp, out):
print(inp, out)
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('#CHROM'):
headers = line.strip().split('\t')
new_SEQ = '##INFO=<ID=SVINSSEQ,Number=1,Type=String,Description="Sequence of insertion">\n'
filtered_vcf.write(new_SEQ)
filtered_vcf.write(line)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if 'DEL' in columns[headers.index('INFO')]:
columns[headers.index('REF')] = 'N'
columns[headers.index('ALT')] = '<DEL>'
filtered_vcf.write('\t'.join(columns) + '\n')
elif 'INS' in columns[headers.index('INFO')]:
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) - 1)
INFO = columns[headers.index('INFO')].split(';')
pos_idx = [i for i, x in enumerate(INFO) if x.startswith('END')][0]
INFO[pos_idx] = 'END=' + str(int(INFO[pos_idx].split('=')[1]) - 1)
columns[headers.index('INFO')] = ';'.join(INFO)
columns[headers.index('INFO')] += ';SVINSSEQ={}'.format(
columns[headers.index('ALT')]
)
columns[headers.index('ALT')] = '<INS>'
filtered_vcf.write('\t'.join(columns) + '\n')
else:
filtered_vcf.write(line)
else:
filtered_vcf.write(line)
def reformat_cutesv(inp, out):
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('#CHROM'):
headers = line.strip().split('\t')
filtered_vcf.write(
'##INFO=<ID=SVINSSEQ,Number=1,Type=String,Description="Sequence of insertion">\n'
)
filtered_vcf.write(line)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if columns[headers.index('QUAL')] != '.':
if 'DEL' in columns[headers.index('INFO')]:
columns[headers.index('REF')] = 'N'
columns[headers.index('ALT')] = '<DEL>'
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) + 1)
filtered_vcf.write('\t'.join(columns) + '\n')
elif 'INS' in columns[headers.index('INFO')]:
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) - 1)
columns[headers.index('REF')] = 'N'
columns[headers.index('INFO')] += ';SVINSSEQ={}'.format(
columns[headers.index('ALT')]
)
columns[headers.index('ALT')] = '<INS>'
filtered_vcf.write('\t'.join(columns) + '\n')
else:
columns[headers.index('REF')] = 'N'
filtered_vcf.write('\t'.join(columns) + '\n')
else:
filtered_vcf.write(line)
def reverse_complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A',
'W': 'W', 'S': 'S', 'M': 'K', 'K': 'M', 'R': 'Y', 'Y': 'R',
'B': 'V', 'V': 'B', 'D': 'H', 'H': 'D', 'N': 'N'}
return("".join(complement.get(base, base) for base in reversed(seq)))
def genomesv2vcf_convert(result_file, output_vcf, reference):
today_str = datetime.datetime.today().strftime("%Y%m%d")
header = '##fileformat=VCFv4.3\n'\
f'##fileDate={today_str}\n'\
f'##reference={reference}'
ref_tb = pysam.FastaFile(reference)
for (tchr, tlen) in zip(ref_tb.references, ref_tb.lengths):
header = header + '\n' + f"##contig=<ID={tchr},length={tlen}>"
header = header + '\n' + \
'##FILTER=<ID=Duplicate_with_close_SV,Description="When multiple SVs that share breakpoints in close proximity are detected, all but one SVs are filtered.">\n'\
'##FILTER=<ID=Duplicate_with_insertion,Description="Breakend SVs that are inferred to be the same as any of detected insertions">\n'\
'##FILTER=<ID=Duplicate_with_close_insertion,Description="When multiple insertions in close proximity are detected, all but one insertions are filtered.">\n'\
'##FILTER=<ID=SV_with_decoy,Description="SVs involving decoy contigs">\n'\
        '##FILTER=<ID=Too_small_size,Description="Insertions whose size is below the threshold (currently 100bp)">\n'\
'##FILTER=<ID=Too_low_VAF,Description="SVs whose variant allele frequencies are inferred to be low">\n'\
'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'\
'##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'\
'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'\
'##INFO=<ID=MATEID,Number=1,Type=String,Description="ID of mate breakend">\n'\
'##INFO=<ID=SVINSLEN,Number=1,Type=Integer,Description="Length of insertion">\n'\
'##INFO=<ID=SVINSSEQ,Number=1,Type=String,Description="Sequence of insertion">\n'\
'##ALT=<ID=DEL,Description="Deletion">\n'\
'##ALT=<ID=INS,Description="Insertion">\n'\
'##ALT=<ID=DUP,Description="Duplication">\n'\
'##ALT=<ID=INV,Description="Inversion">\n'\
'##FORMAT=<ID=TR,Number=1,Type=Integer,Description="The number of reads around the breakpoints">\n'\
'##FORMAT=<ID=VR,Number=1,Type=Integer,Description="The number of variant supporting reads determined in the validation realignment step">'
with open(result_file, 'r') as hin, open(output_vcf, 'w') as hout:
dreader = csv.DictReader(hin, delimiter = '\t')
fieldname_list = dreader.fieldnames
is_control = True if "Checked_Read_Num_Control" in fieldname_list and "Supporting_Read_Num_Control" in fieldname_list else False
if is_control:
header = header + '\n' + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tTUMOR\tCONTROL"
else:
header = header + '\n' + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tTUMOR"
print(header, file = hout)
for F in dreader:
tchrom = F["Chr_1"]
tid = F["SV_ID"]
tqual = '.'
tfilter = F["Is_Filter"]
if F["Inserted_Seq"] != "---":
tsvinsseq = F["Inserted_Seq"]
tsvinslen = len(F["Inserted_Seq"])
else:
tsvinsseq = ''
tsvinslen = 0
tformat_sample = f'TR:VR\t{F["Checked_Read_Num_Tumor"]}:{F["Supporting_Read_Num_Tumor"]}'
if is_control:
tformat_sample = tformat_sample + f'\t{F["Checked_Read_Num_Control"]}:{F["Supporting_Read_Num_Control"]}'
if F["Chr_1"] == F["Chr_2"] and F["Dir_1"] == '+' and F["Dir_2"] == '-':
tpos = int(F["Pos_1"])
tref = ref_tb.fetch(tchrom, tpos - 1, tpos)
if tref == '' or tref is None: continue
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) - 1
tend = int(F["Pos_2"]) - 1
# Deletion
if tsvlen > tsvinslen:
talt = "<DEL>"
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) - 1
tinfo = f"END={tend};SVTYPE=DEL;SVLEN=-{tsvlen}"
if tsvinslen != 0:
tinfo = tinfo + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
# Insertion
elif tsvlen >= 0:
talt = "<INS>"
tinfo = f"END={tend};SVTYPE=INS;SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
else:
continue
print(f"{tchrom}\t{tpos}\t{tid}\t{tref}\t{talt}\t{tqual}\t{tfilter}\t{tinfo}\t{tformat_sample}", file = hout)
# Duplication
elif F["Chr_1"] == F["Chr_2"] and F["Dir_1"] == '-' and F["Dir_2"] == '+' and F["Pos_1"] != '1':
tpos = int(F["Pos_1"])
tref = ref_tb.fetch(tchrom, tpos - 1, tpos)
if tref == '' or tref is None: continue
talt = "<DUP>"
tend = int(F["Pos_2"])
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) + 1
tinfo = f"END={tend};SVTYPE=DUP;SVLEN={tsvlen}"
if tsvinslen != 0:
tinfo = tinfo + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom}\t{tpos}\t{tid}\t{tref}\t{talt}\t{tqual}\t{tfilter}\t{tinfo}\t{tformat_sample}", file = hout)
# Breakend
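            # (A breakend is emitted as a pair of BND records that reference each
            # other through MATEID; the bracket characters in the ALT field encode
            # the orientation of the join as defined in the VCF specification.)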
elif F["Chr_1"] != F["Chr_2"]:
tchrom1 = F["Chr_1"]
tpos1 = int(F["Pos_1"])
tref1 = ref_tb.fetch(tchrom1, tpos1 - 1, tpos1)
if tref1 == '' or tref1 is None: continue
tchrom2 = F["Chr_2"]
tpos2 = int(F["Pos_2"])
tref2 = ref_tb.fetch(tchrom2, tpos2 - 1, tpos2)
if tref2 == '' or tref2 is None: continue
tbracket = ']' if F["Dir_2"] == '+' else '['
if F["Dir_1"] == '+':
talt1 = f'{tref1}{tsvinsseq}{tbracket}{tchrom2}:{tpos2}{tbracket}'
else:
talt1 = f'{tbracket}{tchrom2}:{tpos2}{tbracket}{tsvinsseq}{tref2}'
tinfo1 = f"SVTYPE=BND;MATEID={tid}_1"
if tsvinslen != 0: tinfo1 = tinfo1 + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom1}\t{tpos1}\t{tid}_0\t{tref1}\t{talt1}\t{tqual}\t{tfilter}\t{tinfo1}\t{tformat_sample}", file = hout)
# tchrom2 = F["Chr_2"]
# tpos = int(F["Pos_2"])
# tref = ref_tb.fetch(tchrom2, tpos - 1, tpos)
# if tref == '' or tref is None: continue
tbracket = ']' if F["Dir_1"] == '+' else '['
tsvinsseq = reverse_complement(tsvinsseq)
if F["Dir_2"] == '+':
talt2 = f'{tref2}{tsvinsseq}{tbracket}{tchrom1}:{tpos1}{tbracket}'
else:
talt2 = f'{tbracket}{tchrom1}:{tpos1}{tbracket}{tsvinsseq}{tref2}'
tinfo2 = f"SVTYPE=BND;MATEID={tid}_0"
if tsvinslen != 0: tinfo2 = tinfo2 + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom2}\t{tpos2}\t{tid}_1\t{tref2}\t{talt2}\t{tqual}\t{tfilter}\t{tinfo2}\t{tformat_sample}", file = hout)
else:
tpos = int(F["Pos_1"])
tref = ref_tb.fetch(tchrom, tpos - 1, tpos)
if tref == '' or tref is None: continue
talt = "<INV>"
tend = int(F["Pos_2"])
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) + 1
tinfo = f"END={tend};SVTYPE=INV;SVLEN={tsvlen}"
if tsvinslen != 0:
tinfo = tinfo + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom}\t{tpos}\t{tid}\t{tref}\t{talt}\t{tqual}\t{tfilter}\t{tinfo}\t{tformat_sample}", file = hout) |
the-stack_0_13555 | import pytest
from vnep_approx import treewidth_model
import numpy as np
from alib import mip
from alib import datamodel as dm
from alib import util
from .test_data.request_test_data import create_test_request
from .test_data.substrate_test_data import create_test_substrate_topology_zoo
import random
import time
import logging
import itertools
logger = util.get_logger(__name__, make_file=False, propagate=True)
random.seed(0)
# TEST Valid Shortest Path Computer
@pytest.mark.parametrize("substrate_id", ["BtAsiaPac", "DeutscheTelekom", "Geant2012", "Surfnet", "Dfn"])
@pytest.mark.parametrize("cost_spread", [-1, 0.5, 1.0, 2.0, 4.0, 8.0]) #cost spread of -1 will test uniform costs
def test_shortest_valid_paths_computer_no_latencies(substrate_id, cost_spread):
req = create_test_request("single edge", set_allowed_nodes=False)
sub = create_test_substrate_topology_zoo(substrate_id, include_latencies=False)
vmrc = treewidth_model.ValidMappingRestrictionComputer(sub, req)
vmrc.compute()
if cost_spread == -1:
# uniform edge costs
edge_costs = {sedge: 1.0 for sedge in sub.edges}
svpc_dijkstra = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_NoLatencies, sub, vmrc, edge_costs)
svpc_dijkstra.compute()
for reqedge in req.edges:
for snode_source in sub.nodes:
for snode_target in sub.nodes:
if snode_source == snode_target:
assert svpc_dijkstra.valid_sedge_costs[reqedge][(snode_source, snode_target)] == 0
else:
assert svpc_dijkstra.valid_sedge_costs[reqedge][(snode_source, snode_target)] >= 1
else:
# random edge costs
edge_costs = {sedge: max(1, 1000.0 * random.random()) for sedge in sub.edges}
for sedge in sub.edges:
sub.edge[sedge]['cost'] = edge_costs[sedge]
bellman_ford_time = time.time()
sub.initialize_shortest_paths_costs()
bellman_ford_time = time.time() - bellman_ford_time
svpc_dijkstra = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_NoLatencies, sub, vmrc, edge_costs)
dijkstra_time = time.time()
svpc_dijkstra.compute()
dijkstra_time = time.time() - dijkstra_time
for reqedge in req.edges:
for snode_source in sub.nodes:
for snode_target in sub.nodes:
# print svpc.valid_sedge_costs[reqedge][(snode_source, snode_target)]
# print sub.get_shortest_paths_cost(snode_source, snode_target)
assert svpc_dijkstra.valid_sedge_costs[reqedge][(snode_source, snode_target)] == pytest.approx(
sub.get_shortest_paths_cost(snode_source, snode_target))
logger.info(
"\nComputation times were:\n\tBellman-Ford: {:2.4f}\n"
"\tDijkstra: {:2.4f}\n"
"\tSpeedup by using Dijkstra over Bellman: {:2.2f} (<1 is bad)\n".format(
bellman_ford_time, dijkstra_time, (bellman_ford_time / dijkstra_time)))
@pytest.mark.parametrize("substrate_id", ["BtAsiaPac"])#, "DeutscheTelekom", "Geant2012", "Surfnet", "Dfn"])
@pytest.mark.parametrize("cost_spread", [0.5, 1.0, 2.0, 4.0, 8.0])
@pytest.mark.parametrize("epsilon", [1.0, 0.5, 0.1, 0.01])
@pytest.mark.parametrize("limit_factor", [8.0, 4.0, 2.0, 1.0, 0.5])
def test_shortest_valid_paths_with_latencies(substrate_id, cost_spread, epsilon, limit_factor):
req = create_test_request("single edge", set_allowed_nodes=False)
sub = create_test_substrate_topology_zoo(substrate_id, include_latencies=True)
vmrc = treewidth_model.ValidMappingRestrictionComputer(sub, req)
vmrc.compute()
edge_costs = {sedge: cost_spread*random.random()+1.0 for sedge in sub.edges}
for sedge in sub.edges:
sub.edge[sedge]['cost'] = edge_costs[sedge]
maximal_latency_upper_bound = sum([sub.edge[sedge]["latency"] for sedge in sub.edges])
minimum_edge_cost = min([sub.edge[sedge]["cost"] for sedge in sub.edges])
average_latency = maximal_latency_upper_bound / len(sub.edges)
edge_costs = {sedge: sub.edge[sedge]['cost'] for sedge in sub.edges}
edge_latencies = {sedge: sub.edge[sedge]['latency'] for sedge in sub.edges}
limit = average_latency * limit_factor
runtime_exact_mip = 0.0
runtime_approx_mip = 0.0
runtime_strict = 0.0
runtime_flex = 0.0
def time_computation(spvc):
start_time = time.time()
spvc.compute()
return time.time() - start_time
def compute_latency_of_path(sedge_path):
if sedge_path is None:
return 0.0
return sum([sub.edge[sedge]["latency"] for sedge in sedge_path])
def nan_to_negative_value(value):
if np.isnan(value):
# this guarantees that this cost is ..
# a) negative and
# b) the absolute value of the returned cost is smaller than the minimum cost value
return -minimum_edge_cost / (10*max(epsilon, 1/epsilon))
return value
svpc_exact_mip = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Exact_MIP,
sub,
vmrc,
edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=0.0)
svpc_approximate_mip = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Exact_MIP,
sub, vmrc, edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=epsilon)
svpc_strict = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Strict, sub, vmrc, edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=epsilon)
svpc_flex = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Flex, sub, vmrc, edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=epsilon)
logger.info("\n\n========================================================================================================\n\n"
"Considering now a latency limit of {} (average latency is {}), an epsilon of {} and a cost spread of {}\n\n"
"========================================================================================================\n\n".format(limit, average_latency, epsilon, cost_spread))
logger.info("\n\nStarting exact MIP...\n\n")
runtime_exact_mip += time_computation(svpc_exact_mip)
logger.info("\n\nStarting approximate MIP...\n\n")
runtime_approx_mip += time_computation(svpc_approximate_mip)
logger.info("\n\nStarting strict...\n\n")
runtime_strict += time_computation(svpc_strict)
logger.info("\n\nStarting flex ...\n\n")
runtime_flex += time_computation(svpc_flex)
logger.info(
"\t{:^6s} | {:^6s} || {:^15s} | {:^15s} | {:^15s} | {:^15s} || {:^15s} | {:^15s} || {:^15s} | {:^15s} | {:^15s} | {:^15s}".format("Source", "Target", "c(Flex)", "c(Exact-MIP)",
"c(Approx-MIP)", "c(Strict)", "epsilon", "latency_bound", "l(Flex)", "l(Exact-MIP)",
"l(Approx-MIP)", "l(Strict)"))
failure_counts = {alg: {"cost": 0, "lat": 0} for alg in ["exact_mip", "approx_mip", "flex", "strict"]}
for reqedge in req.edges:
for snode_source in sub.nodes:
for snode_target in sub.nodes:
if snode_source == snode_target:
assert svpc_exact_mip.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
assert svpc_approximate_mip.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
assert svpc_strict.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
assert svpc_flex.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
else:
cost_flex = nan_to_negative_value(svpc_flex.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
cost_exact_mip = nan_to_negative_value(svpc_exact_mip.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
cost_approx_mip = nan_to_negative_value(svpc_approximate_mip.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
cost_strict = nan_to_negative_value(svpc_strict.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
path_flex = svpc_flex.get_valid_sedge_path(reqedge, snode_source, snode_target)
path_exact_mip = svpc_exact_mip.get_valid_sedge_path(reqedge, snode_source, snode_target)
path_approx_mip = svpc_approximate_mip.get_valid_sedge_path(reqedge, snode_source, snode_target)
path_strict = svpc_strict.get_valid_sedge_path(reqedge, snode_source, snode_target)
lat_flex = compute_latency_of_path(path_flex)
lat_exact_mip = compute_latency_of_path(path_exact_mip)
lat_approx_mip = compute_latency_of_path(path_approx_mip)
lat_strict = compute_latency_of_path(path_strict)
failure_dict = {alg : {"cost": False, "lat": False} for alg in ["exact_mip", "approx_mip", "flex", "strict"]}
def value_lies_outside_of_range(value, reference_value, lower_factor, upper_factor):
result = False
result |= (abs(value) < abs(reference_value) * lower_factor)
result |= (abs(value) > abs(reference_value) * upper_factor)
return result
def bool_to_failure_output(boolean_value):
if boolean_value:
return "FAILED"
else:
return "PASSED"
failure_dict["approx_mip"]["cost"] |= value_lies_outside_of_range(cost_approx_mip, cost_exact_mip, 0.999, 1.001 + epsilon)
failure_dict["strict"]["cost"] |= value_lies_outside_of_range(cost_strict, cost_exact_mip, 0.999, 1.001 + epsilon)
failure_dict["flex"]["cost"] |= value_lies_outside_of_range(cost_flex, cost_exact_mip, 0.0, 1.001)
failure_dict["exact_mip"]["lat"] |= value_lies_outside_of_range(lat_exact_mip, limit, 0.0, 1.001)
failure_dict["approx_mip"]["lat"] |= value_lies_outside_of_range(lat_approx_mip, limit, 0.0, 1.001)
failure_dict["strict"]["lat"] |= value_lies_outside_of_range(lat_strict, limit, 0.0, 1.001)
failure_dict["flex"]["lat"] |= value_lies_outside_of_range(lat_exact_mip, limit, 0.0, 1.001 + epsilon)
failure_found = any([failure_dict[alg][type] for alg in failure_dict for type in failure_dict[alg]])
failure_message = None
output_message = "\t{:^6s} | {:^6s} || {:^15.4f} | {:^15.4f} | {:^15.4f} | {:^15.4f} || {:^15.4f} | {:^15.4f} || {:^15.4f} | {:^15.4f} | {:^15.4f} | {:^15.4f} ".format(
snode_source,
snode_target,
cost_flex,
cost_exact_mip,
cost_approx_mip,
cost_strict,
epsilon,
limit,
lat_flex,
lat_exact_mip,
lat_approx_mip,
lat_strict
)
if failure_found:
failure_message = "\t{:^6s} | {:^6s} || {:^15s} | {:^15s} | {:^15s} | {:^15s} || {:^15.4f} | {:^15.4f} || {:^15s} | {:^15s} | {:^15s} | {:^15s} ".format(
snode_source,
snode_target,
bool_to_failure_output(failure_dict["flex"]["cost"]),
bool_to_failure_output(failure_dict["exact_mip"]["cost"]),
bool_to_failure_output(failure_dict["approx_mip"]["cost"]),
bool_to_failure_output(failure_dict["strict"]["cost"]),
epsilon,
limit,
bool_to_failure_output(failure_dict["flex"]["lat"]),
bool_to_failure_output(failure_dict["exact_mip"]["lat"]),
bool_to_failure_output(failure_dict["approx_mip"]["lat"]),
bool_to_failure_output(failure_dict["strict"]["lat"])
)
if failure_found:
logger.error(output_message)
logger.error(failure_message)
else:
logger.debug(output_message)
for alg in failure_dict:
for type in failure_dict[alg]:
if failure_dict[alg][type]:
failure_counts[alg][type] += 1
logger.info("Runtimes are \n"
"\tExact-MIP: {:10.4f}\n"
"\tApprox-MIP: {:10.4f}\n"
"\tStrict: {:10.4f}\n"
"\tFlex: {:10.4f}\n\n\n".format(runtime_exact_mip,
runtime_approx_mip,
runtime_strict,
runtime_flex))
number_of_failed_tests = sum([failure_counts[alg][type] for alg in failure_counts for type in failure_counts[alg]])
logger.info("Total number of failures: {}\n".format(number_of_failed_tests))
number_of_node_combinations = len(sub.nodes) * len(sub.nodes)
for alg in failure_counts:
for type in failure_counts[alg]:
if failure_counts[alg][type] > 0:
logger.error("\tSummary\t{:^15s} {:^15s}: {:4d} failed of {:4d} ({:6.3f}%)".format(alg, type, failure_counts[alg][type], number_of_node_combinations, 100.0*failure_counts[alg][type]/float(number_of_node_combinations)))
else:
logger.info(
"\t\Summary\t{:^15s} {:^15s}: {:4d} failed of {:4d} ({:6.3f}%)".format(alg, type, failure_counts[alg][type],
number_of_node_combinations,
100.0 * failure_counts[alg][type] / float(
number_of_node_combinations)))
assert number_of_failed_tests == 0 |
the-stack_0_13557 | import abc
from typing import Any, Dict, List, Optional
import tqdm
from openforcefield.topology import Molecule
from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper
from pydantic import BaseModel, Field, validator
from pydantic.main import ModelMetaclass
from qcelemental.util import which_import
from ..common_structures import ComponentProperties
from ..datasets import ComponentResult
class InheritSlots(ModelMetaclass):
    # This allows subclasses of CustomWorkflowComponent to inherit __slots__
def __new__(mcs, name, bases, namespace):
slots = set(namespace.pop("__slots__", tuple()))
for base in bases:
if hasattr(base, "__slots__"):
slots.update(base.__slots__)
if "__dict__" in slots:
slots.remove("__dict__")
namespace["__slots__"] = tuple(slots)
return ModelMetaclass.__new__(mcs, name, bases, namespace)
class CustomWorkflowComponent(BaseModel, abc.ABC, metaclass=InheritSlots):
"""
    This is an abstract base class which should be used to create all workflow components. Following the design of this
    class should allow users to easily create new workflow components without needing to change much of the dataset
    factory code.
"""
component_name: str = Field(
..., description="The name of the component which should match the class name."
)
component_description: str = Field(
...,
description="A short description of what the component will do to the molecules.",
)
component_fail_message: str = Field(
...,
description="A short description with hints on why the molecule may have caused an error in this workflow component.",
)
_properties: ComponentProperties = Field(
...,
description="The internal runtime properties of the component which can not be changed, these indecate if the component can be ran in parallel and if it may produce duplicate molecules.",
)
_cache: Dict
class Config:
validate_assignment = True
arbitrary_types_allowed = True
# this is a pydantic workaround to add private variables taken from
# https://github.com/samuelcolvin/pydantic/issues/655
__slots__ = [
"_cache",
]
def __init__(self, *args, **kwargs):
super(CustomWorkflowComponent, self).__init__(*args, **kwargs)
self._cache = {}
def __setattr__(self, attr: str, value: Any) -> None:
"""
Overwrite the Pydantic setattr to configure the handling of our __slots___
"""
if attr in self.__slots__:
object.__setattr__(self, attr, value)
else:
super(CustomWorkflowComponent, self).__setattr__(attr, value)
# getstate and setstate are needed since private instance members (_*)
# are not included in pickles by Pydantic at the current moment. Force them
# to be added here. This is needed for multiprocessing support.
def __getstate__(self):
return (
super().__getstate__(),
{slot: getattr(self, slot) for slot in self.__slots__},
)
def __setstate__(self, state):
super().__setstate__(state[0])
d = state[1]
for slot in d:
setattr(self, slot, d[slot])
@classmethod
@abc.abstractmethod
def is_available(cls) -> bool:
"""
This method should identify if the component can be used by checking if the requirements are available.
Returns:
`True` if the component can be used else `False`
"""
...
@abc.abstractmethod
def _apply(self, molecules: List[Molecule]) -> ComponentResult:
"""
This is the main feature of the workflow component which should accept a molecule, perform the component action
and then return the result.
Parameters:
molecules: The list of molecules to be processed by this component.
Returns:
An instance of the [ComponentResult][qcsubmit.datasets.ComponentResult]
class which handles collecting together molecules that pass and fail
the component
"""
...
def _apply_init(self, result: ComponentResult) -> None:
"""
        Any set-up actions that should be performed before running the main apply method go here, such as preparing the _cache for multiprocessing.
Here we clear out the _cache in case something has been set.
"""
self._cache.clear()
def _apply_finalize(self, result: ComponentResult) -> None:
"""
Any clean up actions should be added here, by default the _cache is cleaned.
"""
self._cache.clear()
def apply(
self,
molecules: List[Molecule],
processors: Optional[int] = None,
verbose: bool = True,
) -> ComponentResult:
"""
This is the main feature of the workflow component which should accept a molecule, perform the component action
        and then return the result.
Parameters:
molecules: The list of molecules to be processed by this component.
processors: The number of processor the component can use to run the job in parallel across molecules, None will default to all cores.
verbose: If true a progress bar will be shown on screen.
Returns:
An instance of the [ComponentResult][qcsubmit.datasets.ComponentResult]
class which handles collecting together molecules that pass and fail
the component
"""
result: ComponentResult = self._create_result()
self._apply_init(result)
# Use a Pool to get around the GIL. As long as self does not contain
# too much data, this should be efficient.
if (processors is None or processors > 1) and self._properties.process_parallel:
from multiprocessing.pool import Pool
with Pool(processes=processors) as pool:
# Assumes to process in batches of 1 for now
work_list = [
pool.apply_async(self._apply, ([molecule],))
for molecule in molecules
]
for work in tqdm.tqdm(
work_list,
total=len(work_list),
ncols=80,
desc="{:30s}".format(self.component_name),
disable=not verbose,
):
work = work.get()
for success in work.molecules:
result.add_molecule(success)
for fail in work.filtered:
result.filter_molecule(fail)
else:
for molecule in tqdm.tqdm(
molecules,
total=len(molecules),
ncols=80,
desc="{:30s}".format(self.component_name),
disable=not verbose,
):
work = self._apply([molecule])
for success in work.molecules:
result.add_molecule(success)
for fail in work.filtered:
result.filter_molecule(fail)
self._apply_finalize(result)
return result
@abc.abstractmethod
def provenance(self) -> Dict:
"""
This function should detail the programs with version information and procedures called during activation
of the workflow component.
Returns:
A dictionary containing the information about the component and the functions called.
"""
...
def _create_result(self, **kwargs) -> ComponentResult:
"""
        A helper method to create the component result with the required information.
Returns:
A [ComponentResult][qcsubmit.datasets.ComponentResult] instantiated with the required information.
"""
result = ComponentResult(
component_name=self.component_name,
component_description=self.dict(),
component_provenance=self.provenance(),
skip_unique_check=not self._properties.produces_duplicates,
**kwargs,
)
return result
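# Minimal sketch of a concrete component (hypothetical, for illustration only; real
# components follow this pattern, typically mixing in BasicSettings or ToolkitValidator
# defined below). The ComponentProperties keyword names are assumed from how
# _properties is used above, and the molecule attributes are those of the OpenFF Molecule:
#
#   class HeavyAtomFilter(BasicSettings, CustomWorkflowComponent):
#       component_name = "HeavyAtomFilter"
#       component_description = "Filter out molecules with more than 40 heavy atoms."
#       component_fail_message = "Molecule has too many heavy atoms."
#       _properties = ComponentProperties(process_parallel=True, produces_duplicates=False)
#
#       def _apply(self, molecules):
#           result = self._create_result()
#           for molecule in molecules:
#               heavy_atoms = sum(1 for atom in molecule.atoms if atom.atomic_number > 1)
#               if heavy_atoms <= 40:
#                   result.add_molecule(molecule)
#               else:
#                   result.filter_molecule(molecule)
#           return result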
class ToolkitValidator(BaseModel):
"""
A pydantic mixin class that adds toolkit settings and validation along with provenance information.
Note:
The provenance information and toolkit settings are handled by the
[ToolkitValidator][qcsubmit.workflow_components.base_component.ToolkitValidator] mixin.
"""
toolkit: str = Field(
"openeye",
description="The name of the toolkit which should be used in this component.",
)
_toolkits: Dict = {"rdkit": RDKitToolkitWrapper, "openeye": OpenEyeToolkitWrapper}
@validator("toolkit")
def _check_toolkit(cls, toolkit):
"""
Make sure that toolkit is one of the supported types in the OFFTK.
"""
if toolkit not in cls._toolkits.keys():
raise ValueError(
f"The requested toolkit ({toolkit}) is not support by the OFFTK. "
f"Please chose from {cls._toolkits.keys()}."
)
else:
return toolkit
def provenance(self) -> Dict:
"""
This component calls the OFFTK to perform the task and logs information on the backend toolkit used.
Returns:
A dictionary containing the version information about the backend toolkit called to perform the task.
"""
import openforcefield
import qcsubmit
provenance = {
"OpenforcefieldToolkit": openforcefield.__version__,
"QCSubmit": qcsubmit.__version__,
}
if self.toolkit == "rdkit":
import rdkit
provenance["rdkit"] = rdkit.__version__
elif self.toolkit == "openeye":
import openeye
provenance["openeye"] = openeye.__version__
return provenance
@classmethod
def is_available(cls) -> bool:
"""
Check if any of the requested backend toolkits can be used.
"""
if len(cls._toolkits) == 1:
# the package needs a specific toolkit so raise the error
raise_error = True
else:
raise_error = False
for toolkit in cls._toolkits:
if toolkit == "openeye":
oe = which_import(
".oechem",
package="openeye",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via `conda install openeye-toolkits -c openeye`.",
)
if oe:
return True
elif toolkit == "rdkit":
rdkit = which_import(
"rdkit",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via `conda install rdkit -c conda-forge`.",
)
if rdkit:
return True
# if we are here both toolkits are missing
raise ModuleNotFoundError(
f"Openeye or RDKit is required to use this component please install via `conda install openeye-toolkits -c openeye` or `conda install rdkit -c conda-forge`."
)
class BasicSettings(BaseModel):
"""
This mixin identifies the class as being basic and always being available as it only requires basic packages.
"""
@classmethod
def is_available(cls) -> bool:
"""
This component is basic if it requires no extra dependencies.
"""
return True
def provenance(self) -> Dict:
"""
The basic settings provenance generator.
"""
import openforcefield
import qcsubmit
provenance = {
"OpenforcefieldToolkit": openforcefield.__version__,
"QCSubmit": qcsubmit.__version__,
}
return provenance
|
the-stack_0_13558 | from pathlib import Path
from setuptools import find_packages, setup
module_dir = Path(__file__).resolve().parent
with open(module_dir / "README.md") as f:
long_description = f.read()
if __name__ == "__main__":
setup(
name="atomate2",
setup_requires=["setuptools_scm"],
use_scm_version={"version_scheme": "python-simplified-semver"},
description="atomate2 is a library of materials science workflows",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hackingmaterials/atomate2",
author="Alex Ganose",
author_email="[email protected]",
license="modified BSD",
keywords="high-throughput automated workflow dft vasp",
package_dir={"": "src"},
package_data={"atomate2": ["py.typed"]},
packages=find_packages("src"),
data_files=["LICENSE"],
zip_safe=False,
include_package_data=True,
install_requires=[
"pymatgen>=2019.11.11",
"custodian>=2019.8.24",
"pydantic",
"monty",
"jobflow>=0.1.5",
"PyYAML",
"numpy",
"click",
],
extras_require={
"amset": ["amset>=0.4.15", "pydash"],
"cclib": ["cclib"],
"docs": [
"sphinx==4.5.0",
"numpydoc==1.2.1",
"mistune==0.8.4",
"ipython==8.2.0",
"FireWorks==2.0.2",
"pydata-sphinx-theme==0.8.1",
"autodoc_pydantic==1.6.1",
"sphinx_panels==0.6.0",
"myst-parser==0.17.0",
],
"tests": [
"pytest==7.1.1",
"pytest-cov==3.0.0",
"FireWorks==2.0.2",
# "amset==0.4.15",
],
"dev": ["pre-commit>=2.12.1"],
"phonons": ["phonopy>=1.10.8"],
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Information Technology",
"Operating System :: OS Independent",
"Topic :: Other/Nonlisted Topic",
"Topic :: Scientific/Engineering",
],
python_requires=">=3.8",
tests_require=["pytest"],
entry_points={
"console_scripts": [
"atm = atomate2.cli:cli",
]
},
)
|
the-stack_0_13559 |
class BaseCAM(object):
"""
Base class for Class Activation Mapping.
"""
def __init__(self, model_dict):
"""Init
# Arguments
model_dict: dict. A dict with format model_dict = dict(arch=self.model, layer_name=target_layer_name).
"""
layer_name = model_dict['layer_name']
self.model_arch = model_dict['arch']
self.model_arch.eval()
self.gradients = dict()
self.activations = dict()
# save gradient
def backward_hook(module, grad_input, grad_output):
self.gradients['value'] = grad_output[0]
return None
# save activation map
def forward_hook(module, input, output):
self.activations['value'] = output
return None
target_layer = self.find_layer(self.model_arch, layer_name)
target_layer.register_forward_hook(forward_hook)
target_layer.register_backward_hook(backward_hook)
def find_layer(self, arch, target_layer_name):
if target_layer_name is None:
if 'resnet' in str(type(arch)):
target_layer_name = 'layer4'
elif 'alexnet' in str(type(arch)) or 'vgg' in str(type(arch)) or 'squeezenet' in str(type(arch)) or 'densenet' in str(type(arch)):
target_layer_name = 'features'
else:
raise Exception('Invalid layer name! Please specify layer name.', target_layer_name)
hierarchy = target_layer_name.split('_')
if hierarchy[0] not in arch._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = arch._modules[hierarchy[0]]
if len(hierarchy) >= 2:
if hierarchy[1] not in target_layer._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = target_layer._modules[hierarchy[1]]
if len(hierarchy) >= 3:
if hierarchy[2] not in target_layer._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = target_layer._modules[hierarchy[2]]
if len(hierarchy) >= 4:
if hierarchy[3] not in target_layer._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = target_layer._modules[hierarchy[3]]
return target_layer
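    # Illustrative note: the layer name is an underscore-separated path into the
    # model's _modules hierarchy, so (for example) 'layer4_1_conv2' on a ResNet
    # resolves to arch._modules['layer4']._modules['1']._modules['conv2'].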
def forward(self, input_, class_idx=None, retain_graph=False):
return None
def __call__(self, input_, class_idx=None, retain_graph=False):
return self.forward(input_, class_idx, retain_graph)
|
the-stack_0_13560 |
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
class _GitImportFix:
import sys
former_path = sys.path[:]
@classmethod
def apply(cls):
# HACK: fix application and module name clash
# 'git' app is found earlier than a library in the path.
# The clash is introduced by unittest discover
import sys
print('apply')
apps_dir = __file__[:__file__.rfind('/dataset_manager/')]
assert 'apps' in apps_dir
try:
sys.path.remove(apps_dir)
except ValueError:
pass
for name in list(sys.modules):
if name.startswith('git.') or name == 'git':
m = sys.modules.pop(name, None)
del m
import git
assert apps_dir not in git.__file__
@classmethod
def restore(cls):
import sys
print('restore')
for name in list(sys.modules):
if name.startswith('git.') or name == 'git':
m = sys.modules.pop(name)
del m
sys.path.insert(0, __file__[:__file__.rfind('/dataset_manager/')])
import importlib
importlib.invalidate_caches()
def _setUpModule():
_GitImportFix.apply()
import cvat.apps.dataset_manager.task as dm
from cvat.apps.engine.models import Task
globals()['dm'] = dm
globals()['Task'] = Task
import sys
sys.path.insert(0, __file__[:__file__.rfind('/dataset_manager/')])
def tearDownModule():
_GitImportFix.restore()
from io import BytesIO
import os
import random
import tempfile
from PIL import Image
from django.contrib.auth.models import User, Group
from rest_framework.test import APITestCase, APIClient
from rest_framework import status
_setUpModule()
def generate_image_file(filename):
f = BytesIO()
width = random.randint(10, 200)
height = random.randint(10, 200)
image = Image.new('RGB', size=(width, height))
image.save(f, 'jpeg')
f.name = filename
f.seek(0)
return f
def create_db_users(cls):
group_user, _ = Group.objects.get_or_create(name="user")
user_dummy = User.objects.create_superuser(username="test", password="test", email="")
user_dummy.groups.add(group_user)
cls.user = user_dummy
class ForceLogin:
def __init__(self, user, client):
self.user = user
self.client = client
def __enter__(self):
if self.user:
self.client.force_login(self.user,
backend='django.contrib.auth.backends.ModelBackend')
return self
def __exit__(self, exception_type, exception_value, traceback):
if self.user:
self.client.logout()
class TaskExportTest(APITestCase):
def setUp(self):
self.client = APIClient()
@classmethod
def setUpTestData(cls):
create_db_users(cls)
def _generate_task(self):
task = {
"name": "my task #1",
"owner": '',
"assignee": '',
"overlap": 0,
"segment_size": 100,
"z_order": False,
"labels": [
{
"name": "car",
"attributes": [
{
"name": "model",
"mutable": False,
"input_type": "select",
"default_value": "mazda",
"values": ["bmw", "mazda", "renault"]
},
{
"name": "parked",
"mutable": True,
"input_type": "checkbox",
"default_value": False
},
]
},
{"name": "person"},
]
}
task = self._create_task(task, 3)
annotations = {
"version": 0,
"tags": [
{
"frame": 0,
"label_id": task["labels"][0]["id"],
"group": None,
"attributes": []
}
],
"shapes": [
{
"frame": 0,
"label_id": task["labels"][0]["id"],
"group": None,
"attributes": [
{
"spec_id": task["labels"][0]["attributes"][0]["id"],
"value": task["labels"][0]["attributes"][0]["values"][0]
},
{
"spec_id": task["labels"][0]["attributes"][1]["id"],
"value": task["labels"][0]["attributes"][0]["default_value"]
}
],
"points": [1.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": False
},
{
"frame": 1,
"label_id": task["labels"][1]["id"],
"group": None,
"attributes": [],
"points": [2.0, 2.1, 100, 300.222, 400, 500, 1, 3],
"type": "polygon",
"occluded": False
},
],
"tracks": [
{
"frame": 0,
"label_id": task["labels"][0]["id"],
"group": None,
"attributes": [
{
"spec_id": task["labels"][0]["attributes"][0]["id"],
"value": task["labels"][0]["attributes"][0]["values"][0]
},
],
"shapes": [
{
"frame": 0,
"points": [1.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": False,
"outside": False,
"attributes": [
{
"spec_id": task["labels"][0]["attributes"][1]["id"],
"value": task["labels"][0]["attributes"][1]["default_value"]
}
]
},
{
"frame": 1,
"attributes": [],
"points": [2.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": True,
"outside": True
},
]
},
{
"frame": 1,
"label_id": task["labels"][1]["id"],
"group": None,
"attributes": [],
"shapes": [
{
"frame": 1,
"attributes": [],
"points": [1.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": False,
"outside": False
}
]
},
]
}
self._put_api_v1_task_id_annotations(task["id"], annotations)
return task, annotations
def _create_task(self, data, size):
with ForceLogin(self.user, self.client):
response = self.client.post('/api/v1/tasks', data=data, format="json")
assert response.status_code == status.HTTP_201_CREATED, response.status_code
tid = response.data["id"]
images = {
"client_files[%d]" % i: generate_image_file("image_%d.jpg" % i)
for i in range(size)
}
images["image_quality"] = 75
response = self.client.post("/api/v1/tasks/{}/data".format(tid), data=images)
assert response.status_code == status.HTTP_202_ACCEPTED, response.status_code
response = self.client.get("/api/v1/tasks/{}".format(tid))
task = response.data
return task
def _put_api_v1_task_id_annotations(self, tid, data):
with ForceLogin(self.user, self.client):
response = self.client.put("/api/v1/tasks/{}/annotations".format(tid),
data=data, format="json")
return response
def _test_export(self, format_name, save_images=False):
self.assertTrue(format_name in [f['tag'] for f in dm.EXPORT_FORMATS])
task, _ = self._generate_task()
project = dm.TaskProject.from_task(
Task.objects.get(pk=task["id"]), self.user.username)
with tempfile.TemporaryDirectory() as test_dir:
project.export(format_name, test_dir, save_images=save_images)
self.assertTrue(os.listdir(test_dir))
def test_datumaro(self):
self._test_export(dm.EXPORT_FORMAT_DATUMARO_PROJECT, save_images=False)
def test_coco(self):
self._test_export('cvat_coco', save_images=True)
def test_voc(self):
self._test_export('cvat_voc', save_images=True)
def test_tf_detection_api(self):
self._test_export('cvat_tfrecord', save_images=True)
def test_yolo(self):
self._test_export('cvat_yolo', save_images=True)
def test_mot(self):
self._test_export('cvat_mot', save_images=True)
def test_labelme(self):
self._test_export('cvat_label_me', save_images=True)
def test_formats_query(self):
formats = dm.get_export_formats()
expected = set(f['tag'] for f in dm.EXPORT_FORMATS)
actual = set(f['tag'] for f in formats)
self.assertSetEqual(expected, actual)
|
the-stack_0_13561 | #!/home/kyy/lecture/search-restaurants/venv/bin/python3.5
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <[email protected]>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if isinstance(self.stack[0], Image.Image):  # If it's an image, do a real copy
            dup = self.stack[0].copy()
        else:
            dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))  # draft() configures the image in place
        self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
        from PIL import ImageChops
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
the-stack_0_13562 | import sys
def main(input_file):
    """For each line of the form "<days>;<v1> <v2> ...", print the maximum
    sum over any window of `days` consecutive values."""
    with open(input_file, 'r') as fh:
for line in fh:
line = line.strip()
days = int(line.split(';')[0])
array = [int(x) for x in line.split(';')[1].split(' ')]
result = 0
for i in range(0, len(array)-days+1):
result = max(sum(array[i:i+days]), result)
print(result)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: %s <input_file>" % sys.argv[0])
sys.exit(1)
main(sys.argv[1])
|
the-stack_0_13563 | # Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import math
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import strutils
import six
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import versioned_method
from nova import exception
from nova import i18n
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
_SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
}
# These are typically automatically created by routes as either defaults
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
_METHODS_WITH_BODY = [
'POST',
'PUT',
]
# The default api version request if none is requested in the headers
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
DEFAULT_API_VERSION = "2.1"
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Name of header used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'X-OpenStack-Nova-API-Version'
ENV_LEGACY_V2 = 'openstack.legacy_v2'
def get_supported_content_types():
return _SUPPORTED_CONTENT_TYPES
def get_media_map():
return dict(_MEDIA_TYPE_MAP.items())
# NOTE(rlrossit): This function allows a get on both a dict-like and an
# object-like object. cache_db_items() is used on both versioned objects and
# dicts, so the function can't be totally changed over to [] syntax, nor
# can it be changed over to use getattr().
def item_get(item, item_key):
if hasattr(item, '__getitem__'):
return item[item_key]
else:
return getattr(item, item_key)
class Request(wsgi.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
if not hasattr(self, 'api_version_request'):
self.api_version_request = api_version.APIVersionRequest()
def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item_get(item, item_key)] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def cache_db_compute_nodes(self, compute_nodes):
self.cache_db_items('compute_nodes', compute_nodes, 'id')
def cache_db_compute_node(self, compute_node):
self.cache_db_items('compute_nodes', [compute_node], 'id')
def get_db_compute_nodes(self):
return self.get_db_items('compute_nodes')
def get_db_compute_node(self, id):
return self.get_db_item('compute_nodes', id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in get_supported_content_types():
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(
get_supported_content_types())
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in get_supported_content_types():
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determine the best available language for the request.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
return self.accept_language.best_match(
i18n.get_available_languages())
def set_api_version_request(self):
"""Set API version request based on the request header information."""
if API_VERSION_REQUEST_HEADER in self.headers:
hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
# 'latest' is a special keyword which is equivalent to requesting
# the maximum version of the API supported
if hdr_string == 'latest':
self.api_version_request = api_version.max_api_version()
else:
self.api_version_request = api_version.APIVersionRequest(
hdr_string)
# Check that the version requested is within the global
# minimum/maximum of supported API versions
if not self.api_version_request.matches(
api_version.min_api_version(),
api_version.max_api_version()):
raise exception.InvalidGlobalAPIVersion(
req_ver=self.api_version_request.get_string(),
min_ver=api_version.min_api_version().get_string(),
max_ver=api_version.max_api_version().get_string())
else:
self.api_version_request = api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION)
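    # Illustrative request headers handled above (example values only):
    #   X-OpenStack-Nova-API-Version: 2.12
    #   X-OpenStack-Nova-API-Version: latest    # resolves to max_api_version()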
def set_legacy_v2(self):
self.environ[ENV_LEGACY_V2] = True
def is_legacy_v2(self):
return self.environ.get(ENV_LEGACY_V2, False)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class JSONDeserializer(ActionDispatcher):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class JSONDictSerializer(ActionDispatcher):
"""Default JSON request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return six.text_type(jsonutils.dumps(data))
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
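# Hypothetical usage sketch -- the decorator only tags the method; the tag is
# read later in Resource._process_stack via ``meth.wsgi_code`` to pick the
# default HTTP status for the serialized response:
#
#     @response(202)
#     def _action_reboot(self, req, id, body):
#         ...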
class ResponseObject(object):
"""Bundles a response object
Object that app methods may return in order to allow its response
to be modified by extensions in the code. Its use is optional (and
should only be used if you really know what you are doing).
"""
def __init__(self, obj, code=None, headers=None):
"""Builds a response object."""
self.obj = obj
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = JSONDictSerializer()
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def serialize(self, request, content_type):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
serializer = self.serializer
body = None
if self.obj is not None:
body = serializer.serialize(self.obj)
response = webob.Response(body=body)
if response.headers.get('Content-Length'):
# NOTE(andreykurilin): we need to encode 'Content-Length' header,
# since webob.Response auto sets it if "body" attr is presented.
# https://github.com/Pylons/webob/blob/1.5.0b0/webob/response.py#L147
response.headers['Content-Length'] = utils.utf8(
response.headers['Content-Length'])
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = utils.utf8(value)
response.headers['Content-Type'] = utils.utf8(content_type)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek(body):
"""Determine action to invoke.
This looks inside the json body and fetches out the action method
name.
"""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action name
return list(decoded.keys())[0]
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.Forbidden):
raise Fault(webob.exc.HTTPForbidden(
explanation=ex_value.format_message()))
elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
raise
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code,
explanation=ex_value.format_message()))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_LE('Exception handling resource: %s'), ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_LI("Fault thrown: %s"), ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
support_api_request_version = False
def __init__(self, controller, inherits=None):
""":param controller: object that implement methods created by routes
lib
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
self.default_serializers = dict(json=JSONDictSerializer)
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
content_type = request.get_content_type()
return content_type, request.body
def deserialize(self, body):
return JSONDeserializer().deserialize(body)
# NOTE(sdague): I didn't start the fire, however here is what all
# of this is about.
#
# In the legacy v2 code stack, extensions could extend actions
# with a generator that let 1 method be split into a top and
# bottom half. The top half gets executed before the main
# processing of the request (so effectively gets to modify the
# request before it gets to the main method).
#
# Returning a response triggers a shortcut to fail out. The
# response will nearly always be a failure condition, as it ends
# up skipping further processing one level up from here.
#
# This then passes on the list of extensions, in reverse order,
# on. post_process will run through all those, again with same
# basic logic.
#
# In tree this is only used in the legacy v2 stack, and only in
# the DiskConfig and SchedulerHints from what I can see.
#
# pre_process_extensions can be removed when the legacyv2 code
# goes away. post_process_extensions can be massively simplified
# at that point.
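    # Hypothetical extension sketch -- a generator-style (legacy v2) extension:
    # the code before the ``yield`` is the pre-processing half, and the value
    # later sent back in via ``gen.send(resp_obj)`` drives the post-processing
    # half:
    #
    #     @extends
    #     def create(self, req, body):
    #         ...                      # runs before the main create()
    #         resp_obj = yield         # resumed by post_process_extensions()
    #         ...                      # runs after, may inspect resp_obj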
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = next(gen)
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
        # A None response means we keep going. We reverse the
        # extension list for post-processing.
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except exception.VersionNotFoundForAPIMethod:
# If an attached extension (@wsgi.extends) for the
# method has no version match its not an error. We
# just don't run the extends code
continue
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
def _should_have_body(self, request):
return request.method in _METHODS_WITH_BODY
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
if self.support_api_request_version:
# Set the version of the API requested based on the header
try:
request.set_api_version_request()
except exception.InvalidAPIVersionString as e:
return Fault(webob.exc.HTTPBadRequest(
explanation=e.format_message()))
except exception.InvalidGlobalAPIVersion as e:
return Fault(webob.exc.HTTPNotAcceptable(
explanation=e.format_message()))
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
# NOTE(sdague): we filter out InvalidContentTypes early so we
# know everything is good from here on out.
try:
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
"%(body)s") % {'action': action,
'body': six.text_type(body, 'utf-8'),
'meth': str(meth)}
LOG.debug(strutils.mask_password(msg))
else:
LOG.debug("Calling method '%(meth)s'",
{'meth': str(meth)})
# Now, deserialize the request body...
try:
contents = {}
if self._should_have_body(request):
# allow empty body with PUT and POST
if request.content_length == 0:
contents = {'body': None}
else:
contents = self.deserialize(body)
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
" doesn't match Context's project_id"
" '%(context_project_id)s'") % \
{'project_id': project_id,
'context_project_id': context.project_id}
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept)
if hasattr(response, 'headers'):
for hdr, val in list(response.headers.items()):
# Headers must be utf-8 strings
response.headers[hdr] = utils.utf8(val)
if not request.api_version_request.is_null():
response.headers[API_VERSION_REQUEST_HEADER] = \
request.api_version_request.get_string()
response.headers['Vary'] = API_VERSION_REQUEST_HEADER
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
action_name = action_peek(body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
try:
return method(req=request, **action_args)
except exception.VersionNotFoundForAPIMethod:
# We deliberately don't return any message information
# about the exception to the user so it looks as if
# the method is simply not implemented.
return Fault(webob.exc.HTTPNotFound())
class ResourceV21(Resource):
support_api_request_version = True
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
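# Hypothetical usage sketch -- the name is matched against the single key that
# action_peek() extracts from the body of a POST .../action request:
#
#     @action('os-resetState')
#     def _reset_state(self, req, id, body):
#         ...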
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
versioned_methods = None
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
if base.__name__ == "Controller":
# NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
# between API controller class creations. This allows us
# to use a class decorator on the API methods that doesn't
# require naming explicitly what method is being versioned as
# it can be implicit based on the method decorated. It is a bit
# ugly.
if VER_METHOD_ATTR in base.__dict__:
versioned_methods = getattr(base, VER_METHOD_ATTR)
delattr(base, VER_METHOD_ATTR)
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
if versioned_methods:
cls_dict[VER_METHOD_ATTR] = versioned_methods
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
@return: Returns the result of the method called
@raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
ver = kwargs['req'].api_version_request
else:
ver = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if ver.matches(func.start_version, func.end_version):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(version=ver)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if version_meth_dict and \
key in object.__getattribute__(self, VER_METHOD_ATTR):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None):
"""Decorator for versioning api methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
@min_ver: string representing minimum version
@max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (reversed)
# so later when we work through the list in order we find
# the method which has the latest version which supports
# the version requested.
# TODO(cyeoh): Add check to ensure that there are no overlapping
            # ranges of valid versions as that is ambiguous
func_list.sort(key=lambda f: f.start_version, reverse=True)
return f
return decorator
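    # Hypothetical usage sketch -- two implementations of the same method,
    # selected at call time by Controller.__getattribute__ based on the
    # request's microversion (the decorator must be the outermost one):
    #
    #     @Controller.api_version("2.1", "2.18")
    #     def show(self, req, id):
    #         ...
    #
    #     @Controller.api_version("2.19")   # 2.19 and above
    #     def show(self, req, id):
    #         ...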
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
return is_dict(body[entity_name])
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
for key, value in list(self.wrapped_exc.headers.items()):
self.wrapped_exc.headers[key] = str(value)
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
user_locale = req.best_match_language()
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
LOG.debug("Returning %(code)s to user: %(explanation)s",
{'code': code, 'explanation': explanation})
explanation = i18n.translate(explanation, user_locale)
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
if code == 413 or code == 429:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
if not req.api_version_request.is_null():
self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \
req.api_version_request.get_string()
self.wrapped_exc.headers['Vary'] = \
API_VERSION_REQUEST_HEADER
self.wrapped_exc.content_type = 'application/json'
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class RateLimitFault(webob.exc.HTTPException):
"""Rate-limited request response."""
def __init__(self, message, details, retry_time):
"""Initialize new `RateLimitFault` with relevant information."""
hdrs = RateLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs)
self.content = {
"overLimit": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Return the wrapped exception with a serialized body conforming
to our error format.
"""
user_locale = request.best_match_language()
self.content['overLimit']['message'] = \
i18n.translate(self.content['overLimit']['message'], user_locale)
self.content['overLimit']['details'] = \
i18n.translate(self.content['overLimit']['details'], user_locale)
content = JSONDictSerializer().serialize(self.content)
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.content_type = "application/json"
self.wrapped_exc.text = content
return self.wrapped_exc
|
the-stack_0_13565 | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import base64
import datetime
import json
import pickle
import os
from cms.api import create_page
from django import http
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple, RelatedFieldWidgetWrapper
from django.core import urlresolvers
from django.core.cache import cache
from django.core.exceptions import (
ValidationError, ImproperlyConfigured, ObjectDoesNotExist)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.forms.widgets import Media
from django.test.testcases import TestCase
from django.utils import timezone
from cms import api
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered, DontUsePageAttributeWarning
from cms.models import Page, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.sitemaps.cms_sitemap import CMSSitemap
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import (
Article, Section, ArticlePluginModel,
FKModel,
M2MTargetModel)
from cms.test_utils.project.pluginapp.plugins.meta.cms_plugins import (
TestPlugin, TestPlugin2, TestPlugin3, TestPlugin4, TestPlugin5)
from cms.test_utils.project.pluginapp.plugins.validation.cms_plugins import (
NonExisitngRenderTemplate, NoRender, NoRenderButChildren, DynTemplate)
from cms.test_utils.testcases import (
CMSTestCase, URL_CMS_PAGE, URL_CMS_PLUGIN_MOVE, URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE,
URL_CMS_PLUGIN_REMOVE, URL_CMS_PAGE_PUBLISH, URL_CMS_PLUGINS_COPY)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.i18n import force_language
from cms.utils.plugins import get_plugins_for_page, get_plugins
from django.utils.http import urlencode
from djangocms_googlemap.models import GoogleMap
from djangocms_inherit.cms_plugins import InheritPagePlaceholderPlugin
from djangocms_file.models import File
from djangocms_inherit.models import InheritPagePlaceholder
from djangocms_link.forms import LinkForm
from djangocms_link.models import Link
from djangocms_picture.models import Picture
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import plugin_to_tag
@contextmanager
def register_plugins(*plugins):
for plugin in plugins:
plugin_pool.register_plugin(plugin)
try:
yield
finally:
for plugin in plugins:
plugin_pool.unregister_plugin(plugin)
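# Hypothetical usage sketch -- temporarily register plugin classes for the
# duration of a test, unregistering them even if the test body raises:
#
#     with register_plugins(TestPlugin, TestPlugin2):
#         ...  # the plugins are available through plugin_pool here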
class DumbFixturePlugin(CMSPluginBase):
model = CMSPlugin
name = "Dumb Test Plugin. It does nothing."
render_template = ""
admin_preview = False
render_plugin = False
def render(self, context, instance, placeholder):
return context
class DumbFixturePluginWithUrls(DumbFixturePlugin):
name = DumbFixturePlugin.name + " With custom URLs."
render_plugin = False
def _test_view(self, request):
return http.HttpResponse("It works")
def get_plugin_urls(self):
return [
url(r'^testview/$', admin.site.admin_view(self._test_view), name='dumbfixtureplugin'),
]
plugin_pool.register_plugin(DumbFixturePluginWithUrls)
class PluginsTestBaseCase(CMSTestCase):
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def approve_page(self, page):
response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk)
self.assertRedirects(response, URL_CMS_PAGE)
# reload page
return self.reload_page(page)
def get_request(self, *args, **kwargs):
request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs)
request.placeholder_media = Media()
request.toolbar = CMSToolbar(request)
return request
def get_response_pk(self, response):
return int(response.content.decode('utf8').split("/edit-plugin/")[1].split("/")[0])
def get_placeholder(self):
return Placeholder.objects.create(slot='test')
class PluginsTestCase(PluginsTestBaseCase):
def _create_text_plugin_on_page(self, page):
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot="body"),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=''
)
return plugin.pk
def _edit_text_plugin(self, plugin_id, text):
edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
"body": text
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.get(pk=plugin_id)
return txt
def test_add_edit_plugin(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
txt = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", txt.body)
def test_plugin_add_form_integrity(self):
admin.autodiscover()
admin_instance = admin.site._registry[ArticlePluginModel]
placeholder = self.get_placeholder()
url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "ArticlePlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholder.pk,
})
superuser = self.get_superuser()
plugin = plugin_pool.get_plugin('ArticlePlugin')
with self.login_user_context(superuser):
request = self.get_request(url)
PluginFormClass = plugin(
model=plugin.model,
admin_site=admin.site,
).get_form(request)
plugin_fields = list(PluginFormClass.base_fields.keys())
OriginalFormClass = admin_instance.get_form(request)
original_fields = list(OriginalFormClass.base_fields.keys())
# Assert both forms have the same fields
self.assertEqual(plugin_fields, original_fields)
# Now assert the plugin form has the related field wrapper
# widget on the sections field.
self.assertIsInstance(
PluginFormClass.base_fields['sections'].widget,
RelatedFieldWidgetWrapper,
)
# Now assert the admin form has the related field wrapper
# widget on the sections field.
self.assertIsInstance(
OriginalFormClass.base_fields['sections'].widget,
RelatedFieldWidgetWrapper,
)
# Now assert the plugin form has the filtered select multiple
# widget wrapped by the related field wrapper
self.assertIsInstance(
PluginFormClass.base_fields['sections'].widget.widget,
FilteredSelectMultiple,
)
# Now assert the admin form has the filtered select multiple
# widget wrapped by the related field wrapper
self.assertIsInstance(
OriginalFormClass.base_fields['sections'].widget.widget,
FilteredSelectMultiple,
)
def test_excluded_plugin(self):
"""
Test that you can't add a text plugin
"""
CMS_PLACEHOLDER_CONF = {
'body': {
'excluded_plugins': ['TextPlugin']
}
}
# try to add a new text plugin
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
installed_plugins = plugin_pool.get_all_plugins('body', page)
installed_plugins = [cls.__name__ for cls in installed_plugins]
self.assertNotIn('TextPlugin', installed_plugins)
CMS_PLACEHOLDER_CONF = {
'body': {
'plugins': ['TextPlugin'],
'excluded_plugins': ['TextPlugin']
}
}
# try to add a new text plugin
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
installed_plugins = plugin_pool.get_all_plugins('body', page)
installed_plugins = [cls.__name__ for cls in installed_plugins]
self.assertNotIn('TextPlugin', installed_plugins)
def test_plugin_edit_marks_page_dirty(self):
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
self.assertEqual(response.status_code, 302)
created_plugin_id = self._create_text_plugin_on_page(page)
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), True)
response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), False)
self._edit_text_plugin(created_plugin_id, "Hello World")
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), True)
def test_plugin_order(self):
"""
Test that plugin position is saved after creation
"""
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
text_plugin_2 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the second")
db_plugin_1 = CMSPlugin.objects.get(pk=text_plugin_1.pk)
db_plugin_2 = CMSPlugin.objects.get(pk=text_plugin_2.pk)
with self.settings(CMS_PERMISSION=False):
self.assertEqual(text_plugin_1.position, 0)
self.assertEqual(db_plugin_1.position, 0)
self.assertEqual(text_plugin_2.position, 1)
self.assertEqual(db_plugin_2.position, 1)
## Finally we render the placeholder to test the actual content
rendered_placeholder = ph_en.render(self.get_context(page_en.get_absolute_url(), page=page_en), None)
self.assertEqual(rendered_placeholder, "I'm the firstI'm the second")
def test_plugin_order_alt(self):
"""
Test that plugin position is saved after creation
"""
draft_page = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=False, in_navigation=True)
placeholder = draft_page.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_2 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the second")
text_plugin_3 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the third")
# Publish to create a 'live' version
draft_page.publish('en')
draft_page = draft_page.reload()
placeholder = draft_page.placeholders.get(slot="col_left")
# Add a plugin and move it to the first position
text_plugin_1 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the first")
data = {
'placeholder_id': placeholder.id,
'plugin_id': text_plugin_1.id,
'plugin_parent': '',
'plugin_language': 'en',
'plugin_order[]': [text_plugin_1.id, text_plugin_2.id, text_plugin_3.id],
}
self.client.post(URL_CMS_PLUGIN_MOVE, data)
draft_page.publish('en')
draft_page = draft_page.reload()
live_page = draft_page.get_public_object()
placeholder = draft_page.placeholders.get(slot="col_left")
live_placeholder = live_page.placeholders.get(slot="col_left")
with self.settings(CMS_PERMISSION=False):
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_1.pk).position, 0)
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_2.pk).position, 1)
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_3.pk).position, 2)
## Finally we render the placeholder to test the actual content
rendered_placeholder = placeholder.render(self.get_context(draft_page.get_absolute_url(), page=draft_page), None)
self.assertEqual(rendered_placeholder, "I'm the firstI'm the secondI'm the third")
rendered_live_placeholder = live_placeholder.render(self.get_context(live_page.get_absolute_url(), page=live_page), None)
self.assertEqual(rendered_live_placeholder, "I'm the firstI'm the secondI'm the third")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
column = api.add_plugin(
placeholder,
"ColumnPlugin",
"en",
target=columns,
width='10%',
)
data = {
'placeholder_id': placeholder.id,
'plugin_id': text_plugin_1.id,
'plugin_parent': '',
'plugin_language': 'en',
'plugin_order[]': [
text_plugin_1.id,
text_plugin_2.id,
text_plugin_3.id,
columns.id,
column.id,
],
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, data)
self.assertEqual(response.status_code, 400)
self.assertContains(
response,
'order parameter references plugins in different trees',
status_code=400,
)
def test_plugin_breadcrumbs(self):
"""
Test the plugin breadcrumbs order
"""
draft_page = api.create_page("home", "col_two.html", "en",
slug="page1", published=False, in_navigation=True)
placeholder = draft_page.placeholders.get(slot="col_left")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
column = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%')
text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column, body="I'm the second")
text_breadcrumbs = text_plugin.get_breadcrumb()
self.assertEqual(len(columns.get_breadcrumb()), 1)
self.assertEqual(len(column.get_breadcrumb()), 2)
self.assertEqual(len(text_breadcrumbs), 3)
self.assertTrue(text_breadcrumbs[0]['title'], columns.get_plugin_class().name)
self.assertTrue(text_breadcrumbs[1]['title'], column.get_plugin_class().name)
self.assertTrue(text_breadcrumbs[2]['title'], text_plugin.get_plugin_class().name)
self.assertTrue('/edit-plugin/%s/'% columns.pk in text_breadcrumbs[0]['url'])
self.assertTrue('/edit-plugin/%s/'% column.pk, text_breadcrumbs[1]['url'])
self.assertTrue('/edit-plugin/%s/'% text_plugin.pk, text_breadcrumbs[2]['url'])
def test_extract_images_from_text(self):
img_path = os.path.join(os.path.dirname(__file__), 'data', 'image.jpg')
with open(img_path, 'rb') as fobj:
img_data = base64.b64encode(fobj.read()).decode('utf-8')
body = """<p>
<img alt='' src='data:image/jpeg;base64,{data}' />
</p>""".format(data=img_data)
page = api.create_page(
title='test page',
template='nav_playground.html',
language=settings.LANGUAGES[0][0],
)
plugin = api.add_plugin(
page.placeholders.get(slot="body"),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=body,
)
self.assertEqual(plugin.get_children().count(), 1)
def test_add_text_plugin_empty_tag(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page = api.create_page(
title='test page',
template='nav_playground.html',
language=settings.LANGUAGES[0][0],
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body='<div class="someclass"></div><p>foo</p>'
)
self.assertEqual(plugin.body, '<div class="someclass"></div><p>foo</p>')
def test_add_text_plugin_html_sanitizer(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page = api.create_page(
title='test page',
template='nav_playground.html',
language=settings.LANGUAGES[0][0],
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body='<script>var bar="hacked"</script>'
)
self.assertEqual(
plugin.body,
'<script>var bar="hacked"</script>'
)
def test_copy_plugins_method(self):
"""
Test that CMSPlugin copy does not have side effects
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
#
text_plugin_en.body += plugin_to_tag(link_plugin_en)
text_plugin_en.save()
# the call above to add a child makes a plugin reload required here.
text_plugin_en = self.reload(text_plugin_en)
# setup the plugins to copy
plugins = [text_plugin_en, link_plugin_en]
# save the old ids for check
old_ids = [plugin.pk for plugin in plugins]
new_plugins = []
plugins_ziplist = []
old_parent_cache = {}
# This is a stripped down version of cms.copy_plugins.copy_plugins_to
# to allow low-level testing of the copy process
for plugin in plugins:
new_plugins.append(plugin.copy_plugin(ph_de, 'de', old_parent_cache))
plugins_ziplist.append((new_plugins[-1], plugin))
for idx, plugin in enumerate(plugins):
inst, _ = new_plugins[idx].get_plugin_instance()
new_plugins[idx] = inst
new_plugins[idx].post_copy(plugin, plugins_ziplist)
for idx, plugin in enumerate(plugins):
# original plugin instance reference should stay unmodified
self.assertEqual(old_ids[idx], plugin.pk)
# new plugin instance should be different from the original
self.assertNotEqual(new_plugins[idx], plugin.pk)
# text plugins (both old and new) should contain a reference
# to the link plugins
if plugin.plugin_type == 'TextPlugin':
self.assertTrue('Link - A Link' in plugin.body)
self.assertTrue('id="%s"' % plugin.get_children()[0].pk in plugin.body)
self.assertTrue('Link - A Link' in new_plugins[idx].body)
self.assertTrue('id="%s"' % new_plugins[idx].get_children()[0].pk in new_plugins[idx].body)
def test_plugin_position(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
placeholder = page_en.placeholders.get(slot="body") # ID 2
placeholder_right = page_en.placeholders.get(slot="right-column")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en") # ID 1
column_1 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%') # ID 2
column_2 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='30%') # ID 3
first_text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the first") # ID 4
text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the second") # ID 5
returned_1 = copy_plugins_to([text_plugin], placeholder, 'en', column_1.pk) # ID 6
returned_2 = copy_plugins_to([text_plugin], placeholder_right, 'en') # ID 7
returned_3 = copy_plugins_to([text_plugin], placeholder, 'en', column_2.pk) # ID 8
# STATE AT THIS POINT:
# placeholder
# - columns
# - column_1
# - text_plugin "I'm the first" created here
# - text_plugin "I'm the second" created here
# - text_plugin "I'm the second" (returned_1) copied here
# - column_2
# - text_plugin "I'm the second" (returned_3) copied here
# placeholder_right
# - text_plugin "I'm the second" (returned_2) copied here
# First plugin in the plugin branch
self.assertEqual(first_text_plugin.position, 0)
# Second plugin in the plugin branch
self.assertEqual(text_plugin.position, 1)
# Added as third plugin in the same branch as the above
self.assertEqual(returned_1[0][0].position, 2)
# First plugin in a placeholder
self.assertEqual(returned_2[0][0].position, 0)
# First plugin nested in a plugin
self.assertEqual(returned_3[0][0].position, 0)
def test_copy_plugins(self):
"""
Test that copying plugins works as expected.
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
# the call above to add a child makes a plugin reload required here.
text_plugin_en = self.reload(text_plugin_en)
# check the relations
self.assertEqual(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
# just sanity check that so far everything went well
self.assertEqual(CMSPlugin.objects.count(), 2)
# copy the plugins to the german placeholder
copy_plugins_to(ph_en.get_plugins(), ph_de, 'de')
self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
self.assertEqual(text_plugin_de.get_children().count(), 1)
link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
# check we have twice as many plugins as before
self.assertEqual(CMSPlugin.objects.count(), 4)
# check language plugins
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
text_plugin_en = self.reload(text_plugin_en)
link_plugin_en = self.reload(link_plugin_en)
# check the relations in english didn't change
self.assertEqual(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
self.assertEqual(link_plugin_de.name, link_plugin_en.name)
self.assertEqual(link_plugin_de.url, link_plugin_en.url)
self.assertEqual(text_plugin_de.body, text_plugin_en.body)
# test subplugin copy
copy_plugins_to([link_plugin_en], ph_de, 'de')
def test_deep_copy_plugins(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
ph_en = page_en.placeholders.get(slot="body")
# Grid wrapper 1
mcol1_en = api.add_plugin(ph_en, "MultiColumnPlugin", "en", position="first-child")
# Grid column 1.1
col1_en = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1_en)
# Grid column 1.2
col2_en = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1_en)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(
ph_en,
"LinkPlugin",
"en",
target=col2_en,
name="A Link",
url="https://www.django-cms.org"
)
old_plugins = [mcol1_en, col1_en, col2_en, link_plugin_en]
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_de = page_de.placeholders.get(slot="body")
# Grid wrapper 1
mcol1_de = api.add_plugin(ph_de, "MultiColumnPlugin", "de", position="first-child")
# Grid column 1.1
col1_de = api.add_plugin(ph_de, "ColumnPlugin", "de", position="first-child", target=mcol1_de)
copy_plugins_to(
old_plugins=[mcol1_en, col1_en, col2_en, link_plugin_en],
to_placeholder=ph_de,
to_language='de',
parent_plugin_id=col1_de.pk,
)
col1_de = self.reload(col1_de)
new_plugins = col1_de.get_descendants().order_by('path')
self.assertEqual(new_plugins.count(), len(old_plugins))
for old_plugin, new_plugin in zip(old_plugins, new_plugins):
self.assertEqual(old_plugin.numchild, new_plugin.numchild)
with self.assertNumQueries(FuzzyInt(0, 207)):
page_en.publish('en')
def test_plugin_validation(self):
self.assertRaises(ImproperlyConfigured, plugin_pool.validate_templates, NonExisitngRenderTemplate)
self.assertRaises(ImproperlyConfigured, plugin_pool.validate_templates, NoRender)
self.assertRaises(ImproperlyConfigured, plugin_pool.validate_templates, NoRenderButChildren)
plugin_pool.validate_templates(DynTemplate)
def test_remove_plugin_before_published(self):
"""
When removing a draft plugin we would expect the public copy of the plugin to also be removed
"""
# add a page
page = api.create_page(
title='test page',
language=settings.LANGUAGES[0][0],
template='nav_playground.html'
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot="body"),
language='en',
plugin_type='TextPlugin',
body=''
)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# delete the plugin
plugin_data = {
'plugin_id': plugin.pk
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin.pk
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should be no plugins
self.assertEqual(0, CMSPlugin.objects.all().count())
def test_remove_plugin_after_published(self):
# add a page
page = api.create_page("home", "nav_playground.html", "en")
# add a plugin
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=''
)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 1)
# publish page
response = self.client.post(URL_CMS_PAGE + "%d/en/publish/" % page.pk, {1: 1})
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 2)
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# delete the plugin
plugin_data = {
'plugin_id': plugin.pk
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin.pk
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should be no plugins
self.assertEqual(CMSPlugin.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=False).count(), 1)
def test_remove_plugin_not_associated_to_page(self):
"""
Test case for PlaceholderField
"""
page = api.create_page(
title='test page',
template='nav_playground.html',
language='en'
)
# add a plugin
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=''
)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
ph = Placeholder(slot="subplugin")
ph.save()
url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder': ph.pk,
'plugin_parent': plugin.pk
})
response = self.client.post(url, {'body': ''})
# no longer allowed for security reasons
self.assertEqual(response.status_code, 400)
def test_register_plugin_twice_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
# The first time we register the plugin is should work
with register_plugins(DumbFixturePlugin):
# Let's add it a second time. We should catch and exception
raised = False
try:
plugin_pool.register_plugin(DumbFixturePlugin)
except PluginAlreadyRegistered:
raised = True
self.assertTrue(raised)
# Let's make sure we have the same number of plugins as before:
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_unregister_non_existing_plugin_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
raised = False
try:
# There should not be such a plugin registered if the others tests
# don't leak plugins
plugin_pool.unregister_plugin(DumbFixturePlugin)
except PluginNotRegistered:
raised = True
self.assertTrue(raised)
# Let's count, to make sure we didn't remove a plugin accidentally.
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_inheritplugin_media(self):
"""
Test case for InheritPagePlaceholder
"""
inheritfrompage = api.create_page('page to inherit from',
'nav_playground.html',
'en')
body = inheritfrompage.placeholders.get(slot="body")
plugin = GoogleMap(
plugin_type='GoogleMapPlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
address="Riedtlistrasse 16",
zipcode="8006",
city="Zurich",
)
plugin.add_root(instance=plugin)
inheritfrompage.publish('en')
page = api.create_page('inherit from page',
'nav_playground.html',
'en',
published=True)
inherited_body = page.placeholders.get(slot="body")
inherit_plugin = InheritPagePlaceholder(
plugin_type='InheritPagePlaceholderPlugin',
placeholder=inherited_body,
position=1,
language=settings.LANGUAGE_CODE,
from_page=inheritfrompage,
from_language=settings.LANGUAGE_CODE)
inherit_plugin.add_root(instance=inherit_plugin)
page.publish('en')
self.client.logout()
cache.clear()
# TODO: Replace this test using a Test Plugin, not an externally managed one.
# response = self.client.get(page.get_absolute_url())
# self.assertTrue(
# 'https://maps-api-ssl.google.com/maps/api/js' in response.content.decode('utf8').replace("&", "&"))
def test_inherit_plugin_with_empty_plugin(self):
inheritfrompage = api.create_page('page to inherit from',
'nav_playground.html',
'en', published=True)
body = inheritfrompage.placeholders.get(slot="body")
empty_plugin = CMSPlugin(
plugin_type='TextPlugin', # create an empty plugin
placeholder=body,
position=1,
language='en',
)
empty_plugin.add_root(instance=empty_plugin)
other_page = api.create_page('other page', 'nav_playground.html', 'en', published=True)
inherited_body = other_page.placeholders.get(slot="body")
api.add_plugin(inherited_body, InheritPagePlaceholderPlugin, 'en', position='last-child',
from_page=inheritfrompage, from_language='en')
api.add_plugin(inherited_body, "TextPlugin", "en", body="foobar")
# this should not fail, even if there in an empty plugin
rendered = inherited_body.render(context=self.get_context(other_page.get_absolute_url(), page=other_page), width=200)
self.assertIn("foobar", rendered)
def test_search_pages(self):
"""
Test search for pages
To be fully useful, this testcase needs to have the following different
Plugin configurations within the project:
* unaltered cmsplugin_ptr
* cmsplugin_ptr with related_name='+'
* cmsplugin_ptr with related_query_name='+'
* cmsplugin_ptr with related_query_name='whatever_foo'
* cmsplugin_ptr with related_name='whatever_bar'
* cmsplugin_ptr with related_query_name='whatever_foo' and related_name='whatever_bar'
Those plugins are in cms/test_utils/project/pluginapp/revdesc/models.py
"""
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
text = Text(body="hello", language="en", placeholder=placeholder, plugin_type="TextPlugin", position=1)
text.save()
page.publish('en')
self.assertEqual(Page.objects.search("hi").count(), 0)
self.assertEqual(Page.objects.search("hello").count(), 1)
def test_empty_plugin_is_not_ignored(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
# this should not raise any errors, but just ignore the empty plugin
out = placeholder.render(self.get_context(), width=300)
self.assertFalse(len(out))
self.assertTrue(len(placeholder._plugins_cache))
def test_pickle(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
text_plugin = api.add_plugin(
placeholder,
"TextPlugin",
'en',
body="Hello World",
)
cms_plugin = text_plugin.cmsplugin_ptr
# assert we can pickle and unpickle a solid plugin (subclass)
self.assertEqual(text_plugin, pickle.loads(pickle.dumps(text_plugin)))
# assert we can pickle and unpickle a cms plugin (parent)
self.assertEqual(cms_plugin, pickle.loads(pickle.dumps(cms_plugin)))
def test_defer_pickle(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
api.add_plugin(placeholder, "TextPlugin", 'en', body="Hello World")
plugins = Text.objects.all().defer('path')
import io
a = io.BytesIO()
pickle.dump(plugins[0], a)
def test_empty_plugin_description(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
a = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG
)
self.assertEqual(a.get_short_description(), "<Empty>")
def test_page_attribute_warns(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
a = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG
)
a.save()
def get_page(plugin):
return plugin.page
self.assertWarns(
DontUsePageAttributeWarning,
"Don't use the page attribute on CMSPlugins! CMSPlugins are not guaranteed to have a page associated with them!",
get_page, a
)
def test_set_translatable_content(self):
a = Text(body="hello")
self.assertTrue(a.set_translatable_content({'body': 'world'}))
b = Link(name="hello")
self.assertTrue(b.set_translatable_content({'name': 'world'}))
def test_editing_plugin_changes_page_modification_time_in_sitemap(self):
now = timezone.now()
one_day_ago = now - datetime.timedelta(days=1)
page = api.create_page("page", "nav_playground.html", "en", published=True)
title = page.get_title_obj('en')
page.creation_date = one_day_ago
page.changed_date = one_day_ago
plugin_id = self._create_text_plugin_on_page(page)
plugin = self._edit_text_plugin(plugin_id, "fnord")
actual_last_modification_time = CMSSitemap().lastmod(title)
actual_last_modification_time -= datetime.timedelta(microseconds=actual_last_modification_time.microsecond)
self.assertEqual(plugin.changed_date.date(), actual_last_modification_time.date())
def test_moving_plugin_to_different_placeholder(self):
with register_plugins(DumbFixturePlugin):
page = api.create_page(
"page",
"nav_playground.html",
"en"
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='DumbFixturePlugin',
language=settings.LANGUAGES[0][0]
)
child_plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='DumbFixturePlugin',
language=settings.LANGUAGES[0][0],
parent=plugin
)
post = {
'plugin_id': child_plugin.pk,
'placeholder_id': page.placeholders.get(slot='right-column').pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
from cms.utils.plugins import build_plugin_tree
build_plugin_tree(page.placeholders.get(slot='right-column').get_plugins_list())
def test_get_plugins_for_page(self):
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm inside an existing placeholder.")
# This placeholder is not in the template.
ph_en_not_used = page_en.placeholders.create(slot="not_used")
text_plugin_2 = api.add_plugin(ph_en_not_used, "TextPlugin", "en", body="I'm inside a non-existent placeholder.")
page_plugins = get_plugins_for_page(None, page_en, page_en.get_title_obj_attribute('language'))
db_text_plugin_1 = page_plugins.get(pk=text_plugin_1.pk)
self.assertRaises(CMSPlugin.DoesNotExist, page_plugins.get, pk=text_plugin_2.pk)
self.assertEqual(db_text_plugin_1.pk, text_plugin_1.pk)
def test_plugin_move_with_reload(self):
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
with register_plugins(ReloadDrivenPlugin, NonReloadDrivenPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
plugin_1 = api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
with force_language('en'):
plugin_1_action_urls = plugin_1.get_action_urls()
reload_expected = {
'reload': True,
'urls': plugin_1_action_urls,
}
# Test Plugin reload == True on Move
post = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), reload_expected)
with force_language('en'):
plugin_2_action_urls = plugin_2.get_action_urls()
no_reload_expected = {
'reload': False,
'urls': plugin_2_action_urls,
}
# Test Plugin reload == False on Move
post = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), no_reload_expected)
def test_plugin_copy_with_reload(self):
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
with register_plugins(ReloadDrivenPlugin, NonReloadDrivenPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
# Test Plugin reload == True on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], True)
# Test Plugin reload == False on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'source_plugin_id': plugin_2.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], False)
def test_custom_plugin_urls(self):
plugin_url = urlresolvers.reverse('admin:dumbfixtureplugin')
response = self.client.get(plugin_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"It works")
def test_plugin_require_parent(self):
"""
Assert that a plugin marked as 'require_parent' is not listed
in the plugin pool when a placeholder is specified
"""
ParentRequiredPlugin = type('ParentRequiredPlugin', (CMSPluginBase,),
dict(require_parent=True, render_plugin=False))
with register_plugins(ParentRequiredPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
plugin_list = plugin_pool.get_all_plugins(placeholder=placeholder, page=page)
self.assertFalse(ParentRequiredPlugin in plugin_list)
def test_plugin_toolbar_struct(self):
# Tests the output of the plugin toolbar structure.
GenericParentPlugin = type('GenericParentPlugin', (CMSPluginBase,), {'render_plugin':False})
with register_plugins(GenericParentPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
from cms.utils.placeholder import get_toolbar_plugin_struct
expected_struct = {'module': u'Generic',
'name': u'Parent Classes Plugin',
'value': 'ParentClassesPlugin'}
toolbar_struct = get_toolbar_plugin_struct([GenericParentPlugin],
placeholder.slot,
page,)
self.assertFalse(expected_struct in toolbar_struct)
def test_plugin_child_classes_from_settings(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ChildClassesPlugin = type('ChildClassesPlugin', (CMSPluginBase,),
dict(child_classes=['TextPlugin'], render_template='allow_children_plugin.html'))
with register_plugins(ChildClassesPlugin):
plugin = api.add_plugin(placeholder, ChildClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
self.assertEqual(['TextPlugin'], plugin.get_child_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'child_classes': {
'ChildClassesPlugin': ['LinkPlugin', 'PicturePlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['LinkPlugin', 'PicturePlugin'],
plugin.get_child_classes(placeholder.slot, page))
def test_plugin_parent_classes_from_settings(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentClassesPlugin = type('ParentClassesPlugin', (CMSPluginBase,),
dict(parent_classes=['TextPlugin'], render_plugin=False))
with register_plugins(ParentClassesPlugin):
plugin = api.add_plugin(placeholder, ParentClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
self.assertEqual(['TextPlugin'], plugin.get_parent_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'parent_classes': {
'ParentClassesPlugin': ['TestPlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['TestPlugin'],
plugin.get_parent_classes(placeholder.slot, page))
def test_plugin_parent_classes_from_object(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentPlugin = type('ParentPlugin', (CMSPluginBase,),
dict(render_plugin=False))
ChildPlugin = type('ChildPlugin', (CMSPluginBase,),
dict(parent_classes=['ParentPlugin'], render_plugin=False))
with register_plugins(ParentPlugin, ChildPlugin):
plugin = api.add_plugin(placeholder, ParentPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
child_classes = plugin.get_child_classes(placeholder.slot, page)
self.assertIn('ChildPlugin', child_classes)
self.assertIn('ParentPlugin', child_classes)
def test_plugin_require_parent_from_object(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentPlugin = type('ParentPlugin', (CMSPluginBase,),
dict(render_plugin=False))
ChildPlugin = type('ChildPlugin', (CMSPluginBase,),
dict(require_parent=True, render_plugin=False))
with register_plugins(ParentPlugin, ChildPlugin):
plugin = api.add_plugin(placeholder, ParentPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
child_classes = plugin.get_child_classes(placeholder.slot, page)
self.assertIn('ChildPlugin', child_classes)
self.assertIn('ParentPlugin', child_classes)
def test_plugin_translatable_content_getter_setter(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
plugin = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", plugin.body)
# see if the getter works
self.assertEqual({'body': "Hello World"}, plugin.get_translatable_content())
# change the content
self.assertEqual(True, plugin.set_translatable_content({'body': "It works!"}))
# check if it changed
self.assertEqual("It works!", plugin.body)
# double check through the getter
self.assertEqual({'body': "It works!"}, plugin.get_translatable_content())
def test_plugin_pool_register_returns_plugin_class(self):
@plugin_pool.register_plugin
class DecoratorTestPlugin(CMSPluginBase):
render_plugin = False
name = "Test Plugin"
self.assertIsNotNone(DecoratorTestPlugin)
class FileSystemPluginTests(PluginsTestBaseCase):
def setUp(self):
super(FileSystemPluginTests, self).setUp()
call_command('collectstatic', interactive=False, verbosity=0, link=True)
def tearDown(self):
for directory in [settings.STATIC_ROOT, settings.MEDIA_ROOT]:
for root, dirs, files in os.walk(directory, topdown=False):
# We need to walk() the directory tree since rmdir() does not allow
# to remove non-empty directories...
for name in files:
# Start by killing all files we walked
os.remove(os.path.join(root, name))
for name in dirs:
# Now all directories we walked...
os.rmdir(os.path.join(root, name))
super(FileSystemPluginTests, self).tearDown()
def test_fileplugin_icon_uppercase(self):
page = api.create_page('testpage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot="body")
plugin = File(
plugin_type='FilePlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
)
# This try/except block allows older and newer versions of the
# djangocms-file plugin to work here.
try:
plugin.file.save("UPPERCASE.JPG", SimpleUploadedFile(
"UPPERCASE.jpg", b"content"), False)
except ObjectDoesNotExist: # catches 'RelatedObjectDoesNotExist'
plugin.source.save("UPPERCASE.JPG", SimpleUploadedFile(
"UPPERCASE.jpg", b"content"), False)
plugin.add_root(instance=plugin)
self.assertNotEqual(plugin.get_icon_url().find('jpg'), -1)
class PluginManyToManyTestCase(PluginsTestBaseCase):
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
# create 3 sections
self.sections = []
self.section_pks = []
for i in range(3):
section = Section.objects.create(name="section %s" % i)
self.sections.append(section)
self.section_pks.append(section.pk)
self.section_count = len(self.sections)
# create 10 articles by section
for section in self.sections:
for j in range(10):
Article.objects.create(
title="article %s" % j,
section=section
)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
def test_dynamic_plugin_template(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
ph_en = page_en.placeholders.get(slot="body")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="a title")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="custom template")
request = self.get_request(path=page_en.get_absolute_url())
plugins = get_plugins(request, ph_en, page_en.template)
for plugin in plugins:
if plugin.title == 'custom template':
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles_custom.html')
self.assertTrue('Articles Custom template' in plugin.render_plugin({}, ph_en))
else:
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles.html')
self.assertFalse('Articles Custom template' in plugin.render_plugin({}, ph_en))
def test_add_plugin_with_m2m(self):
# add a new text plugin
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
page.publish('en')
placeholder = page.placeholders.get(slot="body")
add_url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'placeholder_id': placeholder.pk,
})
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(self.section_count, plugin.sections.count())
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
self.assertEqual(plugin.sections.through._meta.db_table, 'manytomany_rel_articlepluginmodel_sections')
def test_add_plugin_with_m2m_and_publisher(self):
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
placeholder = page.placeholders.get(slot="body")
# add a plugin
data = {
'title': "Articles Plugin 1",
'sections': self.section_pks
}
add_url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'placeholder_id': placeholder.pk,
})
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/cms/page/plugin/confirm_form.html')
# there should be only 1 plugin
self.assertEqual(1, CMSPlugin.objects.all().count())
self.assertEqual(1, ArticlePluginModel.objects.count())
articles_plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(u'Articles Plugin 1', articles_plugin.title)
self.assertEqual(self.section_count, articles_plugin.sections.count())
# check publish box
api.publish_page(page, self.super_user, 'en')
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(2, ArticlePluginModel.objects.all().count())
db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
expected = [self.section_count for i in range(len(db_counts))]
self.assertEqual(expected, db_counts)
def test_copy_plugin_with_m2m(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = ArticlePluginModel(
plugin_type='ArticlePlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
edit_url = URL_CMS_PLUGIN_EDIT + str(plugin.pk) + "/"
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
self.assertEqual(ArticlePluginModel.objects.all()[0].sections.count(), self.section_count)
page_data = self.get_new_page_data()
#create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE + "?language=%s" % self.SECOND_LANG)
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(Page.objects.all().count(), 1)
copy_data = {
'source_placeholder_id': placeholder.pk,
'target_placeholder_id': placeholder.pk,
'target_language': self.SECOND_LANG,
'source_language': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PLUGINS_COPY, copy_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf8').count('"position":'), 1)
# assert copy success
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.count(), 2)
db_counts = [plgn.sections.count() for plgn in ArticlePluginModel.objects.all()]
expected = [self.section_count for _ in range(len(db_counts))]
self.assertEqual(expected, db_counts)
class PluginCopyRelationsTestCase(PluginsTestBaseCase):
"""Test the suggestions in the docs for copy_relations()"""
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
page_data1 = self.get_new_page_data_dbfields()
page_data1['published'] = False
self.page1 = api.create_page(**page_data1)
page_data2 = self.get_new_page_data_dbfields()
page_data2['published'] = False
self.page2 = api.create_page(**page_data2)
self.placeholder1 = self.page1.placeholders.get(slot='body')
self.placeholder2 = self.page2.placeholders.get(slot='body')
def test_copy_fk_from_model(self):
plugin = api.add_plugin(
placeholder=self.placeholder1,
plugin_type="PluginWithFKFromModel",
language=self.FIRST_LANG,
)
FKModel.objects.create(fk_field=plugin)
old_public_count = FKModel.objects.filter(
fk_field__placeholder__page__publisher_is_draft=False
).count()
api.publish_page(
self.page1,
self.super_user,
self.FIRST_LANG
)
new_public_count = FKModel.objects.filter(
fk_field__placeholder__page__publisher_is_draft=False
).count()
self.assertEqual(
new_public_count,
old_public_count + 1
)
def test_copy_m2m_to_model(self):
plugin = api.add_plugin(
placeholder=self.placeholder1,
plugin_type="PluginWithM2MToModel",
language=self.FIRST_LANG,
)
m2m_target = M2MTargetModel.objects.create()
plugin.m2m_field.add(m2m_target)
old_public_count = M2MTargetModel.objects.filter(
pluginmodelwithm2mtomodel__placeholder__page__publisher_is_draft=False
).count()
api.publish_page(
self.page1,
self.super_user,
self.FIRST_LANG
)
new_public_count = M2MTargetModel.objects.filter(
pluginmodelwithm2mtomodel__placeholder__page__publisher_is_draft=False
).count()
self.assertEqual(
new_public_count,
old_public_count + 1
)
class PluginsMetaOptionsTests(TestCase):
''' TestCase set for ensuring that bugs like #992 are caught '''
# these plugins are inlined because, due to the nature of the #992
# ticket, we cannot actually import a single file with all the
# plugin variants in, because that calls __new__, at which point the
# error with splitted occurs.
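# For illustration, a plugin model with customised meta options (a hypothetical
# sketch, not one of the plugins under test) would look roughly like:
#
#     class MetaExamplePluginModel(CMSPlugin):
#         class Meta:
#             app_label = 'one_thing'
#             db_table = 'or_another'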
def test_meta_options_as_defaults(self):
''' handling when a CMSPlugin meta options are computed defaults '''
# this plugin relies on the base CMSPlugin and Model classes to
# decide what the app_label and db_table should be
plugin = TestPlugin.model
self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_as_declared_defaults(self):
''' handling when a CMSPlugin meta options are declared as per defaults '''
# here, we declare the db_table and app_label explicitly, but to the same
# values as would be computed, thus making sure it's not a problem to
# supply options.
plugin = TestPlugin2.model
self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel2')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_custom_app_label(self):
''' make sure customised meta options on CMSPlugins don't break things '''
plugin = TestPlugin3.model
self.assertEqual(plugin._meta.db_table, 'one_thing_testpluginmodel3')
self.assertEqual(plugin._meta.app_label, 'one_thing')
def test_meta_options_custom_db_table(self):
''' make sure custom database table names are OK. '''
plugin = TestPlugin4.model
self.assertEqual(plugin._meta.db_table, 'or_another_4')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_custom_both(self):
''' We should be able to customise app_label and db_table together '''
plugin = TestPlugin5.model
self.assertEqual(plugin._meta.db_table, 'or_another_5')
self.assertEqual(plugin._meta.app_label, 'one_thing')
class LinkPluginTestCase(PluginsTestBaseCase):
def test_does_not_verify_existance_of_url(self):
form = LinkForm(
{'name': 'Linkname', 'url': 'http://www.nonexistant.test'})
self.assertTrue(form.is_valid())
def test_opens_in_same_window_by_default(self):
"""Could not figure out how to render this plugin
Checking only for the values in the model"""
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test'})
link = form.save()
self.assertEqual(link.target, '')
def test_open_in_blank_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_blank'})
link = form.save()
self.assertEqual(link.target, '_blank')
def test_open_in_parent_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_parent'})
link = form.save()
self.assertEqual(link.target, '_parent')
def test_open_in_top_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_top'})
link = form.save()
self.assertEqual(link.target, '_top')
def test_open_in_nothing_else(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': 'artificial'})
self.assertFalse(form.is_valid())
class NoDatabasePluginTests(TestCase):
def test_render_meta_is_unique(self):
text = Text()
link = Link()
self.assertNotEqual(id(text._render_meta), id(link._render_meta))
def test_render_meta_does_not_leak(self):
text = Text()
link = Link()
text._render_meta.text_enabled = False
link._render_meta.text_enabled = False
self.assertFalse(text._render_meta.text_enabled)
self.assertFalse(link._render_meta.text_enabled)
link._render_meta.text_enabled = True
self.assertFalse(text._render_meta.text_enabled)
self.assertTrue(link._render_meta.text_enabled)
def test_db_table_hack(self):
# Plugin models have been moved away due to Django's AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin1
self.assertEqual(TestPlugin1._meta.db_table, 'bunch_of_plugins_testplugin1')
def test_db_table_hack_with_mixin(self):
# Plugin models have been moved away due to Django's AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin2
self.assertEqual(TestPlugin2._meta.db_table, 'bunch_of_plugins_testplugin2')
class PicturePluginTests(PluginsTestBaseCase):
def test_link_or_page(self):
"""Test a validator: you can enter a url or a page_link, but not both."""
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
picture = Picture(url="test")
# Note: don't call full_clean as it will check ALL fields - including
# the image, which we haven't defined. Call clean() instead which
# just validates the url and page_link fields.
picture.clean()
picture.page_link = page
picture.url = None
picture.clean()
picture.url = "test"
self.assertRaises(ValidationError, picture.clean)
class SimplePluginTests(TestCase):
def test_simple_naming(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
self.assertEqual(MyPlugin.name, 'My Plugin')
def test_simple_context(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
plugin = MyPlugin(ArticlePluginModel, admin.site)
context = {}
out_context = plugin.render(context, 1, 2)
self.assertEqual(out_context['instance'], 1)
self.assertEqual(out_context['placeholder'], 2)
self.assertIs(out_context, context)
class BrokenPluginTests(TestCase):
def test_import_broken_plugin(self):
"""
If there is an import error in the actual cms_plugins file it should
raise the ImportError rather than silently swallowing it (in contrast
to the ImportError that is swallowed when the file 'cms_plugins.py'
does not exist).
"""
new_apps = ['cms.test_utils.project.brokenpluginapp']
with self.settings(INSTALLED_APPS=new_apps):
plugin_pool.discovered = False
self.assertRaises(ImportError, plugin_pool.discover_plugins)
class MTIPluginsTestCase(PluginsTestBaseCase):
def test_add_edit_plugin(self):
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
"""
Test that we can instantiate and use a MTI plugin
"""
# Create a page
page = create_page("Test", "nav_playground.html", settings.LANGUAGES[0][0])
placeholder = page.placeholders.get(slot="body")
# Add the MTI plugin
add_url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "TestPluginBeta",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholder.pk,
})
data = {
'alpha': 'ALPHA',
'beta': 'BETA'
}
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(TestPluginBetaModel.objects.count(), 1)
plugin_model = TestPluginBetaModel.objects.all()[0]
self.assertEqual("ALPHA", plugin_model.alpha)
self.assertEqual("BETA", plugin_model.beta)
def test_related_name(self):
from cms.test_utils.project.mti_pluginapp.models import (
TestPluginAlphaModel, TestPluginBetaModel, ProxiedAlphaPluginModel,
ProxiedBetaPluginModel, AbstractPluginParent, TestPluginGammaModel, MixedPlugin,
LessMixedPlugin, NonPluginModel
)
# the first concrete class of the following four plugins is TestPluginAlphaModel
self.assertEqual(TestPluginAlphaModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
self.assertEqual(TestPluginBetaModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
self.assertEqual(ProxiedAlphaPluginModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
self.assertEqual(ProxiedBetaPluginModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
# Abstract plugins will have the dynamic format for related name
self.assertEqual(
AbstractPluginParent.cmsplugin_ptr.field.rel.related_name,
'%(app_label)s_%(class)s'
)
# Concrete plugin of an abstract plugin gets its own related_name
self.assertEqual(TestPluginGammaModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testplugingammamodel')
# Child plugin gets its own related name
self.assertEqual(MixedPlugin.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_mixedplugin')
# If the child plugin inherits straight from CMSPlugin, even if composed with
# other models, gets its own related_name
self.assertEqual(LessMixedPlugin.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_lessmixedplugin')
# Non plugins are skipped
self.assertFalse(hasattr(NonPluginModel, 'cmsplugin_ptr'))
the-stack_0_13567 | #!/usr/bin/python2.7
from __future__ import print_function
import sys
import os
import copy
import argparse
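# outputReactions() below writes a tab-separated table mapping each reaction to
# its enzyme complexes and functional roles; when a set of gap-filled reaction
# IDs is passed via `gf`, extra columns record whether and how each reaction
# was gap-filled.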
def outputReactions(fn, sfx, rxns, db_rxns, db_enz, gf=None, v=False):
fn += "_"
fh = open(fn + sfx, "w")
if gf:
fh.write("reaction\tcomplex\tfunction\tgapfilled\tgfstep\tequation\n")
else:
fh.write("reaction\tcomplex\tfunction\tequation\n")
for r in rxns:
myEnz = db_rxns[r].enzymes
eqn = db_rxns[r].equation
if not gf:
currGF = ""
gfstep = ""
elif r in gf:
currGF = "yes\t"
gfstep = db_rxns[r].gapfill_method + "\t"
else:
currGF = "no\t"
gfstep = "\t"
if len(myEnz) == 0:
if v:
print("No enzymes found for reaction", r, file=sys.stderr)
fh.write("{}\tnull\tnull\t{}{}{}\n".format(r, currGF, gfstep, eqn))
continue
for e in myEnz:
if e not in db_enz:
if v:
print(e, "does not exist in 'enzymes'", file=sys.stderr)
fh.write("{}\t{}\tnull\t{}{}{}\n".format(r, e, currGF,
gfstep, eqn))
continue
myRoles = db_enz[e].roles
if len(myRoles) == 0:
if v:
print("No roles found for enzyme", e, file=sys.stderr)
fh.write("{}\t{}\tnull\t{}{}{}\n".format(r, e, currGF,
gfstep, eqn))
continue
for role in myRoles:
fh.write("{}\t{}\t{}\t{}{}{}\n".format(r, e, role, currGF,
gfstep, eqn))
fh.close()
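# outputFlux() dumps the flux (primal value) of every reaction column from the
# solved linear programme to a tab-separated file.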
def outputFlux(fn, sfx):
fh = open(fn + "_reactions_" + sfx, "w")
for rxn, val in PyFBA.lp.col_primal_hash().items():
fh.write(rxn + "\t" + str(val) + "\n")
fh.close()
parser = argparse.ArgumentParser(description="Build model from roles then gap-fill model")
parser.add_argument("functions", help="Assigned functions file")
parser.add_argument("cgfunctions", help="Closest genomes functions file")
parser.add_argument("media", help="Media file")
parser.add_argument("-o", "--outsuffix", help="Suffix for output files")
parser.add_argument("--draft", help="Output starting reactions",
action="store_true")
parser.add_argument("-v", "--verbose", help="Verbose stderr output",
action="store_true")
parser.add_argument("--dev", help="Use PyFBA dev code",
action="store_true")
args = parser.parse_args()
outsfx = args.outsuffix if args.outsuffix else "out"
if args.dev:
# Import PyFBA from absoulte path
sys.path.insert(0, os.path.expanduser("~") + "/Projects/PyFBA/")
sys.path.insert(0, os.path.expanduser("~") + "/PyFBA/")
print("IN DEV MODE", file=sys.stderr)
import PyFBA
# Load ModelSEED database
modeldata = PyFBA.parse.model_seed.parse_model_seed_data('gramnegative', verbose=True)
# Read in assigned functions file
assigned_functions = PyFBA.parse.read_assigned_functions(args.functions)
roles = set([i[0] for i in [list(j) for j in assigned_functions.values()]])
print("There are {} unique roles in this genome".format(len(roles)),
file=sys.stderr)
# Obtain dictionary of roles and their reactions
#roles_to_reactions = PyFBA.filters.roles_to_reactions(roles)
#reactions_to_run = set()
#for role in roles_to_reactions:
# reactions_to_run.update(roles_to_reactions[role])
#print("There are {}".format(len(reactions_to_run)),
# "unique reactions associated with this genome", file=sys.stderr)
# Obtain enzyme complexes from roles
complexes = PyFBA.filters.roles_to_complexes(roles)
if args.verbose:
print("There are", len(complexes["complete"]), "complete and",
len(complexes["incomplete"]), "incomplete enzyme complexes",
file=sys.stderr)
# Get reactions from only completed complexes
reactions_to_run = set()
for c in complexes["complete"]:
reactions_to_run.update(modeldata.enzymes[c].reactions)
print("There are {}".format(len(reactions_to_run)),
"unique reactions associated with this genome", file=sys.stderr)
# Remove reactions IDs that do not not have a reaction equation associated
tempset = set()
for r in reactions_to_run:
if r in modeldata.reactions:
tempset.add(r)
elif args.verbose:
print("Reaction ID {}".format(r),
"is not in our reactions list. Skipped",
file=sys.stderr)
reactions_to_run = tempset
if args.draft:
outputReactions("origreactions", outsfx,
reactions_to_run, modeldata.reactions, modeldata.enzymes, gf=None, v=args.verbose)
# Load our media
media = PyFBA.parse.read_media_file(args.media)
print("Our media has {} components".format(len(media)), file=sys.stderr)
# Define a biomass equation
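# The biomass reaction acts as the FBA objective: the model is considered to
# grow when this reaction can carry positive flux on the given media.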
biomass_equation = PyFBA.metabolism.biomass_equation('gramnegative')
# Run FBA on our media
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("Initial run has a biomass flux value of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Check to see if model needs any gap-filling
if growth:
print("Model grew without gap-filling", file=sys.stderr)
sys.exit()
# Gap-fill the model
added_reactions = []
original_reactions_to_run = copy.copy(reactions_to_run)
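# Each gap-filling step below follows the same pattern: ask PyFBA for a set of
# candidate reactions, record them in added_reactions together with a label for
# the method, add them to reactions_to_run, and re-run FBA to see whether the
# model now grows.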
# Media import reactions
if not growth:
print("Adding media import reactions", file=sys.stderr)
media_reactions = PyFBA.gapfill.suggest_from_media(modeldata,
reactions_to_run, media)
added_reactions.append(("media", media_reactions))
reactions_to_run.update(media_reactions)
print("Attempting to add", len(media_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Essential reactions
if not growth:
print("Adding essential reactions", file=sys.stderr)
essential_reactions = PyFBA.gapfill.suggest_essential_reactions()
added_reactions.append(("essential", essential_reactions))
reactions_to_run.update(essential_reactions)
print("Attempting to add", len(essential_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Reactions from closely related organisms
if not growth:
print("Adding close organisms reactions", file=sys.stderr)
reactions_from_other_orgs =\
PyFBA.gapfill.suggest_from_roles(args.cgfunctions, modeldata.reactions)
added_reactions.append(("close genomes", reactions_from_other_orgs))
reactions_to_run.update(reactions_from_other_orgs)
print("Attempting to add", len(reactions_from_other_orgs), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Subsystems
if not growth:
print("Adding subsystem reactions", file=sys.stderr)
subsystem_reactions =\
PyFBA.gapfill.suggest_reactions_from_subsystems(modeldata.reactions,
reactions_to_run,
threshold=0.5)
added_reactions.append(("subsystems", subsystem_reactions))
reactions_to_run.update(subsystem_reactions)
print("Attempting to add", len(subsystem_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# EC-related reactions
if not growth:
print("Adding EC-related reactions", file=sys.stderr)
ec_reactions = PyFBA.gapfill.suggest_reactions_using_ec(roles,
modeldata.reactions,
reactions_to_run)
added_reactions.append(("ec", ec_reactions))
reactions_to_run.update(ec_reactions)
print("Attempting to add", len(ec_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Compound-probability-based reactions
if not growth:
print("Adding compound-probability-based reactions", file=sys.stderr)
probable_reactions = PyFBA.gapfill.compound_probability(modeldata.reactions,
reactions_to_run,
cutoff=0,
rxn_with_proteins=True)
added_reactions.append(("probable", probable_reactions))
reactions_to_run.update(probable_reactions)
print("Attempting to add", len(probable_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Orphan compounds
if not growth:
print("Adding orphan-compound reactions", file=sys.stderr)
orphan_reactions =\
PyFBA.gapfill.suggest_by_compound(modeldata,
reactions_to_run,
max_reactions=1)
added_reactions.append(("orphans", orphan_reactions))
reactions_to_run.update(orphan_reactions)
print("Attempting to add", len(orphan_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
if not growth:
print("UNABLE TO GAP-FILL MODEL", file=sys.stderr)
sys.exit()
# Trimming the model
reqd_additional = set()
# Begin loop through all gap-filled reactions
while added_reactions:
ori = copy.copy(original_reactions_to_run)
ori.update(reqd_additional)
# Test next set of gap-filled reactions
# Each set is based on a method described above
how, new = added_reactions.pop()
# Get all the other gap-filled reactions we need to add
for tple in added_reactions:
ori.update(tple[1])
# Use minimization function to determine the minimal
# set of gap-filled reactions from the current method
new_essential =\
PyFBA.gapfill.minimize_additional_reactions(ori,
new,
modeldata,
media,
biomass_equation)
# Record the method used to determine
# how the reaction was gap-filled
for new_r in new_essential:
modeldata.reactions[new_r].is_gapfilled = True
modeldata.reactions[new_r].gapfill_method = how
reqd_additional.update(new_essential)
# Combine old and new reactions
all_reactions = original_reactions_to_run.union(reqd_additional)
status, value, growth = PyFBA.fba.run_fba(modeldata, all_reactions,
media, biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Save flux values
outputFlux("flux", outsfx)
# Save all reactions
outputReactions("allreactions", outsfx, all_reactions, modeldata.reactions, modeldata.enzymes, reqd_additional,
args.verbose) |
the-stack_0_13570 | import unittest
import numpy as np
from slice_merge import TiledImage
class TestTiledImage(unittest.TestCase):
def test_slicing(self):
data = np.zeros((5, 5, 2))
sliced = TiledImage(data, tile_size=2, keep_rest=True)
self.assertEqual(sliced.data.shape, (3, 3, 2, 2, 2))
self.assertEqual(sliced.image().shape, data.shape)
sliced = TiledImage(data, tile_size=2, keep_rest=False)
self.assertEqual(sliced.data.shape, (2, 2, 2, 2, 2))
sliced = TiledImage(data, number_of_tiles=2, keep_rest=True)
self.assertEqual(sliced.data.shape, (2, 2, 3, 3, 2))
self.assertEqual(sliced.image().shape, data.shape)
sliced = TiledImage(data, number_of_tiles=2, keep_rest=False)
self.assertEqual(sliced.data.shape, (2, 2, 2, 2, 2))
def test_set_tile(self):
data = np.zeros((2, 2, 1))
sliced = TiledImage(data, tile_size=1, keep_rest=True)
new_tile = np.ones((1, 1, 1))
data[1, 0, 0] = 1
sliced.set_tile(1, 0, new_tile)
self.assertTrue(np.array_equal(sliced.image(), data))
self.assertTrue(np.array_equal(new_tile, sliced.get_tile(1, 0)))
def test_apply(self):
data = np.arange(25).reshape((5, 5, 1))
true_result = data**2
sliced = TiledImage(data, tile_size=2, keep_rest=True)
result = sliced.merge(sliced.apply(lambda x: x**2))
self.assertTrue(np.array_equal(result, true_result))
result = sliced.merge(sliced.apply(np.square, parallel=True))
def test_list_tiles_2d(self):
data = np.arange(25).reshape((5, 5, 1))
true_result = np.arange(4).reshape((2, 2))
sliced = TiledImage(data, 2)
self.assertEqual(sliced.list_tiles(tile_2d=True)[0].shape, true_result.shape)
def test_list_indices(self):
data = np.arange(25).reshape((5, 5, 1))
sliced = TiledImage(data, tile_size=2)
tile_indices = sliced.list_tile_indices()
tile_list = sliced.list_tiles()
tile_by_index = sliced.get_tile(*tile_indices[1])
tile_from_list = tile_list[1]
self.assertTrue(np.array_equal(tile_from_list, tile_by_index))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13573 | '''
(C) 2014-2016 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
'''
import os
import json
import logging
from uuid import uuid1
from copy import deepcopy
from threading import Semaphore, Event
from webview.localization import localization
from webview import _parse_api_js, _js_bridge_call, _convert_string, _escape_string
from webview import OPEN_DIALOG, FOLDER_DIALOG, SAVE_DIALOG
logger = logging.getLogger(__name__)
# Try importing Qt5 modules
try:
from PyQt5 import QtCore
# Check to see if we're running Qt > 5.5
from PyQt5.QtCore import QT_VERSION_STR
_qt_version = [int(n) for n in QT_VERSION_STR.split('.')]
if _qt_version >= [5, 5]:
from PyQt5.QtWebEngineWidgets import QWebEngineView as QWebView
from PyQt5.QtWebChannel import QWebChannel
else:
from PyQt5.QtWebKitWidgets import QWebView
from PyQt5.QtWidgets import QWidget, QMainWindow, QVBoxLayout, QApplication, QFileDialog, QMessageBox
from PyQt5.QtGui import QColor
logger.debug('Using Qt5')
except ImportError as e:
logger.debug('PyQt5 or one of dependencies is not found', exc_info=True)
_import_error = True
else:
_import_error = False
if _import_error:
# Try importing Qt4 modules
try:
from PyQt4 import QtCore
from PyQt4.QtWebKit import QWebView, QWebFrame
from PyQt4.QtGui import QWidget, QMainWindow, QVBoxLayout, QApplication, QDialog, QFileDialog, QMessageBox, QColor
_qt_version = [4, 0]
logger.debug('Using Qt4')
except ImportError as e:
_import_error = True
else:
_import_error = False
if _import_error:
raise Exception('This module requires PyQt4 or PyQt5 to work under Linux.')
class BrowserView(QMainWindow):
instances = {}
create_window_trigger = QtCore.pyqtSignal(object)
set_title_trigger = QtCore.pyqtSignal(str)
load_url_trigger = QtCore.pyqtSignal(str)
html_trigger = QtCore.pyqtSignal(str)
dialog_trigger = QtCore.pyqtSignal(int, str, bool, str, str)
destroy_trigger = QtCore.pyqtSignal()
fullscreen_trigger = QtCore.pyqtSignal()
current_url_trigger = QtCore.pyqtSignal()
evaluate_js_trigger = QtCore.pyqtSignal(str, str)
class JSBridge(QtCore.QObject):
api = None
parent_uid = None
try:
qtype = QtCore.QJsonValue # QT5
except AttributeError:
qtype = str # QT4
def __init__(self):
super(BrowserView.JSBridge, self).__init__()
@QtCore.pyqtSlot(str, qtype, result=str)
def call(self, func_name, param):
func_name = BrowserView._convert_string(func_name)
param = BrowserView._convert_string(param)
return _js_bridge_call(self.parent_uid, self.api, func_name, param)
def __init__(self, uid, title, url, width, height, resizable, fullscreen,
min_size, confirm_quit, background_color, debug, js_api, webview_ready):
super(BrowserView, self).__init__()
BrowserView.instances[uid] = self
self.uid = uid
self.js_bridge = BrowserView.JSBridge()
self.js_bridge.api = js_api
self.js_bridge.parent_uid = self.uid
self.is_fullscreen = False
self.confirm_quit = confirm_quit
self._file_name_semaphore = Semaphore(0)
self._current_url_semaphore = Semaphore(0)
self.load_event = Event()
self._js_results = {}
self._current_url = None
self._file_name = None
self.resize(width, height)
self.title = title
self.setWindowTitle(title)
# Set window background color
self.background_color = QColor()
self.background_color.setNamedColor(background_color)
palette = self.palette()
palette.setColor(self.backgroundRole(), self.background_color)
self.setPalette(palette)
if not resizable:
self.setFixedSize(width, height)
self.setMinimumSize(min_size[0], min_size[1])
self.view = QWebView(self)
if url is not None:
self.view.setUrl(QtCore.QUrl(url))
else:
self.load_event.set()
self.setCentralWidget(self.view)
self.create_window_trigger.connect(BrowserView.on_create_window)
self.load_url_trigger.connect(self.on_load_url)
self.html_trigger.connect(self.on_load_html)
self.dialog_trigger.connect(self.on_file_dialog)
self.destroy_trigger.connect(self.on_destroy_window)
self.fullscreen_trigger.connect(self.on_fullscreen)
self.current_url_trigger.connect(self.on_current_url)
self.evaluate_js_trigger.connect(self.on_evaluate_js)
self.set_title_trigger.connect(self.on_set_title)
if _qt_version >= [5, 5]:
self.channel = QWebChannel(self.view.page())
self.view.page().setWebChannel(self.channel)
self.view.page().loadFinished.connect(self.on_load_finished)
if fullscreen:
self.toggle_fullscreen()
self.view.setContextMenuPolicy(QtCore.Qt.NoContextMenu) # disable right click context menu
self.move(QApplication.desktop().availableGeometry().center() - self.rect().center())
self.activateWindow()
self.raise_()
webview_ready.set()
def on_set_title(self, title):
self.setWindowTitle(title)
def on_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_filter):
if dialog_type == FOLDER_DIALOG:
self._file_name = QFileDialog.getExistingDirectory(self, localization['linux.openFolder'], options=QFileDialog.ShowDirsOnly)
elif dialog_type == OPEN_DIALOG:
if allow_multiple:
self._file_name = QFileDialog.getOpenFileNames(self, localization['linux.openFiles'], directory, file_filter)
else:
self._file_name = QFileDialog.getOpenFileName(self, localization['linux.openFile'], directory, file_filter)
elif dialog_type == SAVE_DIALOG:
if directory:
save_filename = os.path.join(str(directory), str(save_filename))
self._file_name = QFileDialog.getSaveFileName(self, localization['global.saveFile'], save_filename)
self._file_name_semaphore.release()
def on_current_url(self):
url = BrowserView._convert_string(self.view.url().toString())
self._current_url = None if url == '' else url
self._current_url_semaphore.release()
def on_load_url(self, url):
self.view.setUrl(QtCore.QUrl(url))
def on_load_html(self, content):
self.view.setHtml(content, QtCore.QUrl(''))
def closeEvent(self, event):
if self.confirm_quit:
reply = QMessageBox.question(self, self.title, localization['global.quitConfirmation'],
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
event.ignore()
return
event.accept()
del BrowserView.instances[self.uid]
def on_destroy_window(self):
self.close()
def on_fullscreen(self):
if self.is_fullscreen:
self.showNormal()
else:
self.showFullScreen()
self.is_fullscreen = not self.is_fullscreen
def on_evaluate_js(self, script, uuid):
def return_result(result):
result = BrowserView._convert_string(result)
uuid_ = BrowserView._convert_string(uuid)
js_result = self._js_results[uuid_]
js_result['result'] = None if result is None or result == 'null' else result if result == '' else json.loads(result)
js_result['semaphore'].release()
escaped_script = 'JSON.stringify(eval("{0}"))'.format(_escape_string(script))
try: # PyQt4
result = self.view.page().mainFrame().evaluateJavaScript(escaped_script)
return_result(result)
except AttributeError: # PyQt5
self.view.page().runJavaScript(escaped_script, return_result)
def on_load_finished(self):
if self.js_bridge.api:
self._set_js_api()
else:
self.load_event.set()
def set_title(self, title):
self.set_title_trigger.emit(title)
def get_current_url(self):
self.load_event.wait()
self.current_url_trigger.emit()
self._current_url_semaphore.acquire()
return self._current_url
def load_url(self, url):
self.load_event.clear()
self.load_url_trigger.emit(url)
def load_html(self, content):
self.load_event.clear()
self.html_trigger.emit(content)
def create_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_filter):
self.dialog_trigger.emit(dialog_type, directory, allow_multiple, save_filename, file_filter)
self._file_name_semaphore.acquire()
if _qt_version >= [5, 0]: # QT5
if dialog_type == FOLDER_DIALOG:
file_names = (self._file_name,)
elif dialog_type == SAVE_DIALOG or not allow_multiple:
file_names = (self._file_name[0],)
else:
file_names = tuple(self._file_name[0])
else: # QT4
if dialog_type == FOLDER_DIALOG:
file_names = (BrowserView._convert_string(self._file_name),)
elif dialog_type == SAVE_DIALOG or not allow_multiple:
file_names = (BrowserView._convert_string(self._file_name[0]),)
else:
file_names = tuple([BrowserView._convert_string(s) for s in self._file_name])
# Check if we got an empty tuple, or a tuple with empty string
if len(file_names) == 0 or len(file_names[0]) == 0:
return None
else:
return file_names
def destroy_(self):
self.destroy_trigger.emit()
def toggle_fullscreen(self):
self.fullscreen_trigger.emit()
def evaluate_js(self, script):
self.load_event.wait()
result_semaphore = Semaphore(0)
unique_id = uuid1().hex
self._js_results[unique_id] = {'semaphore': result_semaphore, 'result': ''}
self.evaluate_js_trigger.emit(script, unique_id)
result_semaphore.acquire()
result = deepcopy(self._js_results[unique_id]['result'])
del self._js_results[unique_id]
return result
def _set_js_api(self):
def _register_window_object():
frame.addToJavaScriptWindowObject('external', self.js_bridge)
script = _parse_api_js(self.js_bridge.api)
if _qt_version >= [5, 5]:
qwebchannel_js = QtCore.QFile('://qtwebchannel/qwebchannel.js')
if qwebchannel_js.open(QtCore.QFile.ReadOnly):
source = bytes(qwebchannel_js.readAll()).decode('utf-8')
self.view.page().runJavaScript(source)
self.channel.registerObject('external', self.js_bridge)
qwebchannel_js.close()
elif _qt_version >= [5, 0]:
frame = self.view.page().mainFrame()
_register_window_object()
else:
frame = self.view.page().mainFrame()
_register_window_object()
try: # PyQt4
self.view.page().mainFrame().evaluateJavaScript(script)
except AttributeError: # PyQt5
self.view.page().runJavaScript(script)
self.load_event.set()
@staticmethod
def _convert_string(result):
try:
if result is None or result.isNull():
return None
result = result.toString() # QJsonValue conversion
except AttributeError:
pass
return _convert_string(result)
@staticmethod
# Receive func from subthread and execute it on the main thread
def on_create_window(func):
func()
def create_window(uid, title, url, width, height, resizable, fullscreen, min_size,
confirm_quit, background_color, debug, js_api, webview_ready):
app = QApplication.instance() or QApplication([])
def _create():
browser = BrowserView(uid, title, url, width, height, resizable, fullscreen,
min_size, confirm_quit, background_color, debug, js_api,
webview_ready)
browser.show()
if uid == 'master':
_create()
app.exec_()
else:
i = list(BrowserView.instances.values())[0] # arbitrary instance
i.create_window_trigger.emit(_create)
def set_title(title, uid):
BrowserView.instances[uid].set_title(title)
def get_current_url(uid):
return BrowserView.instances[uid].get_current_url()
def load_url(url, uid):
BrowserView.instances[uid].load_url(url)
def load_html(content, uid):
BrowserView.instances[uid].load_html(content)
def destroy_window(uid):
BrowserView.instances[uid].destroy_()
def toggle_fullscreen(uid):
BrowserView.instances[uid].toggle_fullscreen()
def create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types):
# Create a file filter by parsing allowed file types
file_types = [s.replace(';', ' ') for s in file_types]
file_filter = ';;'.join(file_types)
i = list(BrowserView.instances.values())[0]
return i.create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_filter)
def evaluate_js(script, uid):
return BrowserView.instances[uid].evaluate_js(script)
|
the-stack_0_13575 | #!/usr/bin/env python
from translate.misc import autoencode
from py import test
class TestAutoencode:
type2test = autoencode.autoencode
def test_default_encoding(self):
"""tests that conversion to string uses the encoding attribute"""
s = self.type2test(u'unicode string', 'utf-8')
assert s.encoding == 'utf-8'
assert str(s) == 'unicode string'
s = self.type2test(u'\u20ac')
assert str(self.type2test(u'\u20ac', 'utf-8')) == '\xe2\x82\xac'
def test_uniqueness(self):
"""tests constructor creates unique objects"""
s1 = unicode(u'unicode string')
s2 = unicode(u'unicode string')
assert s1 == s2
assert s1 is s2
s1 = self.type2test(u'unicode string', 'utf-8')
s2 = self.type2test(u'unicode string', 'ascii')
s3 = self.type2test(u'unicode string', 'utf-8')
assert s1 == s2 == s3
assert s1 is not s2
# even though all the attributes are the same, this is a mutable type
# so the objects created must be different
assert s1 is not s3
def test_bad_encoding(self):
"""tests that we throw an exception if we don't know the encoding"""
assert test.raises(ValueError, self.type2test, 'text', 'some-encoding')
|
the-stack_0_13578 | import discord
import asyncio
import io
import aiohttp
import utils
from random_generators import RandomMessageGenerator
from base_client import BaseClient
class DiscordClient(discord.Client, BaseClient):
def __init__(self, discord_config, send_handler):
discord.Client.__init__(self)
self.channel = discord_config.channel_id
self.token = discord_config.client_token
self.send_handler = send_handler
async def on_message(self, message):
if message.author == self.user:
return
if message.channel.id != self.channel:
return
try:
if message.content != None and len(message.content) > 0:
text = (
utils.format_message(
message.author.name + RandomMessageGenerator.get_random_said()
)
+ message.content
)
else:
text = None
if len(message.attachments) > 0:
urls = [a.url for a in message.attachments]
else:
urls = None
self.send_handler(self.get_client_name(), text, urls)
except Exception as e:
print(e)
async def on_ready(self):
print("We have logged in as {0.user}".format(self))
def send_message(self, text=None, urls=None):
self.loop.create_task(
self.send_message_in_loop(self.get_channel(self.channel), text, urls)
)
async def send_message_in_loop(self, channel, message=None, files=None):
try:
if files is not None:
for file in files:
async with aiohttp.ClientSession() as session:
async with session.get(file) as resp:
if resp.status != 200:
return await channel.send("Could not download file...")
data = io.BytesIO(await resp.read())
await channel.send(
file=discord.File(data, "cool_image.png")
)
if message is not None:
await channel.send(message)
except Exception as e:
print(e)
@staticmethod
def get_client_name():
return "Discord"
def is_threadable(self) -> bool:
return False
def run_client(self, *args):
token = args[0]
self.run(token)
def get_run_args(self):
return self.token
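# Illustrative wiring sketch (added for clarity, not part of the original file). It assumes
# discord_config exposes channel_id and client_token as used in __init__, and that
# forward_to_other_clients is the caller-supplied bridge callback:
#   client = DiscordClient(discord_config, send_handler=forward_to_other_clients)
#   client.run_client(client.get_run_args())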
|
the-stack_0_13581 | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import pandas as pd
import os
import unittest
from coremltools._deps import HAS_SKLEARN
from coremltools.converters.sklearn import convert
from coremltools.models.utils import evaluate_classifier,\
evaluate_classifier_with_probabilities, macos_version, is_macos
if HAS_SKLEARN:
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class GlmCassifierTest(unittest.TestCase):
def test_logistic_regression_binary_classification_with_string_labels(self):
self._conversion_and_evaluation_helper_for_logistic_regression(['Foo', 'Bar'])
def test_logistic_regression_multiclass_classification_with_int_labels(self):
self._conversion_and_evaluation_helper_for_logistic_regression([1,2,3,4])
@staticmethod
def _generate_random_data(labels):
import random
random.seed(42)
# Generate some random data
x, y = [], []
for _ in range(100):
x.append([random.gauss(2,3), random.gauss(-1,2)])
y.append(random.choice(labels))
return x, y
def _conversion_and_evaluation_helper_for_logistic_regression(self, class_labels):
options = {
'C': (0.1, 1., 2.),
'fit_intercept': (True, False),
'class_weight': ('balanced', None),
'solver': ('newton-cg', 'lbfgs', 'liblinear', 'sag')
}
# Generate a list of all combinations of options and the default parameters
product = itertools.product(*options.values())
args = [{}] + [dict(zip(options.keys(), p)) for p in product]
x, y = GlmCassifierTest._generate_random_data(class_labels)
column_names = ['x1', 'x2']
df = pd.DataFrame(x, columns=column_names)
for cur_args in args:
print(class_labels, cur_args)
cur_model = LogisticRegression(**cur_args)
cur_model.fit(x, y)
spec = convert(cur_model, input_features=column_names,
output_feature_names='target')
if is_macos() and macos_version() >= (10, 13):
probability_lists = cur_model.predict_proba(x)
df['classProbability'] = [dict(zip(cur_model.classes_, cur_vals)) for cur_vals in probability_lists]
metrics = evaluate_classifier_with_probabilities(spec, df, probabilities='classProbability', verbose=False)
self.assertEquals(metrics['num_key_mismatch'], 0)
self.assertLess(metrics['max_probability_error'], 0.00001)
def test_linear_svc_binary_classification_with_string_labels(self):
self._conversion_and_evaluation_helper_for_linear_svc(['Foo', 'Bar'])
def test_linear_svc_multiclass_classification_with_int_labels(self):
self._conversion_and_evaluation_helper_for_linear_svc([1,2,3,4])
def _conversion_and_evaluation_helper_for_linear_svc(self, class_labels):
ARGS = [ {},
{'C' : .75, 'loss': 'hinge'},
{'penalty': 'l1', 'dual': False},
{'tol': 0.001, 'fit_intercept': False},
{'intercept_scaling': 1.5}
]
x, y = GlmCassifierTest._generate_random_data(class_labels)
column_names = ['x1', 'x2']
df = pd.DataFrame(x, columns=column_names)
for cur_args in ARGS:
print(class_labels, cur_args)
cur_model = LinearSVC(**cur_args)
cur_model.fit(x, y)
spec = convert(cur_model, input_features=column_names,
output_feature_names='target')
if is_macos() and macos_version() >= (10, 13):
df['prediction'] = cur_model.predict(x)
cur_eval_metics = evaluate_classifier(spec, df, verbose=False)
self.assertEquals(cur_eval_metics['num_errors'], 0)
|
the-stack_0_13583 | import tensorflow as tf
import pickle
from model import Model
from utils import build_dict, build_train_draft_dataset, test_batch_iter
import os
with open("args.pickle", "rb") as f:
args = pickle.load(f)
print("Loading dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("test", args.toy)
print("Loading test dataset...")
title_list, test_x = build_train_draft_dataset(word_dict, article_max_len)
test_x_len = [len([y for y in x if y != 0]) for x in test_x]
with tf.Session() as sess:
print("Loading saved model...")
model = Model(reversed_dict, article_max_len, summary_max_len, args, forward_only=True)
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state("./saved_model/")
saver.restore(sess, ckpt.model_checkpoint_path)
story_test_result_list = []
if args.use_atten:
print ("Using Attention")
else:
print ("Not Using Attention")
for index in range(len(test_x)):
print ("testing %d out of %d" % (index, len(test_x)))
inputs = test_x[index]
batches = test_batch_iter(inputs, [0] * len(test_x), args.batch_size, 1)
result = []
for batch_x, _ in batches:
batch_x_len = [len([y for y in x if y != 0]) for x in batch_x]
test_feed_dict = {
model.batch_size: len(batch_x),
model.X: batch_x,
model.X_len: batch_x_len,
}
prediction = sess.run(model.prediction, feed_dict=test_feed_dict)
prediction_output = [[reversed_dict[y] for y in x] for x in prediction[:, 0, :]]
predict_story = ""
for line in prediction_output:
summary = list()
for word in line:
if word == "</s>":
break
if word not in summary:
summary.append(word)
predict_story = predict_story+" ".join(summary)+"<split>"
predict_story = "[{}]{}".format(title_list[index], predict_story)
story_test_result_list.append(predict_story)
if not os.path.exists("result"):
os.mkdir("result")
with open("result/train.txt", "wr") as f:
for story in story_test_result_list:
f.write(story+"\n")
print('Summaries are saved to "train.txt"...')
|
the-stack_0_13585 | import importlib
import inspect
import json
import logging
import os
import re
import sys
import zipfile
from pathlib import Path
import click
def add_to_dict_if_exists(options_dict, initial_dict=None):
    # Use None as the default to avoid sharing one mutable default dict between calls
    if initial_dict is None:
        initial_dict = {}
    for k, v in options_dict.items():
        if v:
            initial_dict[k] = v
    return initial_dict
def convert_to_set(iterable):
if not isinstance(iterable, set):
return set(iterable)
return iterable
def extract_zip(source, dest):
with zipfile.ZipFile(source, 'r') as zip_ref:
zip_ref.extractall(dest)
def generate_path_str(*args):
if not args:
return
path = None
for arg in args:
if not path:
path = Path(arg)
else:
path /= arg
return str(path)
def is_dir(d):
return os.path.isdir(d)
def is_envvar_true(value):
return value in (True, 'True', 'true', '1')
def is_file(f):
return os.path.isfile(f)
def import_all_modules_in_directory(plugins_init_file, existing_commands):
try:
spec = importlib.util.spec_from_file_location('plugins_modules', plugins_init_file)
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
import plugins_modules
from plugins_modules import __all__ as all_plugins_modules
for module in all_plugins_modules:
_module = getattr(plugins_modules, module)
if isinstance(_module, (click.core.Command, click.core.Group)):
existing_commands.add(_module)
except ImportError:
logging.warning(
f'{inspect.stack()[0][3]}; will skip loading plugin: {module}', exc_info=True
)
def make_dirs(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except FileExistsError:
logging.warning(f'{inspect.stack()[0][3]}; will ignore FileExistsError')
def path_exists(file):
if os.path.exists(file):
sys.exit(f'error: {file} already exists')
def paths_exist(files):
for file in files:
path_exists(file)
def read_file(file, type='text'):
with open(file, 'r') as f:
if type == 'json':
return json.loads(f.read())
return f.read()
def remove_file_above_size(file, size_kb=100):
if os.path.getsize(file) > size_kb * 1024:
os.remove(file)
def remove_last_items_from_list(init_list, integer=0):
if integer <= 0:
return init_list
return init_list[:-integer]
def resolve_target_directory(target_directory=None):
if target_directory:
if not os.path.exists(target_directory):
os.makedirs(target_directory)
return str(Path(target_directory).resolve())
return os.getcwd()
def run_func_on_dir_files(dir, func, glob='**/*', args=(), kwargs={}):
state = []
for file_path in Path(resolve_target_directory(dir)).resolve().glob(glob):
_tuple = (str(file_path),)
result = func(*(_tuple + args), **kwargs)
if result:
state.append(result)
return state
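# Illustrative usage sketch (not part of the original module); the "conf" directory, the glob
# and the reuse of read_file below are assumptions chosen only to show the calling pattern:
#   parsed = run_func_on_dir_files("conf", read_file, glob="**/*.json", args=("json",))
#   # -> list of the parsed contents of every JSON file found under conf/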
def run_func_on_iterable(iterable, func, state_op='append', args=(), kwargs={}):
state = []
for item in iterable:
_tuple = (item,)
result = func(*(_tuple + args), **kwargs)
if result:
getattr(state, state_op)(result)
return state
def show_message(msg):
print(msg)
def split_path(path, delimiter='[/\\\\]'):
return re.split(delimiter, path)
def touch(path):
try:
with open(path, 'x'):
os.utime(path, None)
except FileNotFoundError:
os.makedirs(os.path.split(path)[0])
except FileExistsError:
logging.warning(f'{inspect.stack()[0][3]}; will ignore FileExistsError')
def write_file(content, path, fs_write=True, indent=None, eof=True):
if not fs_write:
return
touch(path)
with open(path, 'w') as f:
if isinstance(content, str):
if eof:
content = f'{content}\n'
f.write(content)
elif isinstance(content, dict) or isinstance(content, list):
if isinstance(indent, int):
content = f'{json.dumps(content, indent=indent)}'
else:
content = f'{json.dumps(content)}'
if eof:
f.write(f'{content}\n')
else:
f.write(content)
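# Illustrative usage (assumed paths, added only to show the str and dict/list branches):
#   write_file("plain text", "out/notes.txt")                  # writes the text plus a trailing newline
#   write_file({"name": "demo"}, "out/config.json", indent=2)  # pretty-printed JSON plus newline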
def write_zip(file, content):
touch(file)
with open(file, 'wb') as f:
f.write(content)
|
the-stack_0_13588 | from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error
"""
线性回归:梯度下降法
:return:None
"""
# 1.获取数据
data = load_boston()
# 2.数据集划分
x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)
# 3.特征工程-标准化
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.fit_transform(x_test)
# 4.机器学习-线性回归(特征方程)
estimator = SGDRegressor(max_iter=1000)
estimator.fit(x_train, y_train)
# 5.模型评估
# 5.1 获取系数等值
y_predict = estimator.predict(x_test)
print("预测值为:\n", y_predict)
print("模型中的系数为:\n", estimator.coef_)
print("模型中的偏置为:\n", estimator.intercept_)
# 5.2 评价
# 均方误差
error = mean_squared_error(y_test, y_predict)
print("误差为:\n", error) |
the-stack_0_13589 |
import os.path
import clr
project_dir = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(os.path.join(project_dir, "..", "TestStack.White.0.13.3\\lib\\net40\\"))
sys.path.append(os.path.join(project_dir, "..", "Castle.Core.3.3.0\\lib\\net40-client\\"))
clr.AddReferenceByName('TestStack.White')
from TestStack.White.UIItems.Finders import *
from TestStack.White.InputDevices import Keyboard
from TestStack.White.WindowsAPI import KeyboardInput
clr.AddReferenceByName('UIAutomationTypes, Version=4.0.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35')
from System.Windows.Automation import ControlType
class GroupHelper:
def __init__(self, app):
self.app = app
def create(self, name):
modal = self.open_group_editor()
modal.Get(SearchCriteria.ByAutomationId("uxNewAddressButton")).Click()
modal.Get(SearchCriteria.ByControlType(ControlType.Edit)).Enter(name)
Keyboard.Instance.PressSpecialKey(KeyboardInput.SpecialKeys.RETURN)
self.close_group_editor(modal)
def count(self):
self.open_group_editor()
return len(self.get_group_list())
def get_group_list(self):
modal = self.open_group_editor()
tree = modal.Get(SearchCriteria.ByAutomationId("uxAddressTreeView"))
root = tree.Nodes[0]
l = [node.Text for node in root.Nodes]
self.close_group_editor(modal)
return l
def open_group_editor(self):
main_window = self.app.main_window
main_window.Get(SearchCriteria.ByAutomationId("groupButton")).Click()
modal = main_window.ModalWindow("Group editor")
return modal
def close_group_editor(self, modal):
modal.Get(SearchCriteria.ByAutomationId("uxCloseAddressButton")).Click()
def delete_first(self):
modal = self.open_group_editor()
tree = modal.Get(SearchCriteria.ByAutomationId("uxAddressTreeView"))
root = tree.Nodes[0]
root.Nodes[0].Select()
modal.Get(SearchCriteria.ByAutomationId("uxDeleteAddressButton")).Click()
modal.Get(SearchCriteria.ByAutomationId("uxOKAddressButton")).Click()
self.close_group_editor(modal)
|
the-stack_0_13591 | import json
from pyaltherma.const import VALID_RESPONSE_CODES
from pyaltherma.errors import PathException, AlthermaResponseException
def query_object(o, json_path, raise_exception=False, convert_to_none=True):
location_steps = json_path.split('/')
if isinstance(o, str):
o = json.loads(o)
for idx, step in enumerate(location_steps):
if step not in o:
if raise_exception:
raise PathException(f'{json_path} step: {step} not found in object')
if idx == len(location_steps) - 1 and convert_to_none:
return None
o = o.get(step, {})
return o
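# Illustrative example (added for clarity, not part of the original module); the payload shape
# below is an assumption made only to demonstrate the slash-separated path syntax:
#   query_object('{"m2m:rsp": {"rsc": 2000, "pc": {"con": 21.5}}}', "m2m:rsp/pc/con")
#   # -> 21.5
#   query_object('{"m2m:rsp": {"rsc": 2000}}', "m2m:rsp/missing")
#   # -> None (missing final step with convert_to_none=True)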
def assert_response(request, response):
resp_code = query_object(response, 'm2m:rsp/rsc')
if resp_code not in VALID_RESPONSE_CODES:
raise AlthermaResponseException(f'Response code {resp_code} is invalid.')
|
the-stack_0_13594 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
flags.DEFINE_integer("batch_size", 16, "Batch size.")
flags.DEFINE_string("model_dir", None, "Model directory")
flags.DEFINE_integer("tf_random_seed", None,
"Random seed for tensorflow")
flags.DEFINE_integer("num_eval_steps", None,
"Number of steps to take during evaluation.")
flags.DEFINE_integer("num_train_steps", None,
"Number of steps to take during training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"Number of steps between checkpoint saves.")
flags.DEFINE_integer("eval_throttle_secs", 600,
"Minimum number of seconds to wait between evaluations")
flags.DEFINE_integer("eval_start_delay_secs", 120,
"Number of seconds to wait before starting evaluations.")
flags.DEFINE_integer("keep_checkpoint_max", 5,
"Max number of checkpoints to keep")
FLAGS = flags.FLAGS
def run_experiment(model_fn, train_input_fn, eval_input_fn, exporters=None):
"""Run experiment."""
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
tf_random_seed=FLAGS.tf_random_seed,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoint_max)
estimator = tf.estimator.Estimator(
config=run_config,
model_fn=model_fn,
model_dir=FLAGS.model_dir)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn,
max_steps=FLAGS.num_train_steps)
eval_spec = tf.estimator.EvalSpec(
name="default",
input_fn=eval_input_fn,
exporters=exporters,
start_delay_secs=FLAGS.eval_start_delay_secs,
throttle_secs=FLAGS.eval_throttle_secs,
steps=FLAGS.num_eval_steps)
tf.logging.set_verbosity(tf.logging.INFO)
tf.estimator.train_and_evaluate(
estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec)
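# Minimal wiring sketch (illustrative only); my_model_fn, my_train_input_fn and
# my_eval_input_fn are assumed to be defined by the calling experiment script:
#   run_experiment(model_fn=my_model_fn,
#                  train_input_fn=my_train_input_fn,
#                  eval_input_fn=my_eval_input_fn)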
|
the-stack_0_13596 | import os
import re
from dataclasses import dataclass
from pathlib import PurePath
from typing import Awaitable, List, Optional, Pattern, Set, Union
from urllib.parse import urljoin
from bs4 import BeautifulSoup, Tag
from ..config import Config
from ..logging import ProgressBar, log
from ..output_dir import FileSink
from ..utils import soupify
from .crawler import CrawlError
from .http_crawler import HttpCrawler, HttpCrawlerSection
class KitIpdCrawlerSection(HttpCrawlerSection):
def target(self) -> str:
target = self.s.get("target")
if not target:
self.missing_value("target")
if not target.startswith("https://"):
self.invalid_value("target", target, "Should be a URL")
return target
def link_regex(self) -> Pattern[str]:
regex = self.s.get("link_regex", r"^.*/[^/]*\.(?:pdf|zip|c|java)$")
return re.compile(regex)
@dataclass(unsafe_hash=True)
class KitIpdFile:
name: str
url: str
@dataclass
class KitIpdFolder:
name: str
files: List[KitIpdFile]
def explain(self) -> None:
log.explain_topic(f"Folder {self.name!r}")
for file in self.files:
log.explain(f"File {file.name!r}")
def __hash__(self) -> int:
return self.name.__hash__()
class KitIpdCrawler(HttpCrawler):
def __init__(
self,
name: str,
section: KitIpdCrawlerSection,
config: Config,
):
super().__init__(name, section, config)
self._url = section.target()
self._file_regex = section.link_regex()
async def _run(self) -> None:
maybe_cl = await self.crawl(PurePath("."))
if not maybe_cl:
return
tasks: List[Awaitable[None]] = []
async with maybe_cl:
for item in await self._fetch_items():
if isinstance(item, KitIpdFolder):
tasks.append(self._crawl_folder(item))
else:
# Orphan files are placed in the root folder
tasks.append(self._download_file(PurePath("."), item))
await self.gather(tasks)
async def _crawl_folder(self, folder: KitIpdFolder) -> None:
path = PurePath(folder.name)
if not await self.crawl(path):
return
tasks = [self._download_file(path, file) for file in folder.files]
await self.gather(tasks)
async def _download_file(self, parent: PurePath, file: KitIpdFile) -> None:
element_path = parent / file.name
maybe_dl = await self.download(element_path)
if not maybe_dl:
return
async with maybe_dl as (bar, sink):
await self._stream_from_url(file.url, sink, bar)
async def _fetch_items(self) -> Set[Union[KitIpdFile, KitIpdFolder]]:
page = await self.get_page()
elements: List[Tag] = self._find_file_links(page)
items: Set[Union[KitIpdFile, KitIpdFolder]] = set()
for element in elements:
folder_label = self._find_folder_label(element)
if folder_label:
folder = self._extract_folder(folder_label)
if folder not in items:
items.add(folder)
folder.explain()
else:
file = self._extract_file(element)
items.add(file)
log.explain_topic(f"Orphan file {file.name!r}")
log.explain("Attributing it to root folder")
return items
def _extract_folder(self, folder_tag: Tag) -> KitIpdFolder:
files: List[KitIpdFile] = []
name = folder_tag.getText().strip()
container: Tag = folder_tag.findNextSibling(name="table")
for link in self._find_file_links(container):
files.append(self._extract_file(link))
return KitIpdFolder(name, files)
@staticmethod
def _find_folder_label(file_link: Tag) -> Optional[Tag]:
enclosing_table: Tag = file_link.findParent(name="table")
if enclosing_table is None:
return None
return enclosing_table.findPreviousSibling(name=re.compile("^h[1-6]$"))
def _extract_file(self, link: Tag) -> KitIpdFile:
url = self._abs_url_from_link(link)
name = os.path.basename(url)
return KitIpdFile(name, url)
def _find_file_links(self, tag: Union[Tag, BeautifulSoup]) -> List[Tag]:
return tag.findAll(name="a", attrs={"href": self._file_regex})
def _abs_url_from_link(self, link_tag: Tag) -> str:
return urljoin(self._url, link_tag.get("href"))
async def _stream_from_url(self, url: str, sink: FileSink, bar: ProgressBar) -> None:
async with self.session.get(url, allow_redirects=False) as resp:
if resp.status == 403:
raise CrawlError("Received a 403. Are you within the KIT network/VPN?")
if resp.content_length:
bar.set_total(resp.content_length)
async for data in resp.content.iter_chunked(1024):
sink.file.write(data)
bar.advance(len(data))
sink.done()
async def get_page(self) -> BeautifulSoup:
async with self.session.get(self._url) as request:
return soupify(await request.read())
|
the-stack_0_13597 | import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_proxy_next(self):
arr = [4, 5, 6]
def iterator_func():
yield from arr
it = iterator_func()
class IteratesWeakly:
def __iter__(self):
return weakref.proxy(it)
weak_it = IteratesWeakly()
# Calls proxy.__next__
self.assertEqual(list(weak_it), [4, 5, 6])
def test_proxy_bad_next(self):
# bpo-44720: PyIter_Next() shouldn't be called if the reference
# isn't an iterator.
not_an_iterator = lambda: 0
class A:
def __iter__(self):
return weakref.proxy(not_an_iterator)
a = A()
msg = "Weakref proxy referenced a non-iterator"
with self.assertRaisesRegex(TypeError, msg):
list(a)
def test_proxy_reversed(self):
class MyObj:
def __len__(self):
return 3
def __reversed__(self):
return iter('cba')
obj = MyObj()
self.assertEqual("".join(reversed(weakref.proxy(obj))), "cba")
def test_proxy_hash(self):
class MyObj:
def __hash__(self):
return 42
obj = MyObj()
with self.assertRaises(TypeError):
hash(weakref.proxy(obj))
class MyObj:
__hash__ = None
obj = MyObj()
with self.assertRaises(TypeError):
hash(weakref.proxy(obj))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since underlying dict is ordered, first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_valued_union_operators(self):
a = C()
b = C()
c = C()
wvd1 = weakref.WeakValueDictionary({1: a})
wvd2 = weakref.WeakValueDictionary({1: b, 2: a})
wvd3 = wvd1.copy()
d1 = {1: c, 3: b}
pairs = [(5, c), (6, b)]
tmp1 = wvd1 | wvd2 # Between two WeakValueDictionaries
self.assertEqual(dict(tmp1), dict(wvd1) | dict(wvd2))
self.assertIs(type(tmp1), weakref.WeakValueDictionary)
wvd1 |= wvd2
self.assertEqual(wvd1, tmp1)
tmp2 = wvd2 | d1 # Between WeakValueDictionary and mapping
self.assertEqual(dict(tmp2), dict(wvd2) | d1)
self.assertIs(type(tmp2), weakref.WeakValueDictionary)
wvd2 |= d1
self.assertEqual(wvd2, tmp2)
tmp3 = wvd3.copy() # Between WeakValueDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wvd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakValueDictionary)
tmp4 = d1 | wvd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wvd3))
self.assertIs(type(tmp4), weakref.WeakValueDictionary)
del a
self.assertNotIn(2, tmp1)
self.assertNotIn(2, tmp2)
self.assertNotIn(1, tmp3)
self.assertNotIn(1, tmp4)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
o1 = C()
o2 = C()
o3 = C()
wkd1 = weakref.WeakKeyDictionary({o1: 1, o2: 2})
wkd2 = weakref.WeakKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: '5', o3: '6'}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), weakref.WeakKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), weakref.WeakKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), weakref.WeakKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
the-stack_0_13598 | """
Test basic properties of implemented datasets.
"""
import unittest
import numpy as np
from classicdata import Ionosphere
from classicdata.dataset import Dataset, CitationWarning
class TestLoading(unittest.TestCase):
def test_loading(self):
with self.assertWarns(CitationWarning):
for DatasetImplementation in Dataset.__subclasses__():
with self.subTest(Dataset=DatasetImplementation):
dataset_instance = DatasetImplementation()
# `.loaded` should not be set.
self.assertFalse(dataset_instance.loaded)
# Number of points and number of features should be correctly defined.
self.assertEqual(
dataset_instance.points.shape,
(dataset_instance.n_samples, dataset_instance.n_features),
)
# Number of labels must be defined correctly.
self.assertEqual(
dataset_instance.labels.shape, (dataset_instance.n_samples,)
)
# Convert labels “there and back”.
recoded_labels = dataset_instance.label_encoder.transform(
dataset_instance.decode_labels(dataset_instance.labels)
)
self.assertTrue(np.all(recoded_labels == dataset_instance.labels))
# `.loaded` should be set.
self.assertTrue(dataset_instance.loaded)
# Count random split.
(
train_points,
train_labels,
test_points,
test_labels,
) = dataset_instance.split_for_training()
self.assertEqual(
train_points.shape[0] + test_points.shape[0],
dataset_instance.n_samples,
)
self.assertEqual(
train_labels.shape[0] + test_labels.shape[0],
dataset_instance.n_samples,
)
def test_zero_test_split(self):
dataset = Ionosphere() # Arbitrarily chosen.
dataset.load()
(
train_points,
train_labels,
test_points,
test_labels,
) = dataset.split_for_training(test_size=0)
self.assertEqual(train_points.shape[0], dataset.n_samples)
self.assertEqual(train_labels.shape[0], dataset.n_samples)
self.assertEqual(test_points.shape[0], 0)
self.assertEqual(test_labels.shape[0], 0)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_13600 | '''
Provides various utility functions.
This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc
Copyright (c) 2011-2014 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os
import re
import six
import socket
import stat
import subprocess
import uuid
from contextlib import contextmanager
import pyudev
_CONTEXT = pyudev.Context()
class RTSLibError(Exception):
'''
Generic rtslib error.
'''
pass
class RTSLibALUANotSupported(RTSLibError):
'''
Backend does not support ALUA.
'''
pass
class RTSLibBrokenLink(RTSLibError):
'''
Broken link in configfs, i.e. missing LUN storage object.
'''
pass
class RTSLibNotInCFS(RTSLibError):
'''
The underlying configfs object does not exist. Happens when
calling methods of an object that is instantiated but have
been deleted from congifs, or when trying to lookup an
object that does not exist.
'''
pass
def fwrite(path, string):
'''
This function writes a string to a file, and takes care of
opening it and closing it. If the file does not exist, it
will be created.
>>> from rtslib.utils import *
>>> fwrite("/tmp/test", "hello")
>>> fread("/tmp/test")
'hello'
@param path: The file to write to.
@type path: string
@param string: The string to write to the file.
@type string: string
'''
with open(path, 'w') as file_fd:
file_fd.write(str(string))
def fread(path):
'''
This function reads the contents of a file.
It takes care of opening and closing it.
>>> from rtslib.utils import *
>>> fwrite("/tmp/test", "hello")
>>> fread("/tmp/test")
'hello'
>>> fread("/tmp/notexistingfile") # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IOError: [Errno 2] No such file or directory: '/tmp/notexistingfile'
@param path: The path to the file to read from.
@type path: string
@return: A string containing the file's contents.
'''
with open(path, 'r') as file_fd:
return file_fd.read().strip()
def is_dev_in_use(path):
'''
This function will check if the device or file referenced by path is
already mounted or used as a storage object backend. It works by trying to
open the path with O_EXCL flag, which will fail if someone else already
did. Note that the file is closed before the function returns, so this
does not guaranteed the device will still be available after the check.
@param path: path to the file of device to check
@type path: string
    @return: A boolean, True if we cannot get exclusive descriptor on the path,
False if we can.
'''
path = os.path.realpath(str(path))
try:
file_fd = os.open(path, os.O_EXCL|os.O_NDELAY)
except OSError:
return True
else:
os.close(file_fd)
return False
def _get_size_for_dev(device):
'''
@param device: the device
@type device: pyudev.Device
@return: the size in logical blocks, 0 if none found
@rtype: int
'''
attributes = device.attributes
try:
sect_size = attributes.asint('size')
except (KeyError, UnicodeDecodeError, ValueError):
return 0
try:
logical_block_size = attributes.asint('queue/logical_block_size')
except (KeyError, UnicodeDecodeError, ValueError):
return 0
return (sect_size * 512) // logical_block_size
def get_size_for_blk_dev(path):
'''
@param path: The path to a block device
@type path: string
@return: The size in logical blocks of the device
@raises: DeviceNotFoundError if corresponding device not found
@raises: EnvironmentError, ValueError in some situations
'''
    device = pyudev.Device.from_device_file(_CONTEXT, os.path.realpath(str(path)))
return _get_size_for_dev(device)
get_block_size = get_size_for_blk_dev
def get_size_for_disk_name(name):
'''
@param name: a kernel disk name, as found in /proc/partitions
@type name: string
@return: The size in logical blocks of a disk-type block device.
@raises: DeviceNotFoundError
'''
# size is in 512-byte sectors, we want to return number of logical blocks
def get_size(name):
"""
:param str name: name of block device
:raises DeviceNotFoundError: if device not found
"""
device = pyudev.Device.from_name(_CONTEXT, 'block', name)
return _get_size_for_dev(device)
# Disk names can include '/' (e.g. 'cciss/c0d0') but these are changed to
# '!' when listed in /sys/block.
# in pyudev 0.19 it should no longer be necessary to swap '/'s in name
name = name.replace("/", "!")
try:
return get_size(name)
except pyudev.DeviceNotFoundError:
# Maybe it's a partition?
m = re.search(r'^([a-z0-9_\-!]+?)(\d+)$', name)
if m:
# If disk name ends with a digit, Linux sticks a 'p' between it and
# the partition number in the blockdev name.
disk = m.groups()[0]
if disk[-1] == 'p' and disk[-2].isdigit():
disk = disk[:-1]
return get_size(m.group())
else:
raise
def get_blockdev_type(path):
'''
This function returns a block device's type.
Example: 0 is TYPE_DISK
If no match is found, None is returned.
>>> from rtslib.utils import *
>>> get_blockdev_type("/dev/sda")
0
>>> get_blockdev_type("/dev/sr0")
5
>>> get_blockdev_type("/dev/scd0")
5
>>> get_blockdev_type("/dev/nodevicehere") is None
True
@param path: path to the block device
@type path: string
@return: An int for the block device type, or None if not a block device.
'''
try:
device = pyudev.Device.from_device_file(_CONTEXT, path)
except (pyudev.DeviceNotFoundError, EnvironmentError, ValueError):
return None
if device.subsystem != u'block':
return None
attributes = device.attributes
disk_type = 0
try:
disk_type = attributes.asint('device/type')
except (KeyError, UnicodeDecodeError, ValueError):
pass
return disk_type
get_block_type = get_blockdev_type
def convert_scsi_path_to_hctl(path):
'''
This function returns the SCSI ID in H:C:T:L form for the block
device being mapped to the udev path specified.
If no match is found, None is returned.
>>> import rtslib.utils as utils
>>> utils.convert_scsi_path_to_hctl('/dev/scd0')
(2, 0, 0, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sr0')
(2, 0, 0, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sda')
(3, 0, 0, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sda1')
>>> utils.convert_scsi_path_to_hctl('/dev/sdb')
(3, 0, 1, 0)
>>> utils.convert_scsi_path_to_hctl('/dev/sdc')
(3, 0, 2, 0)
@param path: The udev path to the SCSI block device.
@type path: string
@return: An (host, controller, target, lun) tuple of integer
values representing the SCSI ID of the device, or raise RTSLibError.
'''
try:
path = os.path.realpath(path)
device = pyudev.Device.from_device_file(_CONTEXT, path)
parent = device.find_parent(subsystem='scsi')
return [int(data) for data in parent.sys_name.split(':')]
except:
raise RTSLibError("Could not convert scsi path to hctl")
def convert_scsi_hctl_to_path(host, controller, target, lun):
'''
This function returns a udev path pointing to the block device being
mapped to the SCSI device that has the provided H:C:T:L.
>>> import rtslib.utils as utils
>>> utils.convert_scsi_hctl_to_path(0,0,0,0)
''
>>> utils.convert_scsi_hctl_to_path(2,0,0,0) # doctest: +ELLIPSIS
'/dev/s...0'
>>> utils.convert_scsi_hctl_to_path(3,0,2,0)
'/dev/sdc'
@param host: The SCSI host id.
@type host: int
@param controller: The SCSI controller id.
@type controller: int
@param target: The SCSI target id.
@type target: int
@param lun: The SCSI Logical Unit Number.
@type lun: int
@return: A string for the canonical path to the device, or raise RTSLibError.
'''
try:
host = int(host)
controller = int(controller)
target = int(target)
lun = int(lun)
except ValueError:
raise RTSLibError(
"The host, controller, target and lun parameter must be integers")
hctl = [host, controller, target, lun]
try:
        scsi_device = pyudev.Device.from_name(_CONTEXT, 'scsi', ':'.join(map(str, hctl)))
except pyudev.DeviceNotFoundError:
raise RTSLibError("Could not find path for SCSI hctl")
devices = _CONTEXT.list_devices(
subsystem='block',
parent=scsi_device
)
path = next((dev.device_node for dev in devices), '')
    if path is None:
raise RTSLibError("Could not find path for SCSI hctl")
return path
def generate_wwn(wwn_type):
'''
Generates a random WWN of the specified type:
- unit_serial: T10 WWN Unit Serial.
- iqn: iSCSI IQN
- naa: SAS NAA address
@param wwn_type: The WWN address type.
@type wwn_type: str
@returns: A string containing the WWN.
'''
wwn_type = wwn_type.lower()
if wwn_type == 'free':
return str(uuid.uuid4())
if wwn_type == 'unit_serial':
return str(uuid.uuid4())
elif wwn_type == 'iqn':
localname = socket.gethostname().split(".")[0]
localarch = os.uname()[4].replace("_", "")
prefix = "iqn.2003-01.org.linux-iscsi.%s.%s" % (localname, localarch)
prefix = prefix.strip().lower()
serial = "sn.%s" % str(uuid.uuid4())[24:]
return "%s:%s" % (prefix, serial)
elif wwn_type == 'naa':
# see http://standards.ieee.org/develop/regauth/tut/fibre.pdf
# 5 = IEEE registered
# 001405 = OpenIB OUI (they let us use it I guess?)
# rest = random
return "naa.5001405" + uuid.uuid4().hex[-9:]
elif wwn_type == 'eui':
return "eui.001405" + uuid.uuid4().hex[-10:]
else:
raise ValueError("Unknown WWN type: %s" % wwn_type)
def colonize(str):
'''
helper function to add colons every 2 chars
'''
return ":".join(str[i:i+2] for i in range(0, len(str), 2))
def _cleanse_wwn(wwn_type, wwn):
'''
Some wwns may have alternate text representations. Adjust to our
preferred representation.
'''
wwn = str(wwn.strip()).lower()
if wwn_type in ('naa', 'eui', 'ib'):
if wwn.startswith("0x"):
wwn = wwn[2:]
wwn = wwn.replace("-", "")
wwn = wwn.replace(":", "")
if not (wwn.startswith("naa.") or wwn.startswith("eui.") or \
wwn.startswith("ib.")):
wwn = wwn_type + "." + wwn
return wwn
def normalize_wwn(wwn_types, wwn):
'''
Take a WWN as given by the user and convert it to a standard text
representation.
Returns (normalized_wwn, wwn_type), or exception if invalid wwn.
'''
    wwn_test = {
        'free': lambda wwn: True,
        'iqn': lambda wwn: \
            re.match(r"iqn\.[0-9]{4}-[0-1][0-9]\..*\..*", wwn) \
            and not re.search(' ', wwn) \
            and not re.search('_', wwn),
        'naa': lambda wwn: re.match(r"naa\.[125][0-9a-fA-F]{15}$", wwn),
        'eui': lambda wwn: re.match(r"eui\.[0-9a-f]{16}$", wwn),
        'ib': lambda wwn: re.match(r"ib\.[0-9a-f]{32}$", wwn),
        'unit_serial': lambda wwn: \
            re.match(r"[0-9A-Fa-f]{8}(-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$", wwn),
    }
for wwn_type in wwn_types:
clean_wwn = _cleanse_wwn(wwn_type, wwn)
found_type = wwn_test[wwn_type](clean_wwn)
if found_type:
break
else:
raise RTSLibError("WWN not valid as: %s" % ", ".join(wwn_types))
return (clean_wwn, wwn_type)
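# Illustrative example of the normalization above (not a doctest; the input
# value is an assumption chosen for demonstration):
#   normalize_wwn(('naa', 'eui', 'iqn'), '0x5001405ABCDEF012')
# lowercases the text, strips the '0x' prefix and prepends the matching type,
# returning ('naa.5001405abcdef012', 'naa').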
def list_loaded_kernel_modules():
'''
List all currently loaded kernel modules
'''
return [line.split(" ")[0] for line in
fread("/proc/modules").split('\n') if line]
def modprobe(module):
'''
Load the specified kernel module if needed.
@param module: The name of the kernel module to be loaded.
@type module: str
'''
if module in list_loaded_kernel_modules():
return
try:
import kmod
except ImportError:
process = subprocess.Popen(("modprobe", module),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
if process.returncode != 0:
raise RTSLibError(stderrdata)
return
try:
kmod.Kmod().modprobe(module)
except kmod.error.KmodError:
raise RTSLibError("Could not load module: %s" % module)
def mount_configfs():
if not os.path.ismount("/sys/kernel/config"):
cmdline = "mount -t configfs none /sys/kernel/config"
process = subprocess.Popen(cmdline.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
if process.returncode != 0 and not os.path.ismount(
"/sys/kernel/config"):
raise RTSLibError("Cannot mount configfs")
def dict_remove(d, items):
for item in items:
if item in d:
del d[item]
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
#
# These two functions are meant to be used with functools.partial and
# properties.
#
# 'ignore=True' will silently return None if the attribute is not present.
# This is good for attributes only present in some kernel versions.
#
# All curried arguments should be keyword args.
#
# These should only be used for attributes that follow the convention of
# "NULL" having a special sentinel value, such as auth attributes, and
# that return a string.
#
def _get_auth_attr(self, attribute, ignore=False):
self._check_self()
path = "%s/%s" % (self.path, attribute)
try:
value = fread(path)
except:
if not ignore:
raise
return None
if value == "NULL":
return ''
else:
return value
# Auth params take the string "NULL" to unset the attribute
def _set_auth_attr(self, value, attribute, ignore=False):
self._check_self()
path = "%s/%s" % (self.path, attribute)
value = value.strip()
if value == "NULL":
raise RTSLibError("'NULL' is not a permitted value")
if len(value) > 255:
raise RTSLibError("Value longer than maximum length of 255")
if value == '':
value = "NULL"
try:
fwrite(path, "%s" % value)
except:
if not ignore:
raise
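# Illustrative pattern for the two helpers above (an assumption based on the
# comment block preceding them, not code taken from the original module; the
# class and the 'auth/userid' attribute path are hypothetical):
#
#   from functools import partial
#
#   class SomeConfigFSNode(CFSNode):
#       chap_userid = property(
#           partial(_get_auth_attr, attribute='auth/userid', ignore=True),
#           partial(_set_auth_attr, attribute='auth/userid', ignore=True))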
def set_attributes(obj, attr_dict, err_func):
for name, value in six.iteritems(attr_dict):
try:
obj.set_attribute(name, value)
except RTSLibError as e:
err_func(str(e))
def set_parameters(obj, param_dict, err_func):
for name, value in six.iteritems(param_dict):
try:
obj.set_parameter(name, value)
except RTSLibError as e:
err_func(str(e))
def _test():
'''Run the doctests'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
the-stack_0_13603 | from . import SentenceEvaluator, SimilarityFunction
import logging
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
from typing import List
from ..readers import InputExample
logger = logging.getLogger(__name__)
class TripletEvaluator(SentenceEvaluator):
"""
Evaluate a model based on a triplet: (sentence, positive_example, negative_example).
Checks if distance(sentence, positive_example) < distance(sentence, negative_example).
"""
def __init__(
self,
anchors: List[str],
positives: List[str],
negatives: List[str],
main_distance_function: SimilarityFunction = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
):
"""
:param anchors: Sentences to check similarity to. (e.g. a query)
:param positives: List of positive sentences
:param negatives: List of negative sentences
:param main_distance_function: One of 0 (Cosine), 1 (Euclidean) or 2 (Manhattan). Defaults to None, returning all 3.
:param name: Name for the output
:param batch_size: Batch size used to compute embeddings
:param show_progress_bar: If true, prints a progress bar
:param write_csv: Write results to a CSV file
"""
self.anchors = anchors
self.positives = positives
self.negatives = negatives
self.name = name
assert len(self.anchors) == len(self.positives)
assert len(self.anchors) == len(self.negatives)
self.main_distance_function = main_distance_function
self.batch_size = batch_size
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file: str = "triplet_evaluation" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy_cosinus", "accuracy_manhatten", "accuracy_euclidean"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
anchors = []
positives = []
negatives = []
for example in examples:
anchors.append(example.texts[0])
positives.append(example.texts[1])
negatives.append(example.texts[2])
return cls(anchors, positives, negatives, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("TripletEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
num_triplets = 0
num_correct_cos_triplets, num_correct_manhatten_triplets, num_correct_euclidean_triplets = 0, 0, 0
embeddings_anchors = model.encode(
self.anchors, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
embeddings_positives = model.encode(
self.positives, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
embeddings_negatives = model.encode(
self.negatives, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
# Cosine distance
pos_cos_distance = paired_cosine_distances(embeddings_anchors, embeddings_positives)
neg_cos_distances = paired_cosine_distances(embeddings_anchors, embeddings_negatives)
# Manhattan
pos_manhattan_distance = paired_manhattan_distances(embeddings_anchors, embeddings_positives)
neg_manhattan_distances = paired_manhattan_distances(embeddings_anchors, embeddings_negatives)
# Euclidean
pos_euclidean_distance = paired_euclidean_distances(embeddings_anchors, embeddings_positives)
neg_euclidean_distances = paired_euclidean_distances(embeddings_anchors, embeddings_negatives)
for idx in range(len(pos_cos_distance)):
num_triplets += 1
if pos_cos_distance[idx] < neg_cos_distances[idx]:
num_correct_cos_triplets += 1
if pos_manhattan_distance[idx] < neg_manhattan_distances[idx]:
num_correct_manhatten_triplets += 1
if pos_euclidean_distance[idx] < neg_euclidean_distances[idx]:
num_correct_euclidean_triplets += 1
accuracy_cos = num_correct_cos_triplets / num_triplets
accuracy_manhattan = num_correct_manhatten_triplets / num_triplets
accuracy_euclidean = num_correct_euclidean_triplets / num_triplets
logger.info("Accuracy Cosine Distance: \t{:.2f}".format(accuracy_cos * 100))
logger.info("Accuracy Manhattan Distance:\t{:.2f}".format(accuracy_manhattan * 100))
logger.info("Accuracy Euclidean Distance:\t{:.2f}\n".format(accuracy_euclidean * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy_cos, accuracy_manhattan, accuracy_euclidean])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy_cos, accuracy_manhattan, accuracy_euclidean])
if self.main_distance_function == SimilarityFunction.COSINE:
return accuracy_cos
if self.main_distance_function == SimilarityFunction.MANHATTAN:
return accuracy_manhattan
if self.main_distance_function == SimilarityFunction.EUCLIDEAN:
return accuracy_euclidean
return max(accuracy_cos, accuracy_manhattan, accuracy_euclidean)
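# Usage sketch (illustrative; `model` and `dev_triplets` are assumed to exist):
# given a trained SentenceTransformer model and a list of InputExample triplets
# (anchor, positive, negative), the evaluator is typically driven like this:
#
#   evaluator = TripletEvaluator.from_input_examples(dev_triplets, name="dev")
#   accuracy = evaluator(model, output_path="./eval", epoch=0, steps=-1)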
|
the-stack_0_13605 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import math
import time
import tempfile
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative, ProgramTranslator
from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from predictor_utils import PredictorTools
SEED = 2020
IMAGENET1000 = 1281167
base_lr = 0.001
momentum_rate = 0.9
l2_decay = 1e-4
# NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout.
batch_size = 2
epoch_num = 1
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
program_translator = ProgramTranslator()
if fluid.is_compiled_with_cuda():
fluid.set_flags({'FLAGS_cudnn_deterministic': True})
def optimizer_setting(parameter_list=None):
optimizer = fluid.optimizer.Momentum(
learning_rate=base_lr,
momentum=momentum_rate,
regularization=fluid.regularizer.L2Decay(l2_decay),
parameter_list=parameter_list)
return optimizer
class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
bias_attr=False)
self._batch_norm = BatchNorm(num_filters, act=act)
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class BottleneckBlock(fluid.dygraph.Layer):
def __init__(self, num_channels, num_filters, stride, shortcut=True):
super(BottleneckBlock, self).__init__()
self.conv0 = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters,
filter_size=1,
act='relu')
self.conv1 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu')
self.conv2 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters * 4,
filter_size=1,
act=None)
if not shortcut:
self.short = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters * 4,
filter_size=1,
stride=stride)
self.shortcut = shortcut
self._num_channels_out = num_filters * 4
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2)
layer_helper = fluid.layer_helper.LayerHelper(
self.full_name(), act='relu')
return layer_helper.append_activation(y)
class ResNet(fluid.dygraph.Layer):
def __init__(self, layers=50, class_dim=102):
super(ResNet, self).__init__()
self.layers = layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_channels = [64, 256, 512, 1024]
num_filters = [64, 128, 256, 512]
self.conv = ConvBNLayer(
num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
self.pool2d_max = Pool2D(
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.bottleneck_block_list = []
for block in range(len(depth)):
shortcut = False
for i in range(depth[block]):
bottleneck_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
BottleneckBlock(
num_channels=num_channels[block]
if i == 0 else num_filters[block] * 4,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
shortcut=shortcut))
self.bottleneck_block_list.append(bottleneck_block)
shortcut = True
self.pool2d_avg = Pool2D(
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
stdv = 1.0 / math.sqrt(2048 * 1.0)
self.out = Linear(
self.pool2d_avg_output,
class_dim,
act='softmax',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
def forward(self, inputs):
y = self.conv(inputs)
y = self.pool2d_max(y)
for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y)
y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
pred = self.out(y)
return pred
def reader_decorator(reader):
def __reader__():
for item in reader():
img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
label = np.array(item[1]).astype('int64').reshape(1)
yield img, label
return __reader__
class ResNetHelper:
def __init__(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.model_save_dir = os.path.join(self.temp_dir.name, 'inference')
self.model_save_prefix = os.path.join(self.model_save_dir, 'resnet')
self.model_filename = 'resnet' + INFER_MODEL_SUFFIX
self.params_filename = 'resnet' + INFER_PARAMS_SUFFIX
self.dy_state_dict_save_path = os.path.join(self.temp_dir.name,
'resnet.dygraph')
def __del__(self):
self.temp_dir.cleanup()
def train(self, to_static, build_strategy=None):
"""
Tests model decorated by `dygraph_to_static_output` in static mode. For users, the model is defined in dygraph mode and trained in static mode.
"""
with fluid.dygraph.guard(place):
np.random.seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
batch_size=batch_size,
drop_last=True)
data_loader = fluid.io.DataLoader.from_generator(
capacity=5, iterable=True)
data_loader.set_sample_list_generator(train_reader)
resnet = ResNet()
if to_static:
resnet = paddle.jit.to_static(
resnet, build_strategy=build_strategy)
optimizer = optimizer_setting(parameter_list=resnet.parameters())
for epoch in range(epoch_num):
total_loss = 0.0
total_acc1 = 0.0
total_acc5 = 0.0
total_sample = 0
for batch_id, data in enumerate(data_loader()):
start_time = time.time()
img, label = data
pred = resnet(img)
loss = fluid.layers.cross_entropy(input=pred, label=label)
avg_loss = fluid.layers.mean(x=loss)
acc_top1 = fluid.layers.accuracy(
input=pred, label=label, k=1)
acc_top5 = fluid.layers.accuracy(
input=pred, label=label, k=5)
avg_loss.backward()
optimizer.minimize(avg_loss)
resnet.clear_gradients()
total_loss += avg_loss
total_acc1 += acc_top1
total_acc5 += acc_top5
total_sample += 1
end_time = time.time()
if batch_id % 2 == 0:
print( "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" % \
( epoch, batch_id, total_loss.numpy() / total_sample, \
total_acc1.numpy() / total_sample, total_acc5.numpy() / total_sample, end_time-start_time))
if batch_id == 10:
if to_static:
fluid.dygraph.jit.save(resnet,
self.model_save_prefix)
else:
fluid.dygraph.save_dygraph(
resnet.state_dict(),
self.dy_state_dict_save_path)
                        # avoid the dataloader throwing an abort signal

data_loader._reset()
break
return total_loss.numpy()
def predict_dygraph(self, data):
program_translator.enable(False)
with fluid.dygraph.guard(place):
resnet = ResNet()
model_dict, _ = fluid.dygraph.load_dygraph(
self.dy_state_dict_save_path)
resnet.set_dict(model_dict)
resnet.eval()
pred_res = resnet(fluid.dygraph.to_variable(data))
return pred_res.numpy()
def predict_static(self, data):
paddle.enable_static()
exe = fluid.Executor(place)
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(
self.model_save_dir,
executor=exe,
model_filename=self.model_filename,
params_filename=self.params_filename)
pred_res = exe.run(inference_program,
feed={feed_target_names[0]: data},
fetch_list=fetch_targets)
return pred_res[0]
def predict_dygraph_jit(self, data):
with fluid.dygraph.guard(place):
resnet = fluid.dygraph.jit.load(self.model_save_prefix)
resnet.eval()
pred_res = resnet(data)
return pred_res.numpy()
def predict_analysis_inference(self, data):
output = PredictorTools(self.model_save_dir, self.model_filename,
self.params_filename, [data])
out = output()
return out
class TestResnet(unittest.TestCase):
def setUp(self):
self.resnet_helper = ResNetHelper()
def train(self, to_static):
program_translator.enable(to_static)
return self.resnet_helper.train(to_static)
def verify_predict(self):
image = np.random.random([1, 3, 224, 224]).astype('float32')
dy_pre = self.resnet_helper.predict_dygraph(image)
st_pre = self.resnet_helper.predict_static(image)
dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
predictor_pre = self.resnet_helper.predict_analysis_inference(image)
self.assertTrue(
np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))
self.assertTrue(
np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(dy_jit_pre, st_pre))
self.assertTrue(
np.allclose(predictor_pre, st_pre),
msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(predictor_pre,
st_pre))
def test_resnet(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(
np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(static_loss,
dygraph_loss))
self.verify_predict()
def test_in_static_mode_mkldnn(self):
fluid.set_flags({'FLAGS_use_mkldnn': True})
try:
if paddle.fluid.core.is_compiled_with_mkldnn():
self.resnet_helper.train(to_static=True)
finally:
fluid.set_flags({'FLAGS_use_mkldnn': False})
if __name__ == '__main__':
# switch into new eager mode
with fluid.framework._test_eager_guard():
unittest.main()
|
the-stack_0_13606 |
import argparse
import json
import os
import sys
from random import shuffle
from multiprocessing.pool import ThreadPool
from functools import partial
import io
from tqdm import tqdm
# Assumes ai4eutils is on the path (github.com/Microsoft/ai4eutils)
from write_html_image_list import write_html_image_list
#from data_management.megadb.schema import sequences_schema_check
from data_management.megadb.megadb_utils import MegadbUtils
from visualization import visualization_utils as vis_utils
def render_image_info(rendering, args):
blob_service = rendering['blob_service']
image_obj = io.BytesIO()
try:
_ = blob_service.get_blob_to_stream(rendering['container_name'], rendering['blob_path'], image_obj)
except Exception as e:
print(f'Image not found in blob storage: {rendering["blob_path"]}')
print(e)
return
# resize is for displaying them more quickly
image = vis_utils.resize_image(vis_utils.open_image(image_obj), args.output_image_width)
vis_utils.render_megadb_bounding_boxes(rendering['bbox'], image)
annotated_img_name = rendering['annotated_img_name']
annotated_img_path = os.path.join(args.output_dir, 'rendered_images', annotated_img_name)
image.save(annotated_img_path)
def visualize_sequences(datasets_table, sequences, args):
num_images = 0
images_html = []
rendering_info = []
for seq in sequences:
if 'images' not in seq:
continue
# dataset and seq_id are required fields
dataset_name = seq['dataset']
seq_id = seq['seq_id']
# sort the images in the sequence
images_in_seq = sorted(seq['images'], key=lambda x: x['frame_num']) if len(seq['images']) > 1 else seq['images']
for im in images_in_seq:
if args.trim_to_images_bboxes_labeled and 'bbox' not in im:
continue
num_images += 1
blob_path = MegadbUtils.get_full_path(datasets_table, dataset_name, im['file'])
frame_num = im.get('frame_num', -1)
im_class = im.get('class', None)
if im_class is None: # if no class label on the image, show the class label on the sequence
im_class = seq.get('class', [])
rendering = {}
rendering['blob_service'] = MegadbUtils.get_blob_service(datasets_table, dataset_name)
rendering['container_name'] = datasets_table[dataset_name]['container']
rendering['blob_path'] = blob_path
rendering['bbox'] = im.get('bbox', [])
annotated_img_name = 'anno_' + blob_path.replace('/', args.pathsep_replacement).replace('\\', args.pathsep_replacement)
rendering['annotated_img_name'] = annotated_img_name
rendering_info.append(rendering)
images_html.append({
'filename': 'rendered_images/{}'.format(annotated_img_name),
'title': 'Seq ID: {}. Frame number: {}<br/> Image file: {}<br/> number of boxes: {}, image class labels: {}'.format(seq_id, frame_num, blob_path, len(rendering['bbox']), im_class),
'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
})
if num_images >= args.num_to_visualize:
print('num_images visualized is {}'.format(num_images))
break
# pool = ThreadPool()
render_image_info_partial = partial(render_image_info, args=args)
# print('len of rendering_info', len(rendering_info))
# tqdm(pool.imap_unordered(render_image_info_partial, rendering_info), total=len(rendering_info))
for rendering in tqdm(rendering_info):
render_image_info_partial(rendering)
print('Making HTML...')
html_path = os.path.join(args.output_dir, 'index.html')
# options = write_html_image_list()
# options['headerHtml']
write_html_image_list(
filename=html_path,
images=images_html
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('megadb_entries', type=str, help='Path to a json list of MegaDB entries')
parser.add_argument('output_dir', action='store', type=str,
help='Output directory for html and rendered images')
parser.add_argument('--trim_to_images_bboxes_labeled', action='store_true',
help='Only include images that have been sent for bbox labeling (but may be actually empty). Turn this on if QAing annotations.')
parser.add_argument('--num_to_visualize', action='store', type=int, default=200,
                        help='Number of images to visualize (all conformant images in a sequence are shown, so may be a few more than specified). Sequences are shuffled. Defaults to 200. Use -1 to visualize all.')
parser.add_argument('--pathsep_replacement', action='store', type=str, default='~',
help='Replace path separators in relative filenames with another character (default ~)')
parser.add_argument('-w', '--output_image_width', type=int,
help=('an integer indicating the desired width in pixels of the output annotated images. '
'Use -1 to not resize.'),
default=700)
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
assert 'COSMOS_ENDPOINT' in os.environ and 'COSMOS_KEY' in os.environ
os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'rendered_images'), exist_ok=True)
print('Connecting to MegaDB to get the datasets table...')
megadb_utils = MegadbUtils()
datasets_table = megadb_utils.get_datasets_table()
print('Loading the MegaDB entries...')
with open(args.megadb_entries) as f:
sequences = json.load(f)
print('Total number of sequences: {}'.format(len(sequences)))
# print('Checking that the MegaDB entries conform to the schema...')
# sequences_schema_check.sequences_schema_check(sequences)
shuffle(sequences)
visualize_sequences(datasets_table, sequences, args)
if __name__ == '__main__':
main()
|
the-stack_0_13607 | from flask import g, request
from pprint import pformat
from requests.exceptions import ConnectionError
from requests.models import Request
import logging
import requests
import sys
import time
def getLogger(*args, **kwargs):
logger = logging.getLogger(*args, **kwargs)
return CustomLogger(logger=logger)
class CustomLogger(object):
def __init__(self, logger):
self.logger = logger
def __getattr__(self, attr):
return getattr(self.logger, attr)
def debug(self, *args, **kwargs):
return self._add_correlation_id_and_log('debug', args, kwargs)
def info(self, *args, **kwargs):
return self._add_correlation_id_and_log('info', args, kwargs)
def warn(self, *args, **kwargs):
return self._add_correlation_id_and_log('warn', args, kwargs)
def error(self, *args, **kwargs):
return self._add_correlation_id_and_log('error', args, kwargs)
def critical(self, *args, **kwargs):
return self._add_correlation_id_and_log('critical', args, kwargs)
def _add_correlation_id_and_log(self, level_name, args, kwargs):
my_extra = {**kwargs.get('extra', {})}
try:
my_extra['correlation_id'] = g.correlation_id
except RuntimeError:
# outside flask application context, this is OK
pass
kwargs['extra'] = my_extra
log_fn = getattr(self.logger, level_name)
return log_fn(*args, **kwargs)
def exception(self, *args, **kwargs):
if 'extra' in kwargs:
my_extra = kwargs['extra'].copy()
else:
my_extra = {}
my_extra['exception'] = pformat(sys.exc_info()[1])
my_extra['correlation_id'] = g.correlation_id
kwargs['extra'] = my_extra
self.logger.exception(*args, **kwargs)
def logged_response(logger, endpoint_name, endpoint_version):
def _log_response(target):
def wrapper(*args, **kwargs):
start_time = time.time()
log_level = logger.getEffectiveLevel()
source = request.access_route[0]
label = '[%s] %s %s from %s' % (g.correlation_id, request.method,
request.full_path, source)
extra = {'request.method': request.method,
'request.endpoint_name': endpoint_name,
'request.endpoint_version': endpoint_version,
'request.path': request.path,
'request.full_path': request.full_path,
'request.source': source}
if log_level <= logging.DEBUG:
extra['request.body'] = pformat(request.json)
extra['request.headers'] = pformat(dict(request.headers))
try:
result = target(*args, **kwargs)
except Exception as e:
logger.exception("%s while handling %s",
e.__class__.__name__, label,
extra=extra)
raise
names = ('body', 'status_code', 'headers')
levels = (logging.DEBUG, logging.INFO, logging.DEBUG)
formatters = (pformat, pformat, lambda x: pformat(dict(x)))
for value, name, level, formatter in zip(
result, names, levels, formatters):
if log_level <= level:
extra['response.%s' % name] = formatter(value)
end_time = time.time()
extra['response.took'] = int((end_time - start_time) * 1000)
logger.info("Responding %s to %s",
result[1], label,
extra=extra)
return result
return wrapper
return _log_response
def _log_request(target, kind):
def wrapper(*args, **kwargs):
logger = kwargs.get('logger', getLogger(__name__))
if 'logger' in kwargs:
del kwargs['logger']
log_level = logger.getEffectiveLevel()
kwargs_for_constructor = get_args_for_request_constructor(kwargs)
request = Request(kind.upper(), *args, **kwargs_for_constructor)
extra = {'request.url': request.url,
'request.method': kind.upper()}
if log_level <= logging.DEBUG:
extra['request.body'] = pformat(request.data)
extra['request.headers'] = pformat(dict(request.headers))
extra['request.params'] = request.params
label = '%s %s' % (kind.upper(), request.url)
try:
response = target(*args, **kwargs)
except ConnectionError:
# exception should be logged elsewhere
raise
except Exception:
logger.exception("Exception while sending %s", label,
extra=extra)
raise
extra['response.status_code'] = response.status_code
if log_level <= logging.DEBUG:
extra['response.text'] = pformat(response.text)
extra['response.headers'] = pformat(dict(response.headers))
return response
return wrapper
def get_args_for_request_constructor(kwargs):
kwargs_for_constructor = kwargs.copy()
if 'timeout' in kwargs_for_constructor:
        # timeout is an argument to requests.get/post/etc. but not
# Request.__init__
del kwargs_for_constructor['timeout']
return kwargs_for_constructor
class LoggedRequest(object):
def __getattr__(self, name):
return _log_request(getattr(requests, name), name)
logged_request = LoggedRequest()
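# Usage sketch (illustrative; `app`, the endpoint name/version and the
# downstream URL are assumptions, not taken from this module). The wrapped view
# must return a (body, status_code, headers) tuple, since logged_response
# inspects exactly those three elements:
#
#   logger = getLogger(__name__)
#
#   @app.route('/things')
#   @logged_response(logger, 'things', 'v1')
#   def get_things():
#       upstream = logged_request.get('http://inventory/api/things',
#                                     timeout=5, logger=logger)
#       return upstream.text, upstream.status_code, {'Content-Type': 'application/json'}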
|
the-stack_0_13608 | # Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import sys
from mock import call, MagicMock, patch, PropertyMock
import pytest
from six import PY2
from sagemaker_training import entry_point, environment, errors, process, runner
builtins_open = "__builtin__.open" if PY2 else "builtins.open"
@pytest.fixture
def entry_point_type_module():
with patch("os.listdir", lambda x: ("setup.py",)):
yield
@pytest.fixture(autouse=True)
def entry_point_type_script():
with patch("os.listdir", lambda x: ()):
yield
@pytest.fixture()
def has_requirements():
with patch("os.path.exists", lambda x: x.endswith("requirements.txt")):
yield
@patch("sagemaker_training.modules.prepare")
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_module(check_error, prepare, entry_point_type_module):
path = "c://sagemaker-pytorch-container"
entry_point.install("python_module.py", path)
cmd = [sys.executable, "-m", "pip", "install", "."]
check_error.assert_called_with(cmd, errors.InstallModuleError, capture_error=False, cwd=path)
with patch("os.path.exists", return_value=True):
entry_point.install("python_module.py", path)
check_error.assert_called_with(
cmd + ["-r", "requirements.txt"],
errors.InstallModuleError,
cwd=path,
capture_error=False,
)
@patch("sagemaker_training.modules.prepare")
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_script(check_error, prepare, entry_point_type_module, has_requirements):
path = "c://sagemaker-pytorch-container"
entry_point.install("train.py", path)
with patch("os.path.exists", return_value=True):
entry_point.install(path, "python_module.py")
@patch("sagemaker_training.modules.prepare")
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_fails(check_error, prepare, entry_point_type_module):
check_error.side_effect = errors.ClientError()
with pytest.raises(errors.ClientError):
entry_point.install("git://aws/container-support", "script")
@patch("sagemaker_training.modules.prepare")
@patch("sys.executable", None)
@patch("sagemaker_training.process.check_error", autospec=True)
def test_install_no_python_executable(
check_error, prepare, has_requirements, entry_point_type_module
):
with pytest.raises(RuntimeError) as e:
entry_point.install("train.py", "git://aws/container-support")
assert str(e.value) == "Failed to retrieve the real path for the Python executable binary"
@patch("os.chmod")
@patch("sagemaker_training.process.check_error", autospec=True)
@patch("socket.gethostbyname")
def test_script_entry_point_with_python_package(
gethostbyname, check_error, chmod, entry_point_type_module
):
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://dummy-uri",
user_entry_point="train.sh",
args=["dummy_arg"],
runner_type=runner_mock,
)
chmod.assert_called_with(os.path.join(environment.code_dir, "train.sh"), 511)
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("sagemaker_training.process.check_error", autospec=True)
@patch("socket.gethostbyname")
def test_run_module_wait(gethostbyname, check_error, chmod, download_and_extract):
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://url",
user_entry_point="launcher.sh",
args=["42"],
capture_error=True,
runner_type=runner_mock,
)
download_and_extract.assert_called_with(uri="s3://url", path=environment.code_dir)
runner_mock.run.assert_called_with(True, True)
chmod.assert_called_with(os.path.join(environment.code_dir, "launcher.sh"), 511)
@patch("sagemaker_training.files.download_and_extract")
@patch("sagemaker_training.modules.install")
@patch.object(
environment.Environment, "hosts", return_value=["algo-1", "algo-2"], new_callable=PropertyMock
)
@patch("socket.gethostbyname")
def test_run_calls_hostname_resolution(gethostbyname, install, hosts, download_and_extract):
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://url", user_entry_point="launcher.py", args=["42"], runner_type=runner_mock
)
gethostbyname.assert_called_with("algo-2")
gethostbyname.assert_any_call("algo-1")
@patch("sagemaker_training.files.download_and_extract")
@patch("sagemaker_training.modules.install")
@patch.object(
environment.Environment, "hosts", return_value=["algo-1", "algo-2"], new_callable=PropertyMock
)
@patch("socket.gethostbyname")
def test_run_waits_hostname_resolution(gethostbyname, hosts, install, download_and_extract):
gethostbyname.side_effect = [ValueError(), ValueError(), True, True]
runner_mock = MagicMock(spec=process.ProcessRunner)
entry_point.run(
uri="s3://url", user_entry_point="launcher.py", args=["42"], runner_type=runner_mock
)
gethostbyname.assert_has_calls([call("algo-1"), call("algo-1"), call("algo-1"), call("algo-2")])
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("socket.gethostbyname")
def test_run_module_no_wait(gethostbyname, chmod, download_and_extract):
runner_mock = MagicMock(spec=process.ProcessRunner)
module_name = "default_user_module_name"
entry_point.run(
uri="s3://url",
user_entry_point=module_name,
args=["42"],
wait=False,
runner_type=runner_mock,
)
runner_mock.run.assert_called_with(False, False)
@patch("sys.path")
@patch("sagemaker_training.runner.get")
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("socket.gethostbyname")
def test_run_module_with_env_vars(gethostbyname, chmod, download_and_extract, get_runner, sys_path):
module_name = "default_user_module_name"
args = ["--some-arg", "42"]
entry_point.run(
uri="s3://url", user_entry_point=module_name, args=args, env_vars={"FOO": "BAR"}
)
expected_env_vars = {"FOO": "BAR", "PYTHONPATH": ""}
get_runner.assert_called_with(
runner.ProcessRunnerType, module_name, args, expected_env_vars, None
)
@patch("sys.path")
@patch("sagemaker_training.runner.get")
@patch("sagemaker_training.files.download_and_extract")
@patch("os.chmod")
@patch("socket.gethostbyname")
def test_run_module_with_extra_opts(
gethostbyname, chmod, download_and_extract, get_runner, sys_path
):
module_name = "default_user_module_name"
args = ["--some-arg", "42"]
extra_opts = {"foo": "bar"}
entry_point.run(uri="s3://url", user_entry_point=module_name, args=args, extra_opts=extra_opts)
get_runner.assert_called_with(runner.ProcessRunnerType, module_name, args, {}, extra_opts)
|
the-stack_0_13610 | from statistics import mean
import numpy as np
import matplotlib.pylab as plt
from matplotlib import style
style.use("fivethirtyeight")
#dtype important later
xs = np.array([1,2,3,4,5,6],dtype=np.float64)
ys = np.array([5,4,6,5,6,7],dtype=np.float64)
plt.scatter(xs,ys)
plt.show()
def best_fit_slope(xs,ys):
m = (mean(xs)*mean(ys)-mean(xs*ys))/(mean(xs)**2.-mean(xs**2.))
return m
m=best_fit_slope(xs,ys)
print(m)
########################PEMDAS
plt.scatter(xs,ys)
plt.plot(xs,m*xs,color='C1')
plt.show()
b=mean(ys)-m*mean(xs)
plt.scatter(xs,ys)
plt.plot(xs,m*xs+b,color='C1')
plt.show()
def best_fit_slope_and_intercept(xs,ys):
m = (mean(xs)*mean(ys)-mean(xs*ys))/(mean(xs)**2.-mean(xs**2.))
b=mean(ys)-m*mean(xs)
return m,b
m,b=best_fit_slope_and_intercept(xs,ys)
plt.scatter(xs,ys)
plt.plot(xs,m*xs+b,color='C1')
plt.show()
regression_line=[(m*x)+b for x in xs]
print(m,b)
predicted_x=8
predicted_y=m*predicted_x+b
plt.scatter(xs,ys)
plt.plot(xs,m*xs+b,color='C1')
plt.scatter(predicted_x,predicted_y, color='C2')
plt.show()
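# Sanity check (illustrative addition): the closed-form least-squares slope and
# intercept computed above should agree with numpy's first-degree polynomial fit
# up to floating-point error.
fit_m, fit_b = np.polyfit(xs, ys, 1)
print('np.polyfit slope/intercept:', fit_m, fit_b)
assert abs(fit_m - m) < 1e-8 and abs(fit_b - b) < 1e-8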
|
the-stack_0_13611 | # -*- coding: utf-8 -*-
import re
from scrapy import Spider, Request
from dateutil import parser
from artbot_scraper.items import EventItem
from pytz import timezone
class AmbushSpider(Spider):
name = "Goodspace"
allowed_domains = ["goodspace.co"]
start_urls = ["http://goodspace.co/upcoming/"]
def parse(self, response):
for href in response.xpath('//a[contains(@class, "project")]/@href'):
url = response.urljoin(href.extract())
yield Request(url, callback=self.parse_event)
def parse_event(self, response):
item = EventItem()
item['url'] = response.url
item['venue'] = self.name
item['title'] = response.xpath('//h1/text()').extract_first().strip()
item['description'] = ''.join(response.xpath('//div[contains(@class, "event_details")]//text()').extract())
item['image'] = response.xpath('//figure[contains(@class, "amb_gal_img")]//img/@src').extract_first()
time = ''.join(response.xpath('//time//text()').extract())
        match = re.match(r'(?P<start>[a-zA-Z]+\d+)(?P<end>[a-zA-Z]+\d+)', time)
if (match):
tz = timezone('Australia/Sydney')
item['start'] = tz.localize(parser.parse(match.group('start')))
item['end'] = tz.localize(parser.parse(match.group('end')))
yield item
|
the-stack_0_13613 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from threading import Event
from typing import IO, Any, Optional, Union
import libvirt # type: ignore
from . import libvirt_events_thread
# Reads serial console log from libvirt VM and writes it to a file.
class QemuConsoleLogger:
def __init__(self) -> None:
self._stream_completed = Event()
self._console_stream: Optional[libvirt.virStream] = None
self._console_stream_callback_started = False
self._console_stream_callback_added = False
self._log_file: Optional[IO[Any]] = None
# Attach logger to a libvirt VM.
def attach(
self,
qemu_conn: libvirt.virConnect,
domain: libvirt.virDomain,
log_file_path: str,
) -> None:
# Open the log file.
self._log_file = open(log_file_path, "ab")
# Open the libvirt console stream.
console_stream = qemu_conn.newStream(libvirt.VIR_STREAM_NONBLOCK)
domain.openConsole(
None,
console_stream,
libvirt.VIR_DOMAIN_CONSOLE_FORCE | libvirt.VIR_DOMAIN_CONSOLE_SAFE,
)
self._console_stream = console_stream
libvirt_events_thread.run_callback(self._register_console_callbacks)
self._console_stream_callback_started = True
# Close the logger.
def close(self) -> None:
# Check if attach() run successfully.
if self._console_stream_callback_started:
# Close the stream on libvirt callbacks thread.
libvirt_events_thread.run_callback(self._close_stream, True)
self._stream_completed.wait()
else:
if self._console_stream:
self._console_stream.abort()
if self._log_file:
self._log_file.close()
# Wait until the stream closes.
# Typically used when gracefully shutting down a VM.
def wait_for_close(self) -> None:
if self._console_stream_callback_started:
self._stream_completed.wait()
# Register the console stream events.
# Threading: Must only be called on libvirt events thread.
def _register_console_callbacks(self) -> None:
# Attach callback for stream events.
assert self._console_stream
self._console_stream.eventAddCallback(
libvirt.VIR_STREAM_EVENT_READABLE
| libvirt.VIR_STREAM_EVENT_ERROR
| libvirt.VIR_STREAM_EVENT_HANGUP,
self._stream_event,
None,
)
self._console_stream_callback_added = True
# Handles events for the console stream.
# Threading: Must only be called on libvirt events thread.
def _stream_event(
self, stream: libvirt.virStream, events: Union[int, bytes], context: Any
) -> None:
if events & libvirt.VIR_STREAM_EVENT_READABLE:
# Data is available to be read.
while True:
data = stream.recv(libvirt.virStorageVol.streamBufSize)
if data == -2:
# No more data available at the moment.
break
if len(data) == 0:
# EOF reached.
self._close_stream(False)
break
assert self._log_file
self._log_file.write(data)
if (
events & libvirt.VIR_STREAM_EVENT_ERROR
or events & libvirt.VIR_STREAM_EVENT_HANGUP
):
# Stream is shutting down. So, close it.
self._close_stream(True)
# Close the stream resource.
# Threading: Must only be called on libvirt events thread.
def _close_stream(self, abort: bool) -> None:
if self._stream_completed.is_set():
# Already closed. Nothing to do.
return
try:
# Close the log file
assert self._log_file
self._log_file.close()
# Close the stream
assert self._console_stream
if self._console_stream_callback_added:
self._console_stream.eventRemoveCallback()
if abort:
self._console_stream.abort()
else:
self._console_stream.finish()
finally:
# Signal that the stream has closed.
self._stream_completed.set()
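# Usage sketch (illustrative; the libvirt connection, domain object and log path
# below are assumptions, not taken from this module):
#
#   console_log = QemuConsoleLogger()
#   console_log.attach(qemu_conn, domain, "/var/log/lisa/vm-console.log")
#   ...                           # run the VM workload
#   domain.shutdown()
#   console_log.wait_for_close()  # stream closes when the guest powers off
#   console_log.close()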
|
the-stack_0_13614 | cont = ('zero', 'um', 'dois', 'três', 'quatro',
'cinco', 'seis', 'sete', 'oito', 'nove',
'dez', 'onze', 'doze', 'treze', 'catorze',
'quinze', 'dezesseis', 'dezessete', 'dezoito',
'dezenove', 'vinte')
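# The loop below reads an integer between 0 and 20, re-prompting until the
# input is in range, then prints the number's name in Portuguese.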
while True:
núm = int(input('Digite um número entre 0 e 20:\n'))
if 0 <= núm <= 20:
break
print('Tente novamente.' , end='')
print(f'Você digitou o número {cont[núm]}')
|
the-stack_0_13616 | import json
import shutil
import re
from os.path import exists,join, realpath
import os
file_name = 'compile_commands.json'
with open(file_name, 'r') as f:
data = json.load(f)
cnt = 0
CYBER_DIR = "/home/zhihaohe/cybertron"
PROTO_DIR = '/home/zhihaohe/cybertron'
container_dirs = ['/home/zhihaohe/container_cybertron/usr/local/include/',
'/home/zhihaohe/container_cybertron/tmp',
'/home/zhihaohe/container_cybertron/usr/include/',
'/home/zhihaohe/container_cybertron/pybind11/include/',
'/home/zhihaohe/container_cybertron/usr/local/apollo',
'/home/zhihaohe/container_cybertron/usr/local/src']
def extract_path_item(path, i):
    return path.split('/')[i]
def insert_to_DB(db, fn, path):
m = db
items = path.split('/')
i = -1
while abs(i) < len(items) and items[i] in db:
if str is type(db[items[i]]):
old_path = db[items[i]]
stem = old_path.split('/')[i]
assert stem == items[i]
next_stem = old_path.split('/')[i-1]
db[items[i]] = {next_stem: old_path}
else:
assert dict is type(db[items[i]]), "Bug, should only be str or dict, but it is " + type(db[items[i]])
db = db[items[i]]
i -= 1
if abs(i) >= len(items):
raise Exception("insert path twice.%s"%path)
db[items[i]] = path
def build_filepath_db(directory, header_db):
insert_set = set()
for fn in os.listdir(directory):
path = join(directory, fn)
if path == '/home/zhihaohe/usr_local_cybertron/tmp/lz4-1.9.2/lib/lz4.h':
print(directory)
if os.path.isdir(path):
build_filepath_db(path, header_db)
elif path not in insert_set:
insert_set.add(path)
insert_to_DB(header_db, fn, path)
header_db = {}
for d in container_dirs:
header_db[d] = {}
build_filepath_db(d, header_db[d])
with open('header.db', 'w') as f:
json.dump(header_db, f, indent=2)
def change_file(f, new_dir):
items = f.split('/')
a = ''
for i in items:
a = i + '/' + a
if exists(join(new_dir, a)):
return a
return None
def search_db(path, db):
items = path.split('/')
for i in range(-1, -len(items), -1):
if items[i] not in db:
break
if type(db[items[i]]) is str:
return db[items[i]]
assert type(db[items[i]]) is dict
db = db[items[i]]
return None
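# Sketch of the reverse-path index (illustrative; the header path below is an
# assumption): paths are keyed by their trailing components, so lookups walk the
# query path from the file name backwards until a unique stored path is found.
#
#   db = {}
#   insert_to_DB(db, 'lz4.h', '/usr/include/lz4/lz4.h')
#   search_db('lz4/lz4.h', db)   # -> '/usr/include/lz4/lz4.h'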
def change_directory(f, d):
global header_db
if f.endswith('.pb.h') or f.endswith('.pb.cc'):
return f, PROTO_DIR
    elif f.endswith('.so') or re.search(r'so\.\d*', f) or f.endswith('.txx') or f.endswith('.a'):
# TODO process lib properly
return f, d
elif exists(join(CYBER_DIR, f)):
return f, CYBER_DIR
else:
for container_dir, db in header_db.items():
fp = search_db(f, db)
if fp:
assert fp[:len(container_dir)] == container_dir
return fp[len(container_dir)+1:], container_dir
return None, None
def migrate_include(cmd, keyword, new_include_path):
    tmpl = r'-isystem \S*%s\S*' % keyword
return re.sub(tmpl, '-isystem %s'%new_include_path, cmd)
def remove_include(cmd, keyword):
    tmpl = r'-isystem \S*%s\S*' % keyword
return re.sub(tmpl, '', cmd)
def insert_include(cmd, path):
p = cmd.find('-isystem')
if -1 == p:
return cmd
return cmd[:p] + ' -isystem %s '%path + cmd[p:]
def process_command(cmd):
# remove not used compile flag
cmd = cmd.replace('-fno-canonical-system-headers', '')
cmd = insert_include(cmd, '/home/zhihaohe/container_cybertron/usr_local/include')
cmd = migrate_include(cmd, 'opencv', '/opt/ros/kinetic/include/opencv-3.3.1-dev/')
cmd = remove_include(cmd, 'boost')
return cmd
new_data = []
unfound_log = open('./not_founded_files.log', 'w')
for l in data:
l['command'] = process_command(l['command'])
# l['directory'] = CYBER_DIR
f, d = change_directory(l['file'], l['directory'])
if f and d:
l['file'] = f
l['directory'] = d
else:
unfound_log.write('%s, %s, %s\n' % (l['directory'], l['file'],
realpath(join(l['directory'],
l['file']))))
new_data.append(l)
shutil.move(file_name, file_name + '.backup')
with open('compile_commands.json', 'w') as f:
json.dump(new_data, f, indent=2)
|
the-stack_0_13617 | # Logging level must be set before importing any stretch_body class
import stretch_body.robot_params
stretch_body.robot_params.RobotParams.set_logging_level("DEBUG")
import unittest
import stretch_body.dynamixel_XL430
import logging
from concurrent.futures import ThreadPoolExecutor
class TestDynamixelXL430(unittest.TestCase):
def test_concurrent_access(self):
"""Verify zero comms errors in concurrent access.
"""
print('Testing Concurrent Access')
servo = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=12,
usb="/dev/hello-dynamixel-head",
logger=logging.getLogger("test_dynamixel"))
self.assertTrue(servo.startup())
def ping_n(n):
# servo.pretty_print() # causes many more servo communications
servo.do_ping()
ns = [1,2,3,4,5]
with ThreadPoolExecutor(max_workers = 2) as executor:
results = executor.map(ping_n, ns)
self.assertEqual(servo.comm_errors, 0)
self.assertTrue(servo.last_comm_success)
servo.stop()
def test_handle_comm_result(self):
"""Verify comm results correctly handled.
"""
print('Testing Handle Comm Result')
servo = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=12,
usb="/dev/hello-dynamixel-head",
logger=logging.getLogger("test_dynamixel"))
self.assertTrue(servo.startup())
ret = servo.handle_comm_result('DXL_TEST', 0, 0)
self.assertTrue(ret)
self.assertTrue(servo.last_comm_success)
self.assertEqual(servo.comm_errors, 0)
self.assertRaises(stretch_body.dynamixel_XL430.DynamixelCommError, servo.handle_comm_result, 'DXL_TEST', -1000, 0) # -1000 = PORT BUSY
self.assertFalse(servo.last_comm_success)
self.assertEqual(servo.comm_errors, 1)
self.assertRaises(stretch_body.dynamixel_XL430.DynamixelCommError, servo.handle_comm_result, 'DXL_TEST', -3002, 1) # -3002 = RX Corrupt
self.assertFalse(servo.last_comm_success)
self.assertEqual(servo.comm_errors, 2)
servo.stop()
def test_change_baud_rate(self, dxl_id=13, usb="/dev/hello-dynamixel-wrist"):
"""Verify can change baud rate.
        TODO AE: Restarting a new connection at a just-changed baud rate does not always succeed. Need to close port?
"""
logger = logging.getLogger("test_dynamixel")
start_baud = stretch_body.dynamixel_XL430.DynamixelXL430.identify_baud_rate(dxl_id=dxl_id, usb=usb)
print('Testing changing baud rate from {0} to {1} and back'.format(start_baud, 115200 if start_baud != 115200 else 57600))
servo1 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=start_baud, logger=logger)
self.assertTrue(servo1.do_ping())
curr_baud = servo1.get_baud_rate()
self.assertEqual(curr_baud, start_baud)
self.assertTrue(servo1.do_ping())
# invalid baud goal
goal_baud = 9000
succeeded = servo1.set_baud_rate(goal_baud)
self.assertFalse(succeeded)
curr_baud = servo1.get_baud_rate()
self.assertNotEqual(curr_baud, goal_baud)
self.assertTrue(servo1.do_ping())
# change the baud
goal_baud = 115200 if start_baud != 115200 else 57600
succeeded = servo1.set_baud_rate(goal_baud)
servo1.stop()
self.assertTrue(succeeded)
servo2 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=goal_baud, logger=logger)
curr_baud = servo2.get_baud_rate()
self.assertEqual(curr_baud, goal_baud)
self.assertTrue(servo2.do_ping())
servo2.stop()
servo3 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=start_baud, logger=logger)
self.assertRaises(stretch_body.dynamixel_XL430.DynamixelCommError, servo3.get_baud_rate)
servo3.stop()
# reset baud to its starting baud
servo4 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=goal_baud, logger=logger)
self.assertTrue(servo4.do_ping())
succeeded = servo4.set_baud_rate(start_baud)
self.assertTrue(succeeded)
servo4.stop()
servo5 = stretch_body.dynamixel_XL430.DynamixelXL430(dxl_id=dxl_id, usb=usb, baud=start_baud, logger=logger)
curr_baud = servo5.get_baud_rate()
self.assertEqual(curr_baud, start_baud)
self.assertTrue(servo5.do_ping())
servo5.stop()
|
the-stack_0_13619 | import cv2
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def face_extractor(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
return
for (x, y, w, h) in faces:
cropped_faces = img[y:y + h, x:x + w]
return cropped_faces
cap = cv2.VideoCapture(0)
count = 0
while True:
ret, frame = cap.read()
    # run face detection once per frame and reuse the cropped result
    face = face_extractor(frame)
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
file_name_path = '/home/abhishek/Documents/Face/users'+str(count) + '.jpg'
cv2.imwrite(file_name_path, face)
cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imshow('Face Cropper', face)
else:
print("Face not found")
pass
if cv2.waitKey(1) & 0xFF == ord('q') or count == 500:
break
cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete!!")
|
the-stack_0_13620 | from tqdm import tqdm
import numpy as np
import torch
from typing import Callable, Optional, Union
from alibi_detect.cd.base_online import BaseDriftOnline
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch import zero_diag, quantile
class MMDDriftOnlineTorch(BaseDriftOnline):
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
device: Optional[str] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online maximum Mean Discrepancy (MMD) data drift detector using preconfigured thresholds.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
n_bootstraps=n_bootstraps,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': 'pytorch'})
# set backend
if device is None or device.lower() in ['gpu', 'cuda']:
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if self.device.type == 'cpu':
print('No GPU detected, fall back on CPU.')
else:
self.device = torch.device('cpu')
# initialize kernel
sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, np.ndarray) else None
self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
self.x_ref = torch.from_numpy(self.x_ref).to(self.device)
self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=(sigma is None))
self._configure_thresholds()
self._initialise()
def _configure_ref_subset(self):
etw_size = 2*self.window_size-1 # etw = extended test window
rw_size = self.n - etw_size # rw = ref-window
# Make split and ensure it doesn't cause an initial detection
mmd_init = None
while mmd_init is None or mmd_init >= self.get_threshold(0):
# Make split
perm = torch.randperm(self.n)
self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
self.test_window = self.x_ref[self.init_test_inds]
# Compute initial mmd to check for initial detection
self.k_xx_sub = self.k_xx[self.ref_inds][:, self.ref_inds]
self.k_xx_sub_sum = zero_diag(self.k_xx_sub).sum()/(rw_size*(rw_size-1))
self.k_xy = self.kernel(self.x_ref[self.ref_inds], self.test_window)
k_yy = self.kernel(self.test_window, self.test_window)
mmd_init = (
self.k_xx_sub_sum +
zero_diag(k_yy).sum()/(self.window_size*(self.window_size-1)) -
2*self.k_xy.mean()
)
def _configure_thresholds(self):
# Each bootstrap sample splits the reference samples into a sub-reference sample (x)
# and an extended test window (y). The extended test window will be treated as W overlapping
# test windows of size W (so 2W-1 test samples in total)
w_size = self.window_size
etw_size = 2*w_size-1 # etw = extended test window
rw_size = self.n - etw_size # rw = sub-ref window
perms = [torch.randperm(self.n) for _ in range(self.n_bootstraps)]
x_inds_all = [perm[:-etw_size] for perm in perms]
y_inds_all = [perm[-etw_size:] for perm in perms]
if self.verbose:
print("Generating permutations of kernel matrix..")
# Need to compute mmd for each bs for each of W overlapping windows
# Most of the computation can be done once however
# We avoid summing the rw_size^2 submatrix for each bootstrap sample by instead computing the full
        # sum once and then subtracting the relevant parts (k_xx_sum = k_full_sum - 2*k_xy_sum - k_yy_sum).
# We also reduce computation of k_xy_sum from O(nW) to O(W) by caching column sums
k_full_sum = zero_diag(self.k_xx).sum()
k_xy_col_sums_all = [
self.k_xx[x_inds][:, y_inds].sum(0) for x_inds, y_inds in
(tqdm(zip(x_inds_all, y_inds_all), total=self.n_bootstraps) if self.verbose else
zip(x_inds_all, y_inds_all))
]
k_xx_sums_all = [(
k_full_sum - zero_diag(self.k_xx[y_inds][:, y_inds]).sum() - 2*k_xy_col_sums.sum()
)/(rw_size*(rw_size-1)) for y_inds, k_xy_col_sums in zip(y_inds_all, k_xy_col_sums_all)]
k_xy_col_sums_all = [k_xy_col_sums/(rw_size*w_size) for k_xy_col_sums in k_xy_col_sums_all]
# Now to iterate through the W overlapping windows
thresholds = []
p_bar = tqdm(range(w_size), "Computing thresholds") if self.verbose else range(w_size)
for w in p_bar:
y_inds_all_w = [y_inds[w:w+w_size] for y_inds in y_inds_all] # test windows of size w_size
mmds = [(
k_xx_sum +
zero_diag(self.k_xx[y_inds_w][:, y_inds_w]).sum()/(w_size*(w_size-1)) -
2*k_xy_col_sums[w:w+w_size].sum()
) for k_xx_sum, y_inds_w, k_xy_col_sums in zip(k_xx_sums_all, y_inds_all_w, k_xy_col_sums_all)
]
mmds = torch.tensor(mmds) # an mmd for each bootstrap sample
# Now we discard all bootstrap samples for which mmd is in top (1/ert)% and record the thresholds
thresholds.append(quantile(mmds, 1-self.fpr))
y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if mmds[i] < thresholds[-1]]
k_xx_sums_all = [
k_xx_sums_all[i] for i in range(len(k_xx_sums_all)) if mmds[i] < thresholds[-1]
]
k_xy_col_sums_all = [
k_xy_col_sums_all[i] for i in range(len(k_xy_col_sums_all)) if mmds[i] < thresholds[-1]
]
self.thresholds = thresholds
def score(self, x_t: np.ndarray) -> Union[float, None]:
"""
Compute the test-statistic (squared MMD) between the reference window and test window.
If the test-window is not yet full then a test-statistic of None is returned.
Parameters
----------
x_t
A single instance.
Returns
-------
Squared MMD estimate between reference window and test window
"""
x_t = torch.from_numpy(x_t[None, :]).to(self.device)
kernel_col = self.kernel(self.x_ref[self.ref_inds], x_t)
self.test_window = torch.cat([self.test_window[(1-self.window_size):], x_t], 0)
self.k_xy = torch.cat([self.k_xy[:, (1-self.window_size):], kernel_col], 1)
k_yy = self.kernel(self.test_window, self.test_window)
mmd = (
self.k_xx_sub_sum +
zero_diag(k_yy).sum()/(self.window_size*(self.window_size-1)) -
2*self.k_xy.mean()
)
return float(mmd.detach().cpu())
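# Usage sketch (illustrative; the reference data, ERT, window size and `stream`
# iterable below are assumptions, not taken from this module):
#
#   x_ref = np.random.randn(1000, 10).astype(np.float32)
#   cd = MMDDriftOnlineTorch(x_ref, ert=200, window_size=20)
#   for x_t in stream:          # `stream` yields single instances (1-D arrays)
#       stat = cd.score(x_t)    # squared-MMD estimate for the current window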
|
the-stack_0_13621 | from utils import *
import pickle
import numpy_indexed
import copy
# version of dfs that builds the graph as it traverses it
def dfs_2(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth = 11):
'''
Parameters:
start : str
name of the root node, e.g. 'ctg1'
goals : list of str
list of goal nodes, e.g. goals = ['ctg1', 'ctg2']
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r : numpy indexed object
object that contains info about overlaps
e.g. grouped_c_r[0] gives info about continuing reads for contig1, grouped_c_r[1] for contig2, etc.
grouped_r_r : numpy indexed object
        overlaps between reads, similar to grouped_c_r
keys_c_r : list of str
        a list of keys for the grouped_c_r object, e.g. ['ctg1', 'ctg2']
        keys_c_r are used to get the index of a contig in the grouped_c_r object based on its name
keys_r_r : list of str
        similar to keys_c_r
Returns:
paths_to_goals: list of lists of str
e.g. [['ctg1', 'read1', 'ctg2'], ['ctg1', 'read2', 'ctg2']]
a list of paths, where a path is a list of strings
'''
graph = dict()
graph[start] = set(monte_carlo_extending_for_contig(start, side, grouped_c_r, keys_c_r))
paths_to_goals = []
stack = [(start, [start])]
while stack:
# print(stack)
(vertex, path) = stack.pop()
if len(path) <= max_depth:
if vertex in graph:
for next in graph[vertex] - set(path):
if next in goals:
paths_to_goals.append(path + [next])
# print(paths_to_goals)
else:
stack.append((next, path + [next]))
# find new connecting reads for this node if they exist
# if next in graph:
graph[next] = set(monte_carlo_extending_for_read(next, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r))
return paths_to_goals
# def get_n_best_connecting_reads_for_contig(contig, grouped, keys, side, n = 2, first_approach = True):
# group = grouped[keys.index(contig)]
# connecting_reads = group[np.where(group['extension_side'] == side)]
# if first_approach:
# ind = np.lexsort((connecting_reads['SI'], connecting_reads['OS']))
# else:
# ind = np.lexsort((connecting_reads['SI'], connecting_reads['ES']))
# return connecting_reads[ind][-n:]['query_name']
# def get_n_best_connecting_reads_for_read(read, side, grouped_r_r, keys_r_r, grouped_c_r, keys_c_r, n = 2, first_approach = True):
# if side == 'right':
# other_side = 'left'
# else:
# other_side = 'right'
# second_group = []
# for i in range(len(keys_c_r)):
# group = grouped_c_r[i]
# for j in group:
# if j['query_name'] == read and j['extension_side'] == other_side:
# new_row = copy.deepcopy(j)
# new_row['query_name'] = keys_c_r[i]
# second_group.append(new_row)
# # print(type(second_group))
# # print(second_group)
# if read in keys_r_r:
# group = grouped_r_r[keys_r_r.index(read)]
# connecting_reads = group[np.where(group['extension_side'] == side)]
# final_group = connecting_reads
# # final_group = []
# if second_group != []:
# final_group = np.append(connecting_reads, second_group)
# # if second_group != []:
# # final_group.append(second_group)
# if first_approach:
# ind = np.lexsort((final_group['SI'], final_group['OS']))
# else:
# ind = np.lexsort((final_group['SI'], final_group['ES']))
# return final_group[ind][-n:]['query_name']
# else:
# return []
def monte_carlo_extending_for_contig(contig, side, grouped_c_r, keys_c_r):
'''
Parameters:
contig : str
name of the starting contig, e.g. 'ctg1'
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r : numpy indexed object
object that contains info about overlaps
e.g. grouped_c_r[0] gives info about continuing reads for contig1, grouped_c_r[1] for contig2, etc.
keys_c_r : list of str
        a list of keys for the grouped_c_r object, e.g. ['ctg1', 'ctg2']
        keys_c_r are used to get the index of a contig in the grouped_c_r object based on its name
Returns:
chosen_read : list of one str
read that the monte carlo approach found as a continuing read for a given contig
in a list because method dfs_2 expects a list of continuing reads. dfs_2 written that way
because of first two approaches
        In case it doesn't find one, the method returns an empty list so that the dfs_2 method doesn't crash
subject to change ???!!!
        Karlo, feel free to change this so that it returns a string, and change dfs_2 so that it accepts a string
if that suits you
'''
group = grouped_c_r[keys_c_r.index(contig)]
group = group[np.where(group['extension_side'] == side)]
group = group[np.where(group['ES'] >= 0)]
if group.size != 0:
reads_ES = group['ES']
sum_ES = np.sum(reads_ES)
probabilities = [x / sum_ES for x in reads_ES]
chosen_read = np.random.choice(a = group, p = probabilities)
chosen_read = [chosen_read['query_name']]
return chosen_read
else:
return []
def monte_carlo_extending_for_read(read, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r):
'''
Parameters:
read : str
name of the read for which we want to find continuing read, e.g. 'read00291'
side : str
side to which we try to build path, right or left
if side == 'right', continuing read is building path on the right side of target node
grouped_c_r/grouped_r_r/keys_c_r/keys_r_r
look at previous comments in other methods
Returns:
chosen_read : list of one str
            read that the Monte Carlo approach found as a continuing read for the given read,
            wrapped in a list because the method dfs_2 expects a list of continuing reads
            (dfs_2 was written that way because of the first two approaches).
            If no continuing read is found, the method returns an empty list so that dfs_2 does not crash.
            Subject to change: Karlo, feel free to make this return a string and change dfs_2
            so that it accepts a string, if that suits you.
'''
if side == 'right':
other_side = 'left'
else:
other_side = 'right'
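    # Contig-read overlaps are stored per contig, so scan every contig group: if this read extends a
    # contig on the opposite side, re-label that row with the contig name so the contig itself
    # becomes a candidate continuation.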
second_group = []
for i in range(len(keys_c_r)):
group = grouped_c_r[i]
for j in group:
if j['query_name'] == read and j['extension_side'] == other_side and j['ES'] > 0:
new_row = copy.deepcopy(j)
new_row['query_name'] = keys_c_r[i]
second_group.append(new_row)
if read in keys_r_r:
group = grouped_r_r[keys_r_r.index(read)]
group = group[np.where(group['extension_side'] == side)]
group = group[np.where(group['ES'] >= 0)]
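        # Merge read-read candidates with any contig candidates collected above.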
if second_group != []:
group = np.append(group, second_group)
if group.size != 0:
reads_ES = group['ES']
sum_ES = np.sum(reads_ES)
probabilities = [x / sum_ES for x in reads_ES]
chosen_read = np.random.choice(a = group, p = probabilities)
chosen_read = [chosen_read['query_name']]
return chosen_read
else:
return []
else:
return []
def try_monte_carlo(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth=50, n_times = 100):
'''
Parameters:
start : str
name of the root node, e.g. 'ctg1'
goals : list of str
list of goal nodes, e.g. goals = ['ctg1', 'ctg2']
side : str
side to which we try to build path, right or left
            if side == 'right', the continuing read builds the path on the right side of the target node
grouped_c_r : numpy indexed object
object that contains info about overlaps
e.g. grouped_c_r[0] gives info about continuing reads for contig1, grouped_c_r[1] for contig2, etc.
grouped_r_r : numpy indexed object
            overlaps between reads, similar to grouped_c_r
keys_c_r : list of str
            a list of keys for the grouped_c_r object, e.g. ['ctg1', 'ctg2'];
            keys_c_r is used to get the index of a contig in the grouped_c_r object based on its name
keys_r_r : list of str
            similar to keys_c_r
max_depth : int
maximum number of nodes in a path
n_times : int
            number of times we try to get a path with the Monte Carlo approach;
            given 1000 tries, we usually find around 5-10 paths
Returns:
all_paths: list of lists of paths
e.g. [[['ctg1', 'read1', 'ctg2']], [['ctg1', 'read2', 'ctg2']]]
            probably one level of unnecessary nesting, built that way because of the first two approaches,
            where we expected to find more than one path per DFS search. Subject to change: you could
            change this so that all_paths is a list of paths rather than a list inside a list.
'''
all_paths = []
for i in range(n_times):
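        # Each attempt is an independent randomized DFS; keep only the attempts that actually return a path.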
paths = dfs_2(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth=max_depth)
if paths != []:
all_paths.append(paths)
return all_paths
if __name__ == '__main__':
# data = {'A': set(['r1', 'r2', 'r3']),
# 'r1': set(['r4', 'r5']),
# 'r2': set(['r6', 'r7']),
# 'r3': set(['r8', 'r9']),
# 'r4': set(['A1', 'r10']),
# 'r5': set(['r11', 'r12']),
# 'r9': set(['r13', 'r14']),
# 'r11': set(['A2', 'r16']),
# 'r10': set(['r4']),
# 'r13': set(['A3', 'r15']),
# 'r15': set(['r17', 'r18'])}
# start = 'A'
# goals = ['A1', 'A2', 'A3']
# paths = dfs_2(data, start, goals)
# print(paths)
# testing on fake data
# grouped_c_r = [np.array([('read1', 1, 2, 3, 'right')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')])), np.array([('read3', 1, 2, 3, 'left')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')]))]
# keys_c_r = ['ctg1', 'ctg2']
# grouped_r_r = [np.array([('read2', 1, 2, 3, 'right')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')])), np.array([('read3', 1, 2, 3, 'right')], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')])), np.array([], dtype=(np.record, [('query_name', '<U250'), ('SI', '<f8'), ('OS', '<f8'), ('ES', '<f8'), ('extension_side', '<U25')]))]
# keys_r_r = ['read1', 'read2', 'read3']
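    # Load the pickled overlap groupings (numpy record arrays) and their key lists prepared earlier in the pipeline.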
with open('grouped_data_c_r', 'rb') as grouped_c_r_file:
grouped_c_r = pickle.load(grouped_c_r_file)
with open('grouped_data_r_r', 'rb') as grouped_r_r_file:
grouped_r_r = pickle.load(grouped_r_r_file)
with open('keys_c_r', 'rb') as keys_c_r_file:
keys_c_r = pickle.load(keys_c_r_file).tolist()
with open('keys_r_r', 'rb') as keys_r_r_file:
keys_r_r = pickle.load(keys_r_r_file).tolist()
start = keys_c_r[1]
goals = keys_c_r
side = 'left'
paths = try_monte_carlo(start, goals, side, grouped_c_r, keys_c_r, grouped_r_r, keys_r_r, max_depth=30, n_times = 500)
# with open('mc_ctg3_right', 'wb') as paths_right_side_file:
# pickle.dump(paths, paths_right_side_file)
print(paths)
the-stack_0_13622 | """ILI9341 demo (Scrolling Marquee)."""
from ili9341 import Display, color565
from time import sleep
from sys import implementation
def test():
"""Scrolling Marquee."""
try:
        # Implementation dependent pin and SPI configuration
if implementation.name == 'circuitpython':
import board
from busio import SPI
from digitalio import DigitalInOut
cs_pin = DigitalInOut(board.P0_15)
dc_pin = DigitalInOut(board.P0_17)
rst_pin = DigitalInOut(board.P0_20)
spi = SPI(clock=board.P0_24, MOSI=board.P0_22)
else:
from machine import Pin, SPI
cs_pin = Pin(16)
dc_pin = Pin(4)
rst_pin = Pin(17)
# Baud rate of 40000000 seems about the max
spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(13))
# Create the ILI9341 display:
display = Display(spi, dc=dc_pin, cs=cs_pin, rst=rst_pin)
display.clear()
# Draw non-moving circles
display.fill_rectangle(0, 0, 239, 99, color565(27, 72, 156))
display.fill_rectangle(0, 168, 239, 151, color565(220, 27, 72))
# Load Marquee image
display.draw_image('images/Rototron128x26.raw', 56, 120, 128, 26)
# Set up scrolling
display.set_scroll(top=152, bottom=100)
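        # Triangle-wave sequence of scroll offsets (152 up to 220 and back down) so the marquee sweeps back and forth.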
spectrum = list(range(152, 221)) + list(reversed(range(152, 220)))
while True:
for y in spectrum:
display.scroll(y)
sleep(.1)
except KeyboardInterrupt:
display.cleanup()
test()